Posted to commits@hbase.apache.org by la...@apache.org on 2016/11/01 19:50:55 UTC

[01/50] hbase git commit: HBASE-14221 - Reduce the number of times row comparison is done in a Scan (Ram)

Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 fd55483bc -> a55842a0a


HBASE-14221 - Reduce the number of times row comparison is done in a Scan
(Ram)

Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cccf8e6a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cccf8e6a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cccf8e6a

Branch: refs/heads/branch-1.0
Commit: cccf8e6a4a791aa94e738f42370b4f7e1f90353a
Parents: 5d854d3
Author: ramkrishna <ra...@gmail.com>
Authored: Fri Jan 8 13:58:52 2016 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Fri Jan 8 14:02:39 2016 +0530

----------------------------------------------------------------------
 .../hbase/regionserver/ScanQueryMatcher.java    | 32 +++++++++++---------
 .../hadoop/hbase/regionserver/StoreScanner.java | 20 ++++++++++--
 2 files changed, 35 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cccf8e6a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
index 032b4ce..901dbad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
@@ -278,23 +278,27 @@ public class ScanQueryMatcher {
     if (filter != null && filter.filterAllRemaining()) {
       return MatchCode.DONE_SCAN;
     }
-    int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength,
+    if (row != null) {
+      int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength,
         cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-    if (!this.isReversed) {
-      if (ret <= -1) {
-        return MatchCode.DONE;
-      } else if (ret >= 1) {
-        // could optimize this, if necessary?
-        // Could also be called SEEK_TO_CURRENT_ROW, but this
-        // should be rare/never happens.
-        return MatchCode.SEEK_NEXT_ROW;
+      if (!this.isReversed) {
+        if (ret <= -1) {
+          return MatchCode.DONE;
+        } else if (ret >= 1) {
+          // could optimize this, if necessary?
+          // Could also be called SEEK_TO_CURRENT_ROW, but this
+          // should be rare/never happens.
+          return MatchCode.SEEK_NEXT_ROW;
+        }
+      } else {
+        if (ret <= -1) {
+          return MatchCode.SEEK_NEXT_ROW;
+        } else if (ret >= 1) {
+          return MatchCode.DONE;
+        }
       }
     } else {
-      if (ret <= -1) {
-        return MatchCode.SEEK_NEXT_ROW;
-      } else if (ret >= 1) {
-        return MatchCode.DONE;
-      }
+      return MatchCode.DONE;
     }
 
     // optimize case.

http://git-wip-us.apache.org/repos/asf/hbase/blob/cccf8e6a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 94e94d8..b983b12 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -474,8 +474,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     byte[] row = cell.getRowArray();
     int offset = cell.getRowOffset();
     short length = cell.getRowLength();
-    if (limit < 0 || matcher.row == null || !Bytes.equals(row, offset, length, matcher.row,
-        matcher.rowOffset, matcher.rowLength)) {
+    if (limit < 0 || matcher.row == null) {
       this.countPerRow = 0;
       matcher.setRow(row, offset, length);
     }
@@ -512,6 +511,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
             if (!matcher.moreRowsMayExistAfter(cell)) {
               return false;
             }
+            // Setting matcher.row = null means that after the subsequent seekToNextRow()
+            // the heap.peek() will in any case be in the next row, so SQM.match(cell) need
+            // not do another compareRow to declare the current row DONE
+            matcher.row = null;
             seekToNextRow(cell);
             break LOOP;
           }
@@ -532,6 +535,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
             if (!matcher.moreRowsMayExistAfter(cell)) {
               return false;
             }
+            // Setting matcher.row = null means that after the subsequent seekToNextRow()
+            // the heap.peek() will in any case be in the next row, so SQM.match(cell) need
+            // not do another compareRow to declare the current row DONE
+            matcher.row = null;
             seekToNextRow(cell);
           } else if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL) {
             seekAsDirection(matcher.getKeyForNextColumn(cell));
@@ -545,6 +552,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
           continue;
 
         case DONE:
+          // We are sure that this row is done and we are in the next row.
+          // So the subsequent StoreScanner.next() call can reset matcher.row
+          // without doing another row compare.
+          matcher.row = null;
           return true;
 
         case DONE_SCAN:
@@ -557,7 +568,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
           if (!matcher.moreRowsMayExistAfter(cell)) {
             return false;
           }
-
+          // Setting matcher.row = null means that after the subsequent seekToNextRow()
+          // the heap.peek() will in any case be in the next row, so SQM.match(cell) need
+          // not do another compareRow to declare the current row DONE
+          matcher.row = null;
           seekToNextRow(cell);
           break;
 

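The core idea of this change, as a minimal standalone sketch: a null current
row acts as a sentinel meaning "the scanner has already sought to the next
row", so the matcher can report DONE without a byte-wise row comparison. The
class and method names below are illustrative only, not the actual HBase types.

    import java.util.Arrays;

    // Sketch (Java 9+): a null currentRow means "the next peeked cell starts a
    // new row", so match() can skip the row comparison entirely.
    public class RowMatchSketch {
      enum MatchCode { DONE, SEEK_NEXT_ROW, INCLUDE }

      private byte[] currentRow; // null => known row boundary, no compare needed

      MatchCode match(byte[] cellRow) {
        if (currentRow == null) {
          return MatchCode.DONE; // analogue of the patched else-branch
        }
        int ret = Arrays.compare(currentRow, cellRow); // forward scan
        if (ret < 0) return MatchCode.DONE;            // cell is in a later row
        if (ret > 0) return MatchCode.SEEK_NEXT_ROW;   // rare: cell lags behind
        return MatchCode.INCLUDE;                      // same row, keep matching
      }

      void setRow(byte[] row) { this.currentRow = row; }
      void clearRow() { this.currentRow = null; } // analogue of matcher.row = null

      public static void main(String[] args) {
        RowMatchSketch m = new RowMatchSketch();
        m.setRow("row1".getBytes());
        System.out.println(m.match("row2".getBytes())); // DONE (one compare done)
        m.clearRow();
        System.out.println(m.match("row2".getBytes())); // DONE (no compare at all)
      }
    }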

[46/50] hbase git commit: HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater than zero

Posted by la...@apache.org.
HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater than zero

Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a049e518
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a049e518
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a049e518

Branch: refs/heads/branch-1.0
Commit: a049e518f3c1967de1668a4c5e618705ab11fb02
Parents: 89b432d
Author: chenheng <ch...@apache.org>
Authored: Thu Aug 18 10:28:06 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Thu Aug 18 11:00:35 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon  | 11 ++---------
 .../org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java |  4 +++-
 .../org/apache/hadoop/hbase/io/hfile/CacheStats.java     |  4 +++-
 3 files changed, 8 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a049e518/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 9883848..f5485c2 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -176,9 +176,6 @@ org.apache.hadoop.util.StringUtils;
 </%args>
 <%java>
   AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot();
-  // Only show if non-zero mean and stddev as is the case in combinedblockcache
-  double mean = ageAtEvictionSnapshot.getMean();
-  double stddev = ageAtEvictionSnapshot.getStdDev();
 </%java>
     <tr>
         <td>Evicted</td>
@@ -190,20 +187,16 @@ org.apache.hadoop.util.StringUtils;
         <td><% String.format("%,d", bc.getStats().getEvictionCount()) %></td>
         <td>The total number of times an eviction has occurred</td>
     </tr>
-<%if mean > 0 %>
     <tr>
         <td>Mean</td>
-        <td><% String.format("%,d", (long)(ageAtEvictionSnapshot.getMean()/(1000000 * 1000))) %></td>
+        <td><% String.format("%,d", (long)(ageAtEvictionSnapshot.getMean())) %></td>
         <td>Mean age of Blocks at eviction time (seconds)</td>
     </tr>
-</%if>
-<%if stddev > 0 %>
     <tr>
         <td>StdDev</td>
-        <td><% String.format("%,d", (long)(ageAtEvictionSnapshot.getStdDev()/1000000)) %></td>
+        <td><% String.format("%,d", (long)(ageAtEvictionSnapshot.getStdDev())) %></td>
         <td>Standard Deviation for age of Blocks at eviction time</td>
     </tr>
-</%if>
 </%def>
 
 <%def bc_stats>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a049e518/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 94638da..2d3f524 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -41,6 +41,8 @@ import com.yammer.metrics.stats.Snapshot;
  */
 @InterfaceAudience.Private
 public class BlockCacheUtil {
+
+  public static final long NANOS_PER_SECOND = 1000000000;
   /**
    * Needed making histograms.
    */
@@ -225,7 +227,7 @@ public class BlockCacheUtil {
         this.dataBlockCount++;
         this.dataSize += cb.getSize();
       }
-      long age = this.now - cb.getCachedTime();
+      long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND;
       this.age.update(age);
       return false;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a049e518/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index 00accfc..ba5fbf4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -131,7 +131,9 @@ public class CacheStats {
   }
 
   public void evicted(final long t) {
-    if (t > this.startTime) this.ageAtEviction.update(t - this.startTime);
+    if (t > this.startTime) {
+      this.ageAtEviction.update((t - this.startTime)/BlockCacheUtil.NANOS_PER_SECOND);
+    }
     this.evictedBlockCount.incrementAndGet();
   }
 

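The fix moves the nanoseconds-to-seconds conversion to the point where block
ages are recorded, so the histogram's mean and stddev are already in seconds
and the template no longer has to divide (or hide) the aggregated values. A
small standalone sketch of the conversion, with illustrative names:

    import java.util.concurrent.TimeUnit;

    // Sketch: record ages in seconds up front, as the patch does, instead of
    // dividing the histogram's mean/stddev afterwards in the UI template.
    public class BlockAgeSketch {
      static final long NANOS_PER_SECOND = 1000000000L;

      public static void main(String[] args) {
        long cachedTime = System.nanoTime();
        long now = cachedTime + TimeUnit.SECONDS.toNanos(90); // block aged 90s

        long ageSeconds = (now - cachedTime) / NANOS_PER_SECOND;
        System.out.println("age recorded in histogram = " + ageSeconds + "s"); // 90s
      }
    }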

[39/50] hbase git commit: HBASE-15645 ADDENDUM Label the new methods on Table introduced by HBASE-15645 as InterfaceAudience.Private

Posted by la...@apache.org.
HBASE-15645 ADDENDUM Label the new methods on Table introduced by HBASE-15645 as InterfaceAudience.Private

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89111950
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89111950
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89111950

Branch: refs/heads/branch-1.0
Commit: 891119501dc6f3fc073bb38688a5bd3d42ffa6e7
Parents: fd5c5fb
Author: Phil Yang <ud...@gmail.com>
Authored: Wed Apr 27 11:21:17 2016 +0800
Committer: Sean Busbey <bu...@apache.org>
Committed: Fri Apr 29 08:44:41 2016 -0500

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/client/Table.java      | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/89111950/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 8c6169d..ee742b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -606,11 +606,13 @@ public interface Table extends Closeable {
    * early and throw SocketTimeoutException.
   * @param operationTimeout the total timeout of each operation in milliseconds.
    */
+  @InterfaceAudience.Private
   public void setOperationTimeout(int operationTimeout);
 
   /**
   * Get the timeout (millisecond) of each operation in this Table instance.
    */
+  @InterfaceAudience.Private
   public int getOperationTimeout();
 
   /**
@@ -620,10 +622,12 @@ public interface Table extends Closeable {
   * retries are exhausted or the operation timeout is reached.
   * @param rpcTimeout the timeout of each rpc request in milliseconds.
    */
+  @InterfaceAudience.Private
   public void setRpcTimeout(int rpcTimeout);
 
   /**
   * Get the timeout (millisecond) of each rpc request in this Table instance.
    */
+  @InterfaceAudience.Private
   public int getRpcTimeout();
 }

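For readers unfamiliar with the annotation: InterfaceAudience marks the
intended consumers of an API element, and Private means the element may change
at any time with no compatibility guarantee. A hedged sketch of the pattern
(the interface and method names below are made up for illustration):

    import org.apache.hadoop.hbase.classification.InterfaceAudience;

    // Sketch: a public interface can scope individual members down so that
    // downstream users know they are internal and may change without notice.
    public interface ExampleApi {
      void stableMethod(); // part of the public, compatible surface

      @InterfaceAudience.Private // internal knob, exempt from compat rules
      void internalTuningKnob();
    }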

[13/50] hbase git commit: Updated pom.xml version to 1.0.4-SNAPSHOT

Posted by la...@apache.org.
Updated pom.xml version to 1.0.4-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c2022f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c2022f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c2022f5

Branch: refs/heads/branch-1.0
Commit: 5c2022f56ebff3b7fd387c609970cc7042651e14
Parents: 9c42bea
Author: Enis Soztutar <en...@apache.org>
Authored: Thu Jan 28 13:58:21 2016 -0800
Committer: Enis Soztutar <en...@apache.org>
Committed: Thu Jan 28 13:58:21 2016 -0800

----------------------------------------------------------------------
 hbase-annotations/pom.xml     | 2 +-
 hbase-assembly/pom.xml        | 2 +-
 hbase-checkstyle/pom.xml      | 4 ++--
 hbase-client/pom.xml          | 2 +-
 hbase-common/pom.xml          | 2 +-
 hbase-examples/pom.xml        | 2 +-
 hbase-hadoop-compat/pom.xml   | 2 +-
 hbase-hadoop2-compat/pom.xml  | 2 +-
 hbase-it/pom.xml              | 2 +-
 hbase-prefix-tree/pom.xml     | 2 +-
 hbase-protocol/pom.xml        | 2 +-
 hbase-resource-bundle/pom.xml | 2 +-
 hbase-rest/pom.xml            | 2 +-
 hbase-server/pom.xml          | 2 +-
 hbase-shell/pom.xml           | 2 +-
 hbase-testing-util/pom.xml    | 2 +-
 hbase-thrift/pom.xml          | 2 +-
 pom.xml                       | 2 +-
 18 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index f8b7986..f22ea05 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 02ce2d8..9bbb9d4 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-assembly</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-checkstyle/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 05af30d..654ecec 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -24,14 +24,14 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>org.apache.hbase</groupId>
 <artifactId>hbase-checkstyle</artifactId>
-<version>1.0.3</version>
+<version>1.0.4-SNAPSHOT</version>
 <name>Apache HBase - Checkstyle</name>
 <description>Module to hold Checkstyle properties for HBase.</description>
 
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-client/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 58732f9..770b60c 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-common/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 1b654ba..260b268 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 966c2f9..24af59e 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-examples</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-hadoop-compat/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index 1de2bb5..3964048 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.0.3</version>
+        <version>1.0.4-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-hadoop2-compat/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 5c85b01..5198e5e 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -21,7 +21,7 @@ limitations under the License.
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-it/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 714a25c..836cd5a 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-prefix-tree/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml
index 7a7fb1d..bcb14cb 100644
--- a/hbase-prefix-tree/pom.xml
+++ b/hbase-prefix-tree/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-protocol/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index c8996aa..b805a96 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.0.3</version>
+        <version>1.0.4-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-resource-bundle/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml
index e53ac1a..1aecd3d 100644
--- a/hbase-resource-bundle/pom.xml
+++ b/hbase-resource-bundle/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-rest/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index 4cc4bd8..e75d215 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-rest</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d025ed0..babc96a 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-server</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-shell/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 172b250..0691f10 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-shell</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-testing-util/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index 9844269..e7249d4 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.0.3</version>
+        <version>1.0.4-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
     <artifactId>hbase-testing-util</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-thrift/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index 3848787..b6d175d 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.0.3</version>
+    <version>1.0.4-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-thrift</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0793f01..bf52503 100644
--- a/pom.xml
+++ b/pom.xml
@@ -39,7 +39,7 @@
   <groupId>org.apache.hbase</groupId>
   <artifactId>hbase</artifactId>
   <packaging>pom</packaging>
-  <version>1.0.3</version>
+  <version>1.0.4-SNAPSHOT</version>
   <name>Apache HBase</name>
   <description>
    Apache HBase™ is the Hadoop database. Use it when you need


[30/50] hbase git commit: HBASE-15478 add comments to syncRunnerIndex handling explaining constraints on possible values.

Posted by la...@apache.org.
HBASE-15478 add comments to syncRunnerIndex handling explaining constraints on possible values.

Signed-off-by: zhangduo <zh...@apache.org>

 Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5eb7560
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5eb7560
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5eb7560

Branch: refs/heads/branch-1.0
Commit: d5eb756044cb8819e2881b7dc6fd0f7c772b6561
Parents: d8a5820
Author: Sean Busbey <bu...@apache.org>
Authored: Thu Mar 17 15:22:07 2016 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Mon Mar 21 00:36:53 2016 -0500

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/wal/FSHLog.java      | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5eb7560/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 9e886a7..85de419 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -1871,12 +1871,21 @@ public class FSHLog implements WAL {
           LOG.trace("Sequence=" + sequence + ", syncCount=" + this.syncFuturesCount);
         }
 
-        // Below expects that the offer 'transfers' responsibility for the outstanding syncs to the
-        // syncRunner. We should never get an exception in here. HBASE-11145 was because queue
-        // was sized exactly to the count of user handlers but we could have more if we factor in
-        // meta handlers doing opens and closes.
+        // syncRunnerIndex is bound to the range [0, Integer.MAX_VALUE - 1] as follows:
+        //   * The maximum value possible for syncRunners.length is Integer.MAX_VALUE.
+        //   * syncRunnerIndex starts at 0 and is incremented only here.
+        //   * after the increment, the value is bounded by the '%' operator to
+        //     [0, syncRunners.length), presuming the value was positive prior to the '%' operator.
+        //   * after being bound to [0, Integer.MAX_VALUE - 1], the new value is stored in
+        //     syncRunnerIndex, ensuring that it can't grow without bound and overflow.
+        //   * note that the value after the increment must be positive, because the most it
+        //     could have been prior was Integer.MAX_VALUE - 1 and we only increment by 1.
         this.syncRunnerIndex = (this.syncRunnerIndex + 1) % this.syncRunners.length;
         try {
+          // Below expects that the offer 'transfers' responsibility for the outstanding syncs to the
+          // syncRunner. We should never get an exception in here. HBASE-11145 was because queue
+          // was sized exactly to the count of user handlers but we could have more if we factor in
+          // meta handlers doing opens and closes.
           this.syncRunners[this.syncRunnerIndex].offer(sequence, this.syncFutures, this.syncFuturesCount);
         } catch (Exception e) {
           cleanupOutstandingSyncsOnException(sequence, e);

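The invariant the new comment documents is easy to see in isolation: because
the stored index is always inside [0, syncRunners.length) after the '%', the
+1 can never overflow an int, and the result is always a valid array index. A
minimal sketch (illustrative names, not the FSHLog code):

    // Sketch of the round-robin selection the comment above describes.
    public class RoundRobinSketch {
      private final String[] runners = {"syncRunner-0", "syncRunner-1", "syncRunner-2"};
      private int index = 0; // invariant: always in [0, runners.length)

      String next() {
        index = (index + 1) % runners.length; // non-negative, cannot overflow
        return runners[index];
      }

      public static void main(String[] args) {
        RoundRobinSketch rr = new RoundRobinSketch();
        for (int i = 0; i < 5; i++) {
          System.out.println(rr.next()); // syncRunner-1, -2, -0, -1, -2
        }
      }
    }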

[14/50] hbase git commit: HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

Posted by la...@apache.org.
HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

Conflicts:
	hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b43442c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b43442c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b43442c5

Branch: refs/heads/branch-1.0
Commit: b43442c58a1a66f1c17f889c081fea159caaebf5
Parents: 5c2022f
Author: Andrew Purtell <ap...@apache.org>
Authored: Mon Feb 1 09:48:16 2016 -0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed Feb 3 16:19:23 2016 -0800

----------------------------------------------------------------------
 .../hbase/zookeeper/ZooKeeperWatcher.java       | 79 ++++++++++++++++++--
 .../java/org/apache/hadoop/hbase/AuthUtil.java  |  5 ++
 2 files changed, 78 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b43442c5/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 7b591f8..983153f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -24,12 +24,15 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -126,6 +129,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
 
   private final Exception constructorCaller;
 
+  /* A pattern that matches a Kerberos name, borrowed from Hadoop's KerberosName */
+  private static final Pattern NAME_PATTERN = Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
+
   /**
    * Instantiate a ZooKeeper connection and watcher.
    * @param identifier string that is passed to RecoverableZookeeper to be used as
@@ -218,6 +224,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
    */
   public void checkAndSetZNodeAcls() {
     if (!ZKUtil.isSecureZooKeeper(getConfiguration())) {
+      LOG.info("not a secure deployment, proceeding");
       return;
     }
 
@@ -262,13 +269,23 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
    * @throws IOException
    */
   private boolean isBaseZnodeAclSetup(List<ACL> acls) throws IOException {
-    String superUser = conf.get("hbase.superuser");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Checking znode ACLs");
+    }
+    String superUser = conf.get(AuthUtil.SUPERUSER_CONF_KEY);
+    // Check whether ACLs are set for all superusers
+    if (superUser != null && !checkACLForSuperUsers(new String[] { superUser }, acls)) {
+      return false;
+    }
 
     // this assumes that current authenticated user is the same as zookeeper client user
     // configured via JAAS
     String hbaseUser = UserGroupInformation.getCurrentUser().getShortUserName();
 
     if (acls.isEmpty()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("ACL is empty");
+      }
       return false;
     }
 
@@ -279,23 +296,73 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
       // and one for the hbase user
       if (Ids.ANYONE_ID_UNSAFE.equals(id)) {
         if (perms != Perms.READ) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
+              id, perms, Perms.READ));
+          }
           return false;
         }
-      } else if (superUser != null && new Id("sasl", superUser).equals(id)) {
-        if (perms != Perms.ALL) {
-          return false;
+      } else if ("sasl".equals(id.getScheme())) {
+        String name = id.getId();
+        // If ZooKeeper recorded the Kerberos full name in the ACL, use only the shortname
+        Matcher match = NAME_PATTERN.matcher(name);
+        if (match.matches()) {
+          name = match.group(1);
         }
-      } else if (new Id("sasl", hbaseUser).equals(id)) {
-        if (perms != Perms.ALL) {
+        if (name.equals(hbaseUser)) {
+          if (perms != Perms.ALL) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
+                id, perms, Perms.ALL));
+            }
+            return false;
+          }
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Unexpected shortname in SASL ACL: " + id);
+          }
           return false;
         }
       } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("unexpected ACL id '" + id + "'");
+        }
         return false;
       }
     }
     return true;
   }
 
+  /*
+   * Validate whether ACLs are set for all superusers.
+   */
+  private boolean checkACLForSuperUsers(String[] superUsers, List<ACL> acls) {
+    for (String user : superUsers) {
+      boolean hasAccess = false;
+      // TODO: Validate super group members also when ZK supports setting node ACL for groups.
+      if (!user.startsWith(AuthUtil.GROUP_PREFIX)) {
+        for (ACL acl : acls) {
+          if (user.equals(acl.getId().getId())) {
+            if (acl.getPerms() == Perms.ALL) {
+              hasAccess = true;
+            } else {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug(String.format(
+                  "superuser '%s' does not have correct permissions: have %0x, want %0x",
+                  acl.getId().getId(), acl.getPerms(), Perms.ALL));
+              }
+            }
+            break;
+          }
+        }
+        if (!hasAccess) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
   @Override
   public String toString() {
     return this.identifier + ", quorum=" + quorum + ", baseZNode=" + baseZNode;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b43442c5/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index cb7ab83..765a4b3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -38,6 +38,11 @@ import org.apache.hadoop.security.UserGroupInformation;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AuthUtil {
+  /** Prefix character to denote group names */
+  public static final String GROUP_PREFIX = "@";
+  /** Configuration key for superusers */
+  public static final String SUPERUSER_CONF_KEY = "hbase.superuser";
+
   private static final Log LOG = LogFactory.getLog(AuthUtil.class);
 
   private AuthUtil() {

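The shortname extraction can be exercised on its own: the pattern splits a
full Kerberos principal of the form primary/instance@REALM, and group(1) is
the primary (short) name that the ACL check compares against. A standalone
sketch using the same pattern (the helper name is made up):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Sketch of the shortname matching used by isBaseZnodeAclSetup().
    public class ShortnameSketch {
      private static final Pattern NAME_PATTERN =
          Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");

      static String shortname(String principal) {
        Matcher m = NAME_PATTERN.matcher(principal);
        return m.matches() ? m.group(1) : principal; // no match: use id as-is
      }

      public static void main(String[] args) {
        System.out.println(shortname("hbase/rs1.example.com@EXAMPLE.COM")); // hbase
        System.out.println(shortname("hbase@EXAMPLE.COM"));                 // hbase
        System.out.println(shortname("hbase"));                             // hbase
      }
    }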

[44/50] hbase git commit: HBASE-16180 Fix ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD findbugs introduced by parent

Posted by la...@apache.org.
HBASE-16180 Fix ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD findbugs introduced by parent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb9a6481
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb9a6481
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb9a6481

Branch: refs/heads/branch-1.0
Commit: fb9a64816d6a19c2a9de3b91c5c145c2f62ca698
Parents: 14d0bef
Author: stack <st...@apache.org>
Authored: Tue Jul 5 15:43:56 2016 -0700
Committer: stack <st...@apache.org>
Committed: Wed Jul 6 09:13:12 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb9a6481/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index edab0dc..7e6e4a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.io.Writable;
  * Writes HFile format version 2.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
+  justification="Understood but doing it anyway; HBASE-14730")
 public class HFileWriterV2 extends AbstractHFileWriter {
   static final Log LOG = LogFactory.getLog(HFileWriterV2.class);
 


[35/50] hbase git commit: HBASE-15645 hbase.rpc.timeout is not used in operations of HTable

Posted by la...@apache.org.
HBASE-15645 hbase.rpc.timeout is not used in operations of HTable

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48f158f0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48f158f0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48f158f0

Branch: refs/heads/branch-1.0
Commit: 48f158f0a834c28ec06a88aa39389da685ec7a0e
Parents: 0b20b27
Author: Phil Yang <ud...@gmail.com>
Authored: Tue Apr 26 18:02:22 2016 +0800
Committer: stack <st...@apache.org>
Committed: Tue Apr 26 10:55:36 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  5 ++-
 .../org/apache/hadoop/hbase/client/HTable.java  | 43 +++++++++++++++-----
 .../apache/hadoop/hbase/client/HTablePool.java  | 16 ++++++++
 .../hadoop/hbase/client/RpcRetryingCaller.java  | 16 ++++++--
 .../hbase/client/RpcRetryingCallerFactory.java  | 25 +++++++++++-
 .../client/StatsTrackingRpcRetryingCaller.java  |  2 +-
 .../org/apache/hadoop/hbase/client/Table.java   | 31 ++++++++++++++
 .../client/TestFastFailWithoutTestUtil.java     |  2 +-
 .../org/apache/hadoop/hbase/HConstants.java     |  4 +-
 .../src/main/resources/hbase-default.xml        | 11 ++++-
 .../hadoop/hbase/rest/client/RemoteHTable.java  | 16 ++++++++
 .../hadoop/hbase/client/HTableWrapper.java      | 16 ++++++++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 26 +++++++++++-
 13 files changed, 191 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 8cd3037..73d8c89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -183,6 +183,7 @@ public class HBaseAdmin implements Admin {
   private boolean cleanupConnectionOnClose = false; // close the connection in close()
   private boolean closed = false;
   private int operationTimeout;
+  private int rpcTimeout;
 
   private RpcRetryingCallerFactory rpcCallerFactory;
 
@@ -237,6 +238,8 @@ public class HBaseAdmin implements Admin {
         "hbase.client.retries.longer.multiplier", 10);
     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+    this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
 
     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
   }
@@ -3596,7 +3599,7 @@ public class HBaseAdmin implements Admin {
   }
 
   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
-    RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
+    RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout);
     try {
       return caller.callWithRetries(callable, operationTimeout);
     } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 533cbab..2bc0791 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -129,7 +129,8 @@ public class HTable implements HTableInterface, RegionLocator {
   protected int scannerCaching;
   protected long scannerMaxResultSize;
   private ExecutorService pool;  // For Multi & Scan
-  private int operationTimeout;
+  private int operationTimeout; // global timeout for each blocking method with retrying rpc
+  private int rpcTimeout; // timeout for each rpc request
   private final boolean cleanupPoolOnClose; // shutdown the pool in close()
   private final boolean cleanupConnectionOnClose; // close the connection in close()
   private Consistency defaultConsistency = Consistency.STRONG;
@@ -360,6 +361,8 @@ public class HTable implements HTableInterface, RegionLocator {
 
     this.operationTimeout = tableName.isSystemTable() ?
         tableConfiguration.getMetaOperationTimeout() : tableConfiguration.getOperationTimeout();
+    this.rpcTimeout = configuration.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     this.scannerCaching = tableConfiguration.getScannerCaching();
     this.scannerMaxResultSize = tableConfiguration.getScannerMaxResultSize();
     if (this.rpcCallerFactory == null) {
@@ -808,7 +811,8 @@ public class HTable implements HTableInterface, RegionLocator {
          }
        }
      };
-     return rpcCallerFactory.<Result>newCaller().callWithRetries(callable, this.operationTimeout);
+     return rpcCallerFactory.<Result>newCaller(rpcTimeout).callWithRetries(callable,
+         this.operationTimeout);
    }
 
   /**
@@ -913,7 +917,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-      return rpcCallerFactory.<Result>newCaller().callWithRetries(callable, this.operationTimeout);
+      return rpcCallerFactory.<Result>newCaller(rpcTimeout).callWithRetries(callable,
+          this.operationTimeout);
     }
 
     // Call that takes into account the replica
@@ -1029,7 +1034,8 @@ public class HTable implements HTableInterface, RegionLocator {
         }
       }
     };
-    rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
+    rpcCallerFactory.<Boolean> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1146,7 +1152,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-    return rpcCallerFactory.<Result> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Result> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1177,7 +1184,8 @@ public class HTable implements HTableInterface, RegionLocator {
         }
       }
     };
-    return rpcCallerFactory.<Result> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Result> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1244,7 +1252,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-    return rpcCallerFactory.<Long> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Long> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1273,7 +1282,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-    return rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Boolean> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1303,7 +1313,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-    return rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Boolean> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1332,7 +1343,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-    return rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Boolean> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1362,7 +1374,8 @@ public class HTable implements HTableInterface, RegionLocator {
           }
         }
       };
-    return rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
+    return rpcCallerFactory.<Boolean> newCaller(rpcTimeout).callWithRetries(callable,
+        this.operationTimeout);
   }
 
   /**
@@ -1815,6 +1828,14 @@ public class HTable implements HTableInterface, RegionLocator {
     return operationTimeout;
   }
 
+  @Override public void setRpcTimeout(int rpcTimeout) {
+    this.rpcTimeout = rpcTimeout;
+  }
+
+  @Override public int getRpcTimeout() {
+    return rpcTimeout;
+  }
+
   @Override
   public String toString() {
     return tableName + ";" + connection;

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
index 4b998a6..d837bf8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
@@ -672,5 +672,21 @@ public class HTablePool implements Closeable {
       checkState();
       return table.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
     }
+
+    @Override public void setOperationTimeout(int operationTimeout) {
+      table.setOperationTimeout(operationTimeout);
+    }
+
+    @Override public int getOperationTimeout() {
+      return table.getOperationTimeout();
+    }
+
+    @Override public void setRpcTimeout(int rpcTimeout) {
+      table.setRpcTimeout(rpcTimeout);
+    }
+
+    @Override public int getRpcTimeout() {
+      return table.getRpcTimeout();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
index 49c7efd..77ed031 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
@@ -63,21 +63,23 @@ public class RpcRetryingCaller<T> {
 
   private final long pause;
   private final int retries;
+  private final int rpcTimeout;// timeout for each rpc request
   private final AtomicBoolean cancelled = new AtomicBoolean(false);
   private final RetryingCallerInterceptor interceptor;
   private final RetryingCallerInterceptorContext context;
 
   public RpcRetryingCaller(long pause, int retries, int startLogErrorsCnt) {
-    this(pause, retries, RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, startLogErrorsCnt);
+    this(pause, retries, RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, startLogErrorsCnt, 0);
   }
 
   public RpcRetryingCaller(long pause, int retries,
-      RetryingCallerInterceptor interceptor, int startLogErrorsCnt) {
+      RetryingCallerInterceptor interceptor, int startLogErrorsCnt, int rpcTimeout) {
     this.pause = pause;
     this.retries = retries;
     this.interceptor = interceptor;
     context = interceptor.createEmptyContext();
     this.startLogErrorsCnt = startLogErrorsCnt;
+    this.rpcTimeout = rpcTimeout;
   }
 
   private int getRemainingTime(int callTimeout) {
@@ -97,6 +99,14 @@ public class RpcRetryingCaller<T> {
     }
   }
 
+  private int getTimeout(int callTimeout){
+    int timeout = getRemainingTime(callTimeout);
+    if (timeout <= 0 || rpcTimeout > 0 && rpcTimeout < timeout){
+      timeout = rpcTimeout;
+    }
+    return timeout;
+  }
+
   public void cancel(){
     cancelled.set(true);
     synchronized (cancelled){
@@ -123,7 +133,7 @@ public class RpcRetryingCaller<T> {
       try {
         callable.prepare(tries != 0); // if called with false, check table status on ZK
         interceptor.intercept(context.prepare(callable, tries));
-        return callable.call(getRemainingTime(callTimeout));
+        return callable.call(getTimeout(callTimeout));
       } catch (PreemptiveFastFailException e) {
         throw e;
       } catch (Throwable t) {

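The effect of getTimeout() above is that each individual rpc waits for at most
the smaller of the configured rpc timeout and whatever remains of the overall
operation budget, with rpcTimeout <= 0 meaning no per-rpc cap. A standalone
sketch of that bounding logic (illustrative names):

    // Sketch of the per-call timeout bounding added to RpcRetryingCaller.
    public class TimeoutSketch {
      static int boundedTimeout(int remainingOperationMs, int rpcTimeoutMs) {
        int timeout = remainingOperationMs;
        if (timeout <= 0 || (rpcTimeoutMs > 0 && rpcTimeoutMs < timeout)) {
          timeout = rpcTimeoutMs; // per-rpc cap wins (or budget already spent)
        }
        return timeout;
      }

      public static void main(String[] args) {
        System.out.println(boundedTimeout(30000, 10000)); // 10000: rpc cap wins
        System.out.println(boundedTimeout(5000, 10000));  // 5000: little budget left
        System.out.println(boundedTimeout(30000, 0));     // 30000: no per-rpc cap set
      }
    }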
http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
index 1bf7bb0..09b70b8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
@@ -33,6 +33,7 @@ public class RpcRetryingCallerFactory {
   protected final Configuration conf;
   private final long pause;
   private final int retries;
+  private final int rpcTimeout;
   private final RetryingCallerInterceptor interceptor;
   private final int startLogErrorsCnt;
   private final boolean enableBackPressure;
@@ -53,6 +54,7 @@ public class RpcRetryingCallerFactory {
     this.interceptor = interceptor;
     enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
         HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
+    rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
   }
 
   /**
@@ -62,11 +64,32 @@ public class RpcRetryingCallerFactory {
     this.stats = statisticTracker;
   }
 
+  /**
+   * Create a new RetryingCaller with specific rpc timeout.
+   */
+  public <T> RpcRetryingCaller<T> newCaller(int rpcTimeout) {
+    // We store the values in the factory instance. This way, constructing new objects
+    //  is cheap as it does not require parsing a complex structure.
+    RpcRetryingCaller<T> caller = new RpcRetryingCaller<T>(pause, retries, interceptor,
+        startLogErrorsCnt, rpcTimeout);
+
+    // wrap it with stats, if we are tracking them
+    if (enableBackPressure && this.stats != null) {
+      caller = new StatsTrackingRpcRetryingCaller<T>(pause, retries, interceptor,
+          startLogErrorsCnt, stats);
+    }
+
+    return caller;
+  }
+
+  /**
+   * Create a new RetryingCaller with configured rpc timeout.
+   */
   public <T> RpcRetryingCaller<T> newCaller() {
     // We store the values in the factory instance. This way, constructing new objects
     //  is cheap as it does not require parsing a complex structure.
     RpcRetryingCaller<T> caller = new RpcRetryingCaller<T>(pause, retries, interceptor,
-        startLogErrorsCnt);
+        startLogErrorsCnt, rpcTimeout);
 
     // wrap it with stats, if we are tracking them
     if (enableBackPressure && this.stats != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java
index fc175bb..cbd625d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java
@@ -39,7 +39,7 @@ public class StatsTrackingRpcRetryingCaller<T> extends RpcRetryingCaller<T> {
   public StatsTrackingRpcRetryingCaller(long pause, int retries,
       RetryingCallerInterceptor interceptor, int startLogErrorsCnt,
       ServerStatisticTracker stats) {
-    super(pause, retries, interceptor, startLogErrorsCnt);
+    super(pause, retries, interceptor, startLogErrorsCnt, 0);
     this.stats = stats;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 9a6744b..8c6169d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -595,4 +595,35 @@ public interface Table extends Closeable {
    */
   boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
       CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException;
+
+  /**
+   * Set the timeout (in milliseconds) of each operation in this Table instance; this overrides
+   * the value of hbase.client.operation.timeout in the configuration.
+   * The operation timeout is a top-level restriction that ensures a blocking method is not
+   * blocked for longer than this. Within an operation, if an rpc request fails because of a
+   * timeout or another reason, it is retried until it succeeds or RetriesExhaustedException is
+   * thrown. But if the total blocked time reaches the operation timeout before the retries are
+   * exhausted, the operation breaks early and throws SocketTimeoutException.
+   * @param operationTimeout the total timeout of each operation in milliseconds.
+   */
+  public void setOperationTimeout(int operationTimeout);
+
+  /**
+   * Get the timeout (in milliseconds) of each operation in this Table instance.
+   */
+  public int getOperationTimeout();
+
+  /**
+   * Set the timeout (in milliseconds) of each rpc request in operations of this Table instance;
+   * this overrides the value of hbase.rpc.timeout in the configuration.
+   * If an rpc request waits longer than this, the client stops waiting and sends a new request
+   * to retry, until the retries are exhausted or the operation timeout is reached.
+   * @param rpcTimeout the timeout of each rpc request in milliseconds.
+   */
+  public void setRpcTimeout(int rpcTimeout);
+
+  /**
+   * Get the timeout (in milliseconds) of each rpc request in this Table instance.
+   */
+  public int getRpcTimeout();
 }
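
A short usage sketch for these new methods (standard client imports and error handling elided; the table name, row key, and timeout values are illustrative only, not recommendations):

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      table.setOperationTimeout(120000); // total budget for one blocking call, in ms
      table.setRpcTimeout(5000);         // budget for each rpc attempt, in ms
      table.get(new Get(Bytes.toBytes("row1")));
    }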

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestFastFailWithoutTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestFastFailWithoutTestUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestFastFailWithoutTestUtil.java
index 7cb0be6..b387683 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestFastFailWithoutTestUtil.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestFastFailWithoutTestUtil.java
@@ -563,7 +563,7 @@ public class TestFastFailWithoutTestUtil {
 
   public RpcRetryingCaller<Void> getRpcRetryingCaller(int pauseTime,
       int retries, RetryingCallerInterceptor interceptor) {
-    return new RpcRetryingCaller<Void>(pauseTime, retries, interceptor, 9) {
+    return new RpcRetryingCaller<Void>(pauseTime, retries, interceptor, 9, 0) {
       @Override
       public Void callWithRetries(RetryingCallable<Void> callable,
           int callTimeout) throws IOException, RuntimeException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index f1f3e1a..4ad01c8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -272,10 +272,10 @@ public final class HConstants {
   /** Parameter name for HBase client IPC pool size */
   public static final String HBASE_CLIENT_IPC_POOL_SIZE = "hbase.client.ipc.pool.size";
 
-  /** Parameter name for HBase client operation timeout, which overrides RPC timeout */
+  /** Parameter name for HBase client operation timeout. */
   public static final String HBASE_CLIENT_OPERATION_TIMEOUT = "hbase.client.operation.timeout";
 
-  /** Parameter name for HBase client operation timeout, which overrides RPC timeout */
+  /** Parameter name for HBase client operation timeout. */
   public static final String HBASE_CLIENT_META_OPERATION_TIMEOUT =
     "hbase.client.meta.operation.timeout";
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index c2581bf..9cc2a9e 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -795,11 +795,20 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>hbase.rpc.timeout</name>
     <value>60000</value>
-    <description>This is for the RPC layer to define how long HBase client applications
+    <description>This is for the RPC layer to define how long (in milliseconds) HBase client applications
         take for a remote call to time out. It uses pings to check connections
         but will eventually throw a TimeoutException.</description>
   </property>
   <property>
+    <name>hbase.client.operation.timeout</name>
+    <value>1200000</value>
+    <description>The operation timeout is a top-level restriction (in milliseconds) that ensures a
+        blocking operation in Table is not blocked longer than this. Within each operation, if an
+        rpc request fails because of a timeout or another reason, it is retried until it succeeds
+        or RetriesExhaustedException is thrown. But if the total blocked time reaches the operation
+        timeout before the retries are exhausted, it breaks early and throws SocketTimeoutException.</description>
+  </property>
+  <property>
     <name>hbase.rpc.shortoperation.timeout</name>
     <value>10000</value>
     <description>This is another version of "hbase.rpc.timeout". For those RPC operation
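
Both timeouts can also be set per client in code rather than in hbase-site.xml; a minimal sketch using the HConstants keys (the values are examples, not recommendations):

    Configuration conf = HBaseConfiguration.create();
    // hbase.rpc.timeout: budget for each rpc attempt
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 30000);
    // hbase.client.operation.timeout: total budget for one blocking Table call
    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 600000);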

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 0300ea2..8429e12 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -856,4 +856,20 @@ public class RemoteHTable implements Table {
       CompareOp compareOp, byte[] value, RowMutations rm) throws IOException {
     throw new UnsupportedOperationException("checkAndMutate not implemented");
   }
+
+  @Override public void setOperationTimeout(int operationTimeout) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override public int getOperationTimeout() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override public void setRpcTimeout(int rpcTimeout) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override public int getRpcTimeout() {
+    throw new UnsupportedOperationException();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index 1f84bb4..2d25f63 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -365,4 +365,20 @@ public class HTableWrapper implements HTableInterface {
       CompareOp compareOp, byte[] value, RowMutations rm) throws IOException {
     return table.checkAndMutate(row, family, qualifier, compareOp, value, rm);
   }
+
+  @Override public void setOperationTimeout(int operationTimeout) {
+    table.setOperationTimeout(operationTimeout);
+  }
+
+  @Override public int getOperationTimeout() {
+    return table.getOperationTimeout();
+  }
+
+  @Override public void setRpcTimeout(int rpcTimeout) {
+    table.setRpcTimeout(rpcTimeout);
+  }
+
+  @Override public int getRpcTimeout() {
+    return table.getRpcTimeout();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/48f158f0/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 306e139..832ee93 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -126,9 +126,19 @@ public class TestHCM {
     }
   }
 
+  public static class SleepCoprocessor extends BaseRegionObserver {
+    public static final int SLEEP_TIME = 5000;
+
+    @Override public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final Get get, final List<Cell> results) throws IOException {
+      Threads.sleep(SLEEP_TIME);
+    }
+  }
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true);
+    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
     TEST_UTIL.startMiniCluster(2);
   }
 
@@ -299,7 +309,7 @@ public class TestHCM {
     HTableDescriptor hdt = TEST_UTIL.createTableDescriptor("HCM-testOperationTimeout");
     hdt.addCoprocessor(SleepAndFailFirstTime.class.getName());
     HTable table = TEST_UTIL.createTable(hdt, new byte[][]{FAM_NAM}, TEST_UTIL.getConfiguration());
-
+    table.setRpcTimeout(Integer.MAX_VALUE);
     // Check that it works if the timeout is big enough
     table.setOperationTimeout(120 * 1000);
     table.get(new Get(FAM_NAM));
@@ -322,6 +332,20 @@ public class TestHCM {
     }
   }
 
+  @Test(expected = RetriesExhaustedException.class)
+  public void testRpcTimeout() throws Exception {
+    HTableDescriptor hdt = TEST_UTIL.createTableDescriptor("HCM-testRpcTimeout");
+    hdt.addCoprocessor(SleepCoprocessor.class.getName());
+    Configuration c = new Configuration(TEST_UTIL.getConfiguration());
+
+    try (Table t = TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c)) {
+      assert t instanceof HTable;
+      HTable table = (HTable) t;
+      table.setRpcTimeout(SleepCoprocessor.SLEEP_TIME / 2);
+      table.setOperationTimeout(SleepCoprocessor.SLEEP_TIME * 100);
+      table.get(new Get(FAM_NAM));
+    }
+  }
 
   private void testConnectionClose(boolean allowsInterrupt) throws Exception {
     TableName tableName = TableName.valueOf("HCM-testConnectionClose" + allowsInterrupt);


[25/50] hbase git commit: HBASE-15122 Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER findbugs warnings (Samir Ahmic)

Posted by la...@apache.org.
HBASE-15122 Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER findbugs warnings (Samir Ahmic)

Conflicts:
	pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f280c459
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f280c459
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f280c459

Branch: refs/heads/branch-1.0
Commit: f280c45956d01639756b04757e74229f486e6a8f
Parents: 21ab184
Author: chenheng <ch...@apache.org>
Authored: Mon Feb 15 13:52:37 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Mon Feb 15 14:01:26 2016 +0800

----------------------------------------------------------------------
 .../src/main/resources/supplemental-models.xml  |  36 ++
 hbase-server/pom.xml                            |  11 +
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java   |   8 +-
 .../src/main/resources/ESAPI.properties         | 431 +++++++++++++++++++
 .../hbase/http/jmx/TestJMXJsonServlet.java      |   6 +
 .../src/test/resources/ESAPI.properties         | 431 +++++++++++++++++++
 pom.xml                                         |   1 +
 7 files changed, 923 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/hbase-resource-bundle/src/main/resources/supplemental-models.xml
----------------------------------------------------------------------
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index faa7887..a27ecd7 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -61,6 +61,24 @@ under the License.
       </licenses>
     </project>
   </supplement>
+  <supplement>
+    <project>
+      <groupId>commons-beanutils</groupId>
+      <artifactId>commons-beanutils-core</artifactId>
+
+      <organization>
+        <name>The Apache Software Foundation</name>
+        <url>http://www.apache.org/</url>
+      </organization>
+      <licenses>
+        <license>
+          <name>Apache Software License, Version 2.0</name>
+          <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+          <distribution>repo</distribution>
+        </license>
+      </licenses>
+    </project>
+  </supplement>
 <!-- Artifacts with ambiguously named licenses in POM -->
   <supplement>
     <project>
@@ -1292,4 +1310,22 @@ Copyright (c) 2007-2011 The JRuby project
       </licenses>
     </project>
   </supplement>
+  <supplement>
+    <project>
+      <groupId>xalan</groupId>
+      <artifactId>xalan</artifactId>
+
+      <organization>
+        <name>The Apache Software Foundation</name>
+        <url>http://www.apache.org/</url>
+      </organization>
+      <licenses>
+        <license>
+          <name>The Apache Software License, Version 2.0</name>
+          <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+          <distribution>repo</distribution>
+        </license>
+      </licenses>
+    </project>
+  </supplement>
 </supplementalDataModels>

http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index babc96a..28433b5 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -541,6 +541,17 @@
       <artifactId>bcprov-jdk16</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.owasp.esapi</groupId>
+      <artifactId>esapi</artifactId>
+      <version>2.1.0</version>
+      <exclusions>
+        <exclusion>
+          <artifactId>xercesImpl</artifactId>
+          <groupId>xerces</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
   </dependencies>
   <profiles>
     <!-- Needs to make the profile in apache parent pom -->

http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index b6e97a8..5573f2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -38,6 +38,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.http.HttpServer;
 import org.apache.hadoop.hbase.util.JSONBean;
+import org.owasp.esapi.ESAPI;
 
 /*
  * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has
@@ -160,7 +161,7 @@ public class JMXJsonServlet extends HttpServlet {
         jsonpcb = request.getParameter(CALLBACK_PARAM);
         if (jsonpcb != null) {
           response.setContentType("application/javascript; charset=utf8");
-          writer.write(jsonpcb + "(");
+          writer.write(encodeJS(jsonpcb) + "(");
         } else {
           response.setContentType("application/json; charset=utf8");
         }
@@ -213,4 +214,9 @@ public class JMXJsonServlet extends HttpServlet {
       response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
     }
   }
+
+  private String encodeJS(String inputStr) {
+    return ESAPI.encoder().encodeForJavaScript(inputStr);
+  }
+
 }
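
To illustrate what the encoding buys, a small standalone sketch (it assumes the esapi 2.1.0 artifact added to the pom above is on the classpath, along with a loadable ESAPI.properties such as the one this commit adds; the class name is made up):

    import org.owasp.esapi.ESAPI;

    public class JsonpCallbackEncodingDemo {
      public static void main(String[] args) {
        String evil = "<script>alert('hello')</script>";
        // encodeForJavaScript backslash-escapes markup characters (for example
        // '<' becomes \x3C), so the callback is written out as inert string data.
        System.out.println(ESAPI.encoder().encodeForJavaScript(evil));
      }
    }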

http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/hbase-server/src/main/resources/ESAPI.properties
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/ESAPI.properties b/hbase-server/src/main/resources/ESAPI.properties
new file mode 100644
index 0000000..9074001
--- /dev/null
+++ b/hbase-server/src/main/resources/ESAPI.properties
@@ -0,0 +1,431 @@
+#
+# OWASP Enterprise Security API (ESAPI) Properties file -- PRODUCTION Version
+#
+# This file is part of the Open Web Application Security Project (OWASP)
+# Enterprise Security API (ESAPI) project. For details, please see
+# http://www.owasp.org/index.php/ESAPI.
+#
+# Copyright (c) 2008,2009 - The OWASP Foundation
+#
+# DISCUSS: This may cause a major backwards compatibility issue, etc. but
+#           from a name space perspective, we probably should have prefaced
+#           all the property names with ESAPI or at least OWASP. Otherwise
+#           there could be problems if someone loads this properties file into
+#           the System properties.  We could also put this file into the
+#           esapi.jar file (perhaps as a ResourceBundle) and then allow an external
+#           ESAPI properties be defined that would overwrite these defaults.
+#           That keeps the application's properties relatively simple as usually
+#           they will only want to override a few properties. It looks like we
+#           already support multiple override levels of this in the
+#           DefaultSecurityConfiguration class, but I'm suggesting placing the
+#           defaults in the esapi.jar itself. That way, if the jar is signed,
+#           we could detect if those properties had been tampered with. (The
+#           code to check the jar signatures is pretty simple... maybe 70-90 LOC,
+#           but of course there is an execution penalty (similar to the way
+#           that the separate sunjce.jar used to be when a class from it was
+#           first loaded). Thoughts?
+###############################################################################
+#
+# WARNING: Operating system protection should be used to lock down the .esapi
+# resources directory and all the files inside and all the directories all the
+# way up to the root directory of the file system.  Note that if you are using
+# file-based implementations, that some files may need to be read-write as they
+# get updated dynamically.
+#
+# Before using, be sure to update the MasterKey and MasterSalt as described below.
+# N.B.: If you had stored data that you have previously encrypted with ESAPI 1.4,
+#        you *must* FIRST decrypt it using ESAPI 1.4 and then (if so desired)
+#        re-encrypt it with ESAPI 2.0. If you fail to do this, you will NOT be
+#        able to decrypt your data with ESAPI 2.0.
+#
+#        YOU HAVE BEEN WARNED!!! More details are in the ESAPI 2.0 Release Notes.
+#
+#===========================================================================
+# ESAPI Configuration
+#
+# If true, then print all the ESAPI properties set here when they are loaded.
+# If false, they are not printed. Useful to reduce output when running JUnit tests.
+# If you need to troubleshoot a properties related problem, turning this on may help.
+# This is 'false' in the src/test/resources/.esapi version. It is 'true' by
+# default for reasons of backward compatibility with earlier ESAPI versions.
+ESAPI.printProperties=true
+
+# ESAPI is designed to be easily extensible. You can use the reference implementation
+# or implement your own providers to take advantage of your enterprise's security
+# infrastructure. The functions in ESAPI are referenced using the ESAPI locator, like:
+#
+#    String ciphertext =
+#        ESAPI.encryptor().encrypt("Secret message");   // Deprecated in 2.0
+#    CipherText cipherText =
+#        ESAPI.encryptor().encrypt(new PlainText("Secret message")); // Preferred
+#
+# Below you can specify the classname for the provider that you wish to use in your
+# application. The only requirement is that it implement the appropriate ESAPI interface.
+# This allows you to switch security implementations in the future without rewriting the
+# entire application.
+#
+# ExperimentalAccessController requires ESAPI-AccessControlPolicy.xml in .esapi directory
+ESAPI.AccessControl=org.owasp.esapi.reference.DefaultAccessController
+# FileBasedAuthenticator requires users.txt file in .esapi directory
+ESAPI.Authenticator=org.owasp.esapi.reference.FileBasedAuthenticator
+ESAPI.Encoder=org.owasp.esapi.reference.DefaultEncoder
+ESAPI.Encryptor=org.owasp.esapi.reference.crypto.JavaEncryptor
+
+ESAPI.Executor=org.owasp.esapi.reference.DefaultExecutor
+ESAPI.HTTPUtilities=org.owasp.esapi.reference.DefaultHTTPUtilities
+ESAPI.IntrusionDetector=org.owasp.esapi.reference.DefaultIntrusionDetector
+# Log4JFactory Requires log4j.xml or log4j.properties in classpath - http://www.laliluna.de/log4j-tutorial.html
+ESAPI.Logger=org.owasp.esapi.reference.Log4JLogFactory
+#ESAPI.Logger=org.owasp.esapi.reference.JavaLogFactory
+ESAPI.Randomizer=org.owasp.esapi.reference.DefaultRandomizer
+ESAPI.Validator=org.owasp.esapi.reference.DefaultValidator
+
+#===========================================================================
+# ESAPI Authenticator
+#
+Authenticator.AllowedLoginAttempts=3
+Authenticator.MaxOldPasswordHashes=13
+Authenticator.UsernameParameterName=username
+Authenticator.PasswordParameterName=password
+# RememberTokenDuration (in days)
+Authenticator.RememberTokenDuration=14
+# Session Timeouts (in minutes)
+Authenticator.IdleTimeoutDuration=20
+Authenticator.AbsoluteTimeoutDuration=120
+
+#===========================================================================
+# ESAPI Encoder
+#
+# ESAPI canonicalizes input before validation to prevent bypassing filters with encoded attacks.
+# Failure to canonicalize input is a very common mistake when implementing validation schemes.
+# Canonicalization is automatic when using the ESAPI Validator, but you can also use the
+# following code to canonicalize data.
+#
+#      ESAPI.Encoder().canonicalize( "%22hello world&#x22;" );
+#
+# Multiple encoding is when a single encoding format is applied multiple times, multiple
+# different encoding formats are applied, or when multiple formats are nested. Allowing
+# multiple encoding is strongly discouraged.
+Encoder.AllowMultipleEncoding=false
+#
+# The default list of codecs to apply when canonicalizing untrusted data. The list should include the codecs
+# for all downstream interpreters or decoders. For example, if the data is likely to end up in a URL, HTML, or
+# inside JavaScript, then the list of codecs below is appropriate. The order of the list is not terribly important.
+Encoder.DefaultCodecList=HTMLEntityCodec,PercentCodec,JavaScriptCodec
+
+
+#===========================================================================
+# ESAPI Encryption
+#
+# The ESAPI Encryptor provides basic cryptographic functions with a simplified API.
+# To get started, generate a new key using java -classpath esapi.jar org.owasp.esapi.reference.crypto.JavaEncryptor
+# There is not currently any support for key rotation, so be careful when changing your key and salt as it
+# will invalidate all signed, encrypted, and hashed data.
+#
+# WARNING: Not all combinations of algorithms and key lengths are supported.
+# If you choose to use a key length greater than 128, you MUST download the
+# unlimited strength policy files and install in the lib directory of your JRE/JDK.
+# See http://java.sun.com/javase/downloads/index.jsp for more information.
+#
+# Backward compatibility with ESAPI Java 1.4 is supported by the two deprecated API
+# methods, Encryptor.encrypt(String) and Encryptor.decrypt(String). However, whenever
+# possible, these methods should be avoided as they use ECB cipher mode, which in almost
+# all circumstances is a poor choice because of its weakness. CBC cipher mode is the default
+# for the new Encryptor encrypt / decrypt methods for ESAPI Java 2.0.  In general, you
+# should only use this compatibility setting if you have persistent data encrypted with
+# version 1.4 and even then, you should ONLY set this compatibility mode UNTIL
+# you have decrypted all of your old encrypted data and then re-encrypted it with
+# ESAPI 2.0 using CBC mode. If you have some reason to mix the deprecated 1.4 mode
+# with the new 2.0 methods, make sure that you use the same cipher algorithm for both
+# (256-bit AES was the default for 1.4; 128-bit is the default for 2.0; see below for
+# more details.) Otherwise, you will have to use the new 2.0 encrypt / decrypt methods
+# where you can specify a SecretKey. (Note that if you are using the 256-bit AES,
+# that requires downloading the special jurisdiction policy files mentioned above.)
+#
+#        ***** IMPORTANT: Do NOT forget to replace these with your own values! *****
+# To calculate these values, you can run:
+#        java -classpath esapi.jar org.owasp.esapi.reference.crypto.JavaEncryptor
+#
+Encryptor.MasterKey=
+Encryptor.MasterSalt=
+
+# Provides the default JCE provider that ESAPI will "prefer" for its symmetric
+# encryption and hashing. (That is it will look to this provider first, but it
+# will defer to other providers if the requested algorithm is not implemented
+# by this provider.) If left unset, ESAPI will just use your Java VM's current
+# preferred JCE provider, which is generally set in the file
+# "$JAVA_HOME/jre/lib/security/java.security".
+#
+# The main intent of this is to allow ESAPI symmetric encryption to be
+# used with a FIPS 140-2 compliant crypto-module. For details, see the section
+# "Using ESAPI Symmetric Encryption with FIPS 140-2 Cryptographic Modules" in
+# the ESAPI 2.0 Symmetric Encryption User Guide, at:
+# http://owasp-esapi-java.googlecode.com/svn/trunk/documentation/esapi4java-core-2.0-symmetric-crypto-user-guide.html
+# However, this property also allows you to easily use an alternate JCE provider
+# such as "Bouncy Castle" without having to make changes to "java.security".
+# See Javadoc for SecurityProviderLoader for further details. If you wish to use
+# a provider that is not known to SecurityProviderLoader, you may specify the
+# fully-qualified class name of the JCE provider class that implements
+# java.security.Provider. If the name contains a '.', this is interpreted as
+# a fully-qualified class name that implements java.security.Provider.
+#
+# NOTE: Setting this property has the side-effect of changing it in your application
+#       as well, so if you are using JCE in your application directly rather than
+#       through ESAPI (you wouldn't do that, would you? ;-), it will change the
+#       preferred JCE provider there as well.
+#
+# Default: Keeps the JCE provider set to whatever JVM sets it to.
+Encryptor.PreferredJCEProvider=
+
+# AES is the most widely used and strongest encryption algorithm. This
+# should agree with your Encryptor.CipherTransformation property.
+# By default, ESAPI Java 1.4 uses "PBEWithMD5AndDES", which is
+# very weak. It is essentially a password-based encryption key, hashed
+# with MD5 around 1K times and then encrypted with the weak DES algorithm
+# (56-bits) using ECB mode and an unspecified padding (it is
+# JCE provider specific, but most likely "NoPadding"). However, 2.0 uses
+# "AES/CBC/PKCSPadding". If you want to change these, change them here.
+# Warning: This property does not control the default reference implementation for
+#           ESAPI 2.0 using JavaEncryptor. Also, this property will be dropped
+#           in the future.
+# @deprecated
+Encryptor.EncryptionAlgorithm=AES
+#        For ESAPI Java 2.0 - New encrypt / decrypt methods use this.
+Encryptor.CipherTransformation=AES/CBC/PKCS5Padding
+
+# Applies to ESAPI 2.0 and later only!
+# Comma-separated list of cipher modes that provide *BOTH*
+# confidentiality *AND* message authenticity. (NIST refers to such cipher
+# modes as "combined modes" so that's what we shall call them.) If any of these
+# cipher modes are used then no MAC is calculated and stored
+# in the CipherText upon encryption. Likewise, if one of these
+# cipher modes is used with decryption, no attempt will be made
+# to validate the MAC contained in the CipherText object regardless
+# of whether it contains one or not. Since the expectation is that
+# these cipher modes support message authenticity already,
+# injecting a MAC in the CipherText object would be at best redundant.
+#
+# Note that as of JDK 1.5, the SunJCE provider does not support *any*
+# of these cipher modes. Of these listed, only GCM and CCM are currently
+# NIST approved. YMMV for other JCE providers. E.g., Bouncy Castle supports
+# GCM and CCM with "NoPadding" mode, but not with "PKCS5Padding" or other
+# padding modes.
+Encryptor.cipher_modes.combined_modes=GCM,CCM,IAPM,EAX,OCB,CWC
+
+# Applies to ESAPI 2.0 and later only!
+# Additional cipher modes allowed for ESAPI 2.0 encryption. These
+# cipher modes are in _addition_ to those specified by the property
+# 'Encryptor.cipher_modes.combined_modes'.
+# Note: We will add support for streaming modes like CFB & OFB once
+# we add support for 'specified' to the property 'Encryptor.ChooseIVMethod'
+# (probably in ESAPI 2.1).
+# DISCUSS: Better name?
+Encryptor.cipher_modes.additional_allowed=CBC
+
+# 128-bit is almost always sufficient and appears to be more resistant to
+# related key attacks than is 256-bit AES. Use '_' to use default key size
+# for cipher algorithms (where it makes sense because the algorithm supports
+# a variable key size). Key length must agree with what's provided as the
+# cipher transformation, otherwise this will be ignored after logging a
+# warning.
+#
+# NOTE: This is what applies BOTH ESAPI 1.4 and 2.0. See warning above about mixing!
+Encryptor.EncryptionKeyLength=128
+
+# Because 2.0 uses CBC mode by default, it requires an initialization vector (IV).
+# (All cipher modes except ECB require an IV.) There are two choices: we can either
+# use a fixed IV known to both parties or allow ESAPI to choose a random IV. While
+# the IV does not need to be hidden from adversaries, it is important that the
+# adversary not be allowed to choose it. Also, random IVs are generally much more
+# secure than fixed IVs. (In fact, it is essential that feed-back cipher modes
+# such as CFB and OFB use a different IV for each encryption with a given key so
+# in such cases, random IVs are much preferred.) By default, ESAPI 2.0 uses random
+# IVs. If you wish to use 'fixed' IVs, set 'Encryptor.ChooseIVMethod=fixed' and
+# uncomment the Encryptor.fixedIV.
+#
+# Valid values:        random|fixed|specified        'specified' not yet implemented; planned for 2.1
+Encryptor.ChooseIVMethod=random
+# If you choose to use a fixed IV, then you must place a fixed IV here that
+# is known to all others who are sharing your secret key. The format should
+# be a hex string that is the same length as the cipher block size for the
+# cipher algorithm that you are using. The following is an example for AES
+# from an AES test vector for AES-128/CBC as described in:
+# NIST Special Publication 800-38A (2001 Edition)
+# "Recommendation for Block Cipher Modes of Operation".
+# (Note that the block size for AES is 16 bytes == 128 bits.)
+#
+Encryptor.fixedIV=0x000102030405060708090a0b0c0d0e0f
+
+# Whether or not CipherText should use a message authentication code (MAC) with it.
+# This prevents an adversary from altering the IV as well as allowing a more
+# fool-proof way of determining that decryption failed because of an incorrect
+# key being supplied. This refers to the "separate" MAC calculated and stored
+# in CipherText, not part of any MAC that is calculated as a result of a
+# "combined mode" cipher mode.
+#
+# If you are using ESAPI with a FIPS 140-2 cryptographic module, you *must* also
+# set this property to false.
+Encryptor.CipherText.useMAC=true
+
+# Whether or not the PlainText object may be overwritten and then marked
+# eligible for garbage collection. If not set, this is still treated as 'true'.
+Encryptor.PlainText.overwrite=true
+
+# Do not use DES except in legacy situations. A 56-bit key is way too small.
+#Encryptor.EncryptionKeyLength=56
+#Encryptor.EncryptionAlgorithm=DES
+
+# TripleDES is considered strong enough for most purposes.
+#    Note:    There is also a 112-bit version of DESede. Using the 168-bit version
+#            requires downloading the special jurisdiction policy from Sun.
+#Encryptor.EncryptionKeyLength=168
+#Encryptor.EncryptionAlgorithm=DESede
+
+Encryptor.HashAlgorithm=SHA-512
+Encryptor.HashIterations=1024
+Encryptor.DigitalSignatureAlgorithm=SHA1withDSA
+Encryptor.DigitalSignatureKeyLength=1024
+Encryptor.RandomAlgorithm=SHA1PRNG
+Encryptor.CharacterEncoding=UTF-8
+
+
+#===========================================================================
+# ESAPI HttpUtilities
+#
+# The HttpUtilities provide basic protections to HTTP requests and responses. Primarily these methods
+# protect against malicious data from attackers, such as unprintable characters, escaped characters,
+# and other simple attacks. The HttpUtilities also provides utility methods for dealing with cookies,
+# headers, and CSRF tokens.
+#
+# Default file upload location (remember to escape backslashes with \\)
+HttpUtilities.UploadDir=C:\\ESAPI\\testUpload
+HttpUtilities.UploadTempDir=C:\\temp
+# Force flags on cookies, if you use HttpUtilities to set cookies
+HttpUtilities.ForceHttpOnlySession=false
+HttpUtilities.ForceSecureSession=false
+HttpUtilities.ForceHttpOnlyCookies=true
+HttpUtilities.ForceSecureCookies=true
+# Maximum size of HTTP headers
+HttpUtilities.MaxHeaderSize=4096
+# File upload configuration
+HttpUtilities.ApprovedUploadExtensions=.zip,.pdf,.doc,.docx,.ppt,.pptx,.tar,.gz,.tgz,.rar,.war,.jar,.ear,.xls,.rtf,.properties,.java,.class,.txt,.xml,.jsp,.jsf,.exe,.dll
+HttpUtilities.MaxUploadFileBytes=500000000
+# Using UTF-8 throughout your stack is highly recommended. That includes your database driver,
+# container, and any other technologies you may be using. Failure to do this may expose you
+# to Unicode transcoding injection attacks. Use of UTF-8 does not hinder internationalization.
+HttpUtilities.ResponseContentType=text/html; charset=UTF-8
+
+
+
+#===========================================================================
+# ESAPI Executor
+# CHECKME - Not sure what this is used for, but surely it should be made OS independent.
+Executor.WorkingDirectory=C:\\Windows\\Temp
+Executor.ApprovedExecutables=C:\\Windows\\System32\\cmd.exe,C:\\Windows\\System32\\runas.exe
+
+
+#===========================================================================
+# ESAPI Logging
+# Set the application name if these logs are combined with other applications
+Logger.ApplicationName=ExampleApplication
+# If you use an HTML log viewer that does not properly HTML escape log data, you can set LogEncodingRequired to true
+Logger.LogEncodingRequired=false
+# Determines whether ESAPI should log the application name. This might be clutter in some single-server/single-app environments.
+Logger.LogApplicationName=true
+# Determines whether ESAPI should log the server IP and port. This might be clutter in some single-server environments.
+Logger.LogServerIP=true
+# LogFileName, the name of the logging file. Provide a full directory path (e.g., C:\\ESAPI\\ESAPI_logging_file) if you
+# want to place it in a specific directory.
+Logger.LogFileName=ESAPI_logging_file
+# MaxLogFileSize, the max size (in bytes) of a single log file before it cuts over to a new one (default is 10,000,000)
+Logger.MaxLogFileSize=10000000
+
+
+#===========================================================================
+# ESAPI Intrusion Detection
+#
+# Each event has a base to which .count, .interval, and .action are added
+# The IntrusionException will fire if we receive "count" events within "interval" seconds
+# The IntrusionDetector is configurable to take the following actions: log, logout, and disable
+#  (multiple actions separated by commas are allowed e.g. event.test.actions=log,disable
+#
+# Custom Events
+# Names must start with "event." as the base
+# Use IntrusionDetector.addEvent( "test" ) in your code to trigger "event.test" here
+# You can also disable intrusion detection completely by changing
+# the following parameter to true
+#
+IntrusionDetector.Disable=false
+#
+IntrusionDetector.event.test.count=2
+IntrusionDetector.event.test.interval=10
+IntrusionDetector.event.test.actions=disable,log
+
+# Exception Events
+# All EnterpriseSecurityExceptions are registered automatically
+# Call IntrusionDetector.getInstance().addException(e) for Exceptions that do not extend EnterpriseSecurityException
+# Use the fully qualified classname of the exception as the base
+
+# any intrusion is an attack
+IntrusionDetector.org.owasp.esapi.errors.IntrusionException.count=1
+IntrusionDetector.org.owasp.esapi.errors.IntrusionException.interval=1
+IntrusionDetector.org.owasp.esapi.errors.IntrusionException.actions=log,disable,logout
+
+# for test purposes
+# CHECKME: Shouldn't there be something in the property name itself that designates
+#           that these are for testing???
+IntrusionDetector.org.owasp.esapi.errors.IntegrityException.count=10
+IntrusionDetector.org.owasp.esapi.errors.IntegrityException.interval=5
+IntrusionDetector.org.owasp.esapi.errors.IntegrityException.actions=log,disable,logout
+
+# rapid validation errors indicate scans or attacks in progress
+# org.owasp.esapi.errors.ValidationException.count=10
+# org.owasp.esapi.errors.ValidationException.interval=10
+# org.owasp.esapi.errors.ValidationException.actions=log,logout
+
+# sessions jumping between hosts indicates session hijacking
+IntrusionDetector.org.owasp.esapi.errors.AuthenticationHostException.count=2
+IntrusionDetector.org.owasp.esapi.errors.AuthenticationHostException.interval=10
+IntrusionDetector.org.owasp.esapi.errors.AuthenticationHostException.actions=log,logout
+
+
+#===========================================================================
+# ESAPI Validation
+#
+# The ESAPI Validator works on regular expressions with defined names. You can define names
+# either here, or you may define application specific patterns in a separate file defined below.
+# This allows enterprises to specify both organizational standards as well as application specific
+# validation rules.
+#
+Validator.ConfigurationFile=validation.properties
+
+# Validators used by ESAPI
+Validator.AccountName=^[a-zA-Z0-9]{3,20}$
+Validator.SystemCommand=^[a-zA-Z\\-\\/]{1,64}$
+Validator.RoleName=^[a-z]{1,20}$
+
+#the word TEST below should be changed to your application
+#name - only relative URL's are supported
+Validator.Redirect=^\\/test.*$
+
+# Global HTTP Validation Rules
+# Values with Base64 encoded data (e.g. encrypted state) will need at least [a-zA-Z0-9\/+=]
+Validator.HTTPScheme=^(http|https)$
+Validator.HTTPServerName=^[a-zA-Z0-9_.\\-]*$
+Validator.HTTPParameterName=^[a-zA-Z0-9_]{1,32}$
+Validator.HTTPParameterValue=^[a-zA-Z0-9.\\-\\/+=_ ]*$
+Validator.HTTPCookieName=^[a-zA-Z0-9\\-_]{1,32}$
+Validator.HTTPCookieValue=^[a-zA-Z0-9\\-\\/+=_ ]*$
+Validator.HTTPHeaderName=^[a-zA-Z0-9\\-_]{1,32}$
+Validator.HTTPHeaderValue=^[a-zA-Z0-9()\\-=\\*\\.\\?;,+\\/:&_ ]*$
+Validator.HTTPContextPath=^[a-zA-Z0-9.\\-\\/_]*$
+Validator.HTTPServletPath=^[a-zA-Z0-9.\\-\\/_]*$
+Validator.HTTPPath=^[a-zA-Z0-9.\\-_]*$
+Validator.HTTPQueryString=^[a-zA-Z0-9()\\-=\\*\\.\\?;,+\\/:&_ %]*$
+Validator.HTTPURI=^[a-zA-Z0-9()\\-=\\*\\.\\?;,+\\/:&_ ]*$
+Validator.HTTPURL=^.*$
+Validator.HTTPJSESSIONID=^[A-Z0-9]{10,30}$
+
+# Validation of file related input
+Validator.FileName=^[a-zA-Z0-9!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1,255}$
+Validator.DirectoryName=^[a-zA-Z0-9:/\\\\!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1,255}$

http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/hbase-server/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java
index dd345fb..3707d71 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java
@@ -104,5 +104,11 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest {
     assertReFind("\"committed\"\\s*:", result);
     assertReFind("\\}\\);$", result);
 
+    // check that a JSONP callback containing markup is JavaScript-encoded,
+    // so no raw <script> tag is echoed back to the caller
+    result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory&callback=<script>alert('hello')</script>"));
+    LOG.info("/jmx?qry=java.lang:type=Memory&callback=<script>alert('hello')</script> RESULT: "+result);
+    assertTrue(!result.contains("<script>"));
+
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/hbase-server/src/test/resources/ESAPI.properties
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/resources/ESAPI.properties b/hbase-server/src/test/resources/ESAPI.properties
new file mode 100644
index 0000000..9074001
--- /dev/null
+++ b/hbase-server/src/test/resources/ESAPI.properties
@@ -0,0 +1,431 @@
+#
+# OWASP Enterprise Security API (ESAPI) Properties file -- PRODUCTION Version
+#
+# This file is part of the Open Web Application Security Project (OWASP)
+# Enterprise Security API (ESAPI) project. For details, please see
+# http://www.owasp.org/index.php/ESAPI.
+#
+# Copyright (c) 2008,2009 - The OWASP Foundation
+#
+# DISCUSS: This may cause a major backwards compatibility issue, etc. but
+#           from a name space perspective, we probably should have prefaced
+#           all the property names with ESAPI or at least OWASP. Otherwise
+#           there could be problems is someone loads this properties file into
+#           the System properties.  We could also put this file into the
+#           esapi.jar file (perhaps as a ResourceBundle) and then allow an external
+#           ESAPI properties be defined that would overwrite these defaults.
+#           That keeps the application's properties relatively simple as usually
+#           they will only want to override a few properties. If looks like we
+#           already support multiple override levels of this in the
+#           DefaultSecurityConfiguration class, but I'm suggesting placing the
+#           defaults in the esapi.jar itself. That way, if the jar is signed,
+#           we could detect if those properties had been tampered with. (The
+#           code to check the jar signatures is pretty simple... maybe 70-90 LOC,
+#           but off course there is an execution penalty (similar to the way
+#           that the separate sunjce.jar used to be when a class from it was
+#           first loaded). Thoughts?
+###############################################################################
+#
+# WARNING: Operating system protection should be used to lock down the .esapi
+# resources directory and all the files inside and all the directories all the
+# way up to the root directory of the file system.  Note that if you are using
+# file-based implementations, that some files may need to be read-write as they
+# get updated dynamically.
+#
+# Before using, be sure to update the MasterKey and MasterSalt as described below.
+# N.B.: If you had stored data that you have previously encrypted with ESAPI 1.4,
+#        you *must* FIRST decrypt it using ESAPI 1.4 and then (if so desired)
+#        re-encrypt it with ESAPI 2.0. If you fail to do this, you will NOT be
+#        able to decrypt your data with ESAPI 2.0.
+#
+#        YOU HAVE BEEN WARNED!!! More details are in the ESAPI 2.0 Release Notes.
+#
+#===========================================================================
+# ESAPI Configuration
+#
+# If true, then print all the ESAPI properties set here when they are loaded.
+# If false, they are not printed. Useful to reduce output when running JUnit tests.
+# If you need to troubleshoot a properties related problem, turning this on may help.
+# This is 'false' in the src/test/resources/.esapi version. It is 'true' by
+# default for reasons of backward compatibility with earlier ESAPI versions.
+ESAPI.printProperties=true
+
+# ESAPI is designed to be easily extensible. You can use the reference implementation
+# or implement your own providers to take advantage of your enterprise's security
+# infrastructure. The functions in ESAPI are referenced using the ESAPI locator, like:
+#
+#    String ciphertext =
+#        ESAPI.encryptor().encrypt("Secret message");   // Deprecated in 2.0
+#    CipherText cipherText =
+#        ESAPI.encryptor().encrypt(new PlainText("Secret message")); // Preferred
+#
+# Below you can specify the classname for the provider that you wish to use in your
+# application. The only requirement is that it implement the appropriate ESAPI interface.
+# This allows you to switch security implementations in the future without rewriting the
+# entire application.
+#
+# ExperimentalAccessController requires ESAPI-AccessControlPolicy.xml in .esapi directory
+ESAPI.AccessControl=org.owasp.esapi.reference.DefaultAccessController
+# FileBasedAuthenticator requires users.txt file in .esapi directory
+ESAPI.Authenticator=org.owasp.esapi.reference.FileBasedAuthenticator
+ESAPI.Encoder=org.owasp.esapi.reference.DefaultEncoder
+ESAPI.Encryptor=org.owasp.esapi.reference.crypto.JavaEncryptor
+
+ESAPI.Executor=org.owasp.esapi.reference.DefaultExecutor
+ESAPI.HTTPUtilities=org.owasp.esapi.reference.DefaultHTTPUtilities
+ESAPI.IntrusionDetector=org.owasp.esapi.reference.DefaultIntrusionDetector
+# Log4JFactory Requires log4j.xml or log4j.properties in classpath - http://www.laliluna.de/log4j-tutorial.html
+ESAPI.Logger=org.owasp.esapi.reference.Log4JLogFactory
+#ESAPI.Logger=org.owasp.esapi.reference.JavaLogFactory
+ESAPI.Randomizer=org.owasp.esapi.reference.DefaultRandomizer
+ESAPI.Validator=org.owasp.esapi.reference.DefaultValidator
+
+#===========================================================================
+# ESAPI Authenticator
+#
+Authenticator.AllowedLoginAttempts=3
+Authenticator.MaxOldPasswordHashes=13
+Authenticator.UsernameParameterName=username
+Authenticator.PasswordParameterName=password
+# RememberTokenDuration (in days)
+Authenticator.RememberTokenDuration=14
+# Session Timeouts (in minutes)
+Authenticator.IdleTimeoutDuration=20
+Authenticator.AbsoluteTimeoutDuration=120
+
+#===========================================================================
+# ESAPI Encoder
+#
+# ESAPI canonicalizes input before validation to prevent bypassing filters with encoded attacks.
+# Failure to canonicalize input is a very common mistake when implementing validation schemes.
+# Canonicalization is automatic when using the ESAPI Validator, but you can also use the
+# following code to canonicalize data.
+#
+#      ESAPI.Encoder().canonicalize( "%22hello world&#x22;" );
+#
+# Multiple encoding is when a single encoding format is applied multiple times, multiple
+# different encoding formats are applied, or when multiple formats are nested. Allowing
+# multiple encoding is strongly discouraged.
+Encoder.AllowMultipleEncoding=false
+#
+# The default list of codecs to apply when canonicalizing untrusted data. The list should include the codecs
+# for all downstream interpreters or decoders. For example, if the data is likely to end up in a URL, HTML, or
+# inside JavaScript, then the list of codecs below is appropriate. The order of the list is not terribly important.
+Encoder.DefaultCodecList=HTMLEntityCodec,PercentCodec,JavaScriptCodec
+
+
+#===========================================================================
+# ESAPI Encryption
+#
+# The ESAPI Encryptor provides basic cryptographic functions with a simplified API.
+# To get started, generate a new key using java -classpath esapi.jar org.owasp.esapi.reference.crypto.JavaEncryptor
+# There is not currently any support for key rotation, so be careful when changing your key and salt as it
+# will invalidate all signed, encrypted, and hashed data.
+#
+# WARNING: Not all combinations of algorithms and key lengths are supported.
+# If you choose to use a key length greater than 128, you MUST download the
+# unlimited strength policy files and install in the lib directory of your JRE/JDK.
+# See http://java.sun.com/javase/downloads/index.jsp for more information.
+#
+# Backward compatibility with ESAPI Java 1.4 is supported by the two deprecated API
+# methods, Encryptor.encrypt(String) and Encryptor.decrypt(String). However, whenever
+# possible, these methods should be avoided as they use ECB cipher mode, which in almost
+# all circumstances a poor choice because of it's weakness. CBC cipher mode is the default
+# for the new Encryptor encrypt / decrypt methods for ESAPI Java 2.0.  In general, you
+# should only use this compatibility setting if you have persistent data encrypted with
+# version 1.4 and even then, you should ONLY set this compatibility mode UNTIL
+# you have decrypted all of your old encrypted data and then re-encrypted it with
+# ESAPI 2.0 using CBC mode. If you have some reason to mix the deprecated 1.4 mode
+# with the new 2.0 methods, make sure that you use the same cipher algorithm for both
+# (256-bit AES was the default for 1.4; 128-bit is the default for 2.0; see below for
+# more details.) Otherwise, you will have to use the new 2.0 encrypt / decrypt methods
+# where you can specify a SecretKey. (Note that if you are using the 256-bit AES,
+# that requires downloading the special jurisdiction policy files mentioned above.)
+#
+#        ***** IMPORTANT: Do NOT forget to replace these with your own values! *****
+# To calculate these values, you can run:
+#        java -classpath esapi.jar org.owasp.esapi.reference.crypto.JavaEncryptor
+#
+Encryptor.MasterKey=
+Encryptor.MasterSalt=
+
+# Provides the default JCE provider that ESAPI will "prefer" for its symmetric
+# encryption and hashing. (That is it will look to this provider first, but it
+# will defer to other providers if the requested algorithm is not implemented
+# by this provider.) If left unset, ESAPI will just use your Java VM's current
+# preferred JCE provider, which is generally set in the file
+# "$JAVA_HOME/jre/lib/security/java.security".
+#
+# The main intent of this is to allow ESAPI symmetric encryption to be
+# used with a FIPS 140-2 compliant crypto-module. For details, see the section
+# "Using ESAPI Symmetric Encryption with FIPS 140-2 Cryptographic Modules" in
+# the ESAPI 2.0 Symmetric Encryption User Guide, at:
+# http://owasp-esapi-java.googlecode.com/svn/trunk/documentation/esapi4java-core-2.0-symmetric-crypto-user-guide.html
+# However, this property also allows you to easily use an alternate JCE provider
+# such as "Bouncy Castle" without having to make changes to "java.security".
+# See Javadoc for SecurityProviderLoader for further details. If you wish to use
+# a provider that is not known to SecurityProviderLoader, you may specify the
+# fully-qualified class name of the JCE provider class that implements
+# java.security.Provider. If the name contains a '.', this is interpreted as
+# a fully-qualified class name that implements java.security.Provider.
+#
+# NOTE: Setting this property has the side-effect of changing it in your application
+#       as well, so if you are using JCE in your application directly rather than
+#       through ESAPI (you wouldn't do that, would you? ;-), it will change the
+#       preferred JCE provider there as well.
+#
+# Default: Keeps the JCE provider set to whatever JVM sets it to.
+Encryptor.PreferredJCEProvider=
+
+# AES is the most widely used and strongest encryption algorithm. This
+# should agree with your Encryptor.CipherTransformation property.
+# By default, ESAPI Java 1.4 uses "PBEWithMD5AndDES" and which is
+# very weak. It is essentially a password-based encryption key, hashed
+# with MD5 around 1K times and then encrypted with the weak DES algorithm
+# (56-bits) using ECB mode and an unspecified padding (it is
+# JCE provider specific, but most likely "NoPadding"). However, 2.0 uses
+# "AES/CBC/PKCSPadding". If you want to change these, change them here.
+# Warning: This property does not control the default reference implementation for
+#           ESAPI 2.0 using JavaEncryptor. Also, this property will be dropped
+#           in the future.
+# @deprecated
+Encryptor.EncryptionAlgorithm=AES
+#        For ESAPI Java 2.0 - New encrypt / decrypt methods use this.
+Encryptor.CipherTransformation=AES/CBC/PKCS5Padding
+
+# Applies to ESAPI 2.0 and later only!
+# Comma-separated list of cipher modes that provide *BOTH*
+# confidentiality *AND* message authenticity. (NIST refers to such cipher
+# modes as "combined modes" so that's what we shall call them.) If any of these
+# cipher modes are used then no MAC is calculated and stored
+# in the CipherText upon encryption. Likewise, if one of these
+# cipher modes is used with decryption, no attempt will be made
+# to validate the MAC contained in the CipherText object regardless
+# of whether it contains one or not. Since the expectation is that
+# these cipher modes support support message authenticity already,
+# injecting a MAC in the CipherText object would be at best redundant.
+#
+# Note that as of JDK 1.5, the SunJCE provider does not support *any*
+# of these cipher modes. Of these listed, only GCM and CCM are currently
+# NIST approved. YMMV for other JCE providers. E.g., Bouncy Castle supports
+# GCM and CCM with "NoPadding" mode, but not with "PKCS5Padding" or other
+# padding modes.
+Encryptor.cipher_modes.combined_modes=GCM,CCM,IAPM,EAX,OCB,CWC
+
+# Applies to ESAPI 2.0 and later only!
+# Additional cipher modes allowed for ESAPI 2.0 encryption. These
+# cipher modes are in _addition_ to those specified by the property
+# 'Encryptor.cipher_modes.combined_modes'.
+# Note: We will add support for streaming modes like CFB & OFB once
+# we add support for 'specified' to the property 'Encryptor.ChooseIVMethod'
+# (probably in ESAPI 2.1).
+# DISCUSS: Better name?
+Encryptor.cipher_modes.additional_allowed=CBC
+
+# 128-bit is almost always sufficient and appears to be more resistant to
+# related key attacks than is 256-bit AES. Use '_' to use default key size
+# for cipher algorithms (where it makes sense because the algorithm supports
+# a variable key size). Key length must agree to what's provided as the
+# cipher transformation, otherwise this will be ignored after logging a
+# warning.
+#
+# NOTE: This is what applies BOTH ESAPI 1.4 and 2.0. See warning above about mixing!
+Encryptor.EncryptionKeyLength=128
+
+# Because 2.0 uses CBC mode by default, it requires an initialization vector (IV).
+# (All cipher modes except ECB require an IV.) There are two choices: we can either
+# use a fixed IV known to both parties or allow ESAPI to choose a random IV. While
+# the IV does not need to be hidden from adversaries, it is important that the
+# adversary not be allowed to choose it. Also, random IVs are generally much more
+# secure than fixed IVs. (In fact, it is essential that feed-back cipher modes
+# such as CFB and OFB use a different IV for each encryption with a given key so
+# in such cases, random IVs are much preferred. By default, ESAPI 2.0 uses random
+# IVs. If you wish to use 'fixed' IVs, set 'Encryptor.ChooseIVMethod=fixed' and
+# uncomment the Encryptor.fixedIV.
+#
+# Valid values:        random|fixed|specified        'specified' not yet implemented; planned for 2.1
+Encryptor.ChooseIVMethod=random
+# If you choose to use a fixed IV, then you must place a fixed IV here that
+# is known to all others who are sharing your secret key. The format should
+# be a hex string that is the same length as the cipher block size for the
+# cipher algorithm that you are using. The following is an example for AES
+# from an AES test vector for AES-128/CBC as described in:
+# NIST Special Publication 800-38A (2001 Edition)
+# "Recommendation for Block Cipher Modes of Operation".
+# (Note that the block size for AES is 16 bytes == 128 bits.)
+#
+Encryptor.fixedIV=0x000102030405060708090a0b0c0d0e0f
+
+# Whether or not CipherText should use a message authentication code (MAC) with it.
+# This prevents an adversary from altering the IV and also provides a more
+# fool-proof way of determining that decryption failed because an incorrect
+# key was supplied. This refers to the "separate" MAC calculated and stored
+# in CipherText, not part of any MAC that is calculated as a result of a
+# "combined mode" cipher mode.
+#
+# If you are using ESAPI with a FIPS 140-2 cryptographic module, you *must* also
+# set this property to false.
+Encryptor.CipherText.useMAC=true
+
+# Whether or not the PlainText object may be overwritten and then marked
+# eligible for garbage collection. If not set, this is still treated as 'true'.
+Encryptor.PlainText.overwrite=true
+
+# Do not use DES except in legacy situations. A 56-bit key is far too small.
+#Encryptor.EncryptionKeyLength=56
+#Encryptor.EncryptionAlgorithm=DES
+
+# TripleDES is considered strong enough for most purposes.
+#    Note:    There is also a 112-bit version of DESede. Using the 168-bit version
+#            requires downloading the special jurisdiction policy from Sun.
+#Encryptor.EncryptionKeyLength=168
+#Encryptor.EncryptionAlgorithm=DESede
+
+Encryptor.HashAlgorithm=SHA-512
+Encryptor.HashIterations=1024
+Encryptor.DigitalSignatureAlgorithm=SHA1withDSA
+Encryptor.DigitalSignatureKeyLength=1024
+Encryptor.RandomAlgorithm=SHA1PRNG
+Encryptor.CharacterEncoding=UTF-8
+
+
+#===========================================================================
+# ESAPI HttpUtilities
+#
+# The HttpUtilities provide basic protections to HTTP requests and responses. Primarily these methods
+# protect against malicious data from attackers, such as unprintable characters, escaped characters,
+# and other simple attacks. The HttpUtilities also provides utility methods for dealing with cookies,
+# headers, and CSRF tokens.
+#
+# Default file upload location (remember to escape backslashes with \\)
+HttpUtilities.UploadDir=C:\\ESAPI\\testUpload
+HttpUtilities.UploadTempDir=C:\\temp
+# Force flags on cookies, if you use HttpUtilities to set cookies
+HttpUtilities.ForceHttpOnlySession=false
+HttpUtilities.ForceSecureSession=false
+HttpUtilities.ForceHttpOnlyCookies=true
+HttpUtilities.ForceSecureCookies=true
+# Maximum size of HTTP headers
+HttpUtilities.MaxHeaderSize=4096
+# File upload configuration
+HttpUtilities.ApprovedUploadExtensions=.zip,.pdf,.doc,.docx,.ppt,.pptx,.tar,.gz,.tgz,.rar,.war,.jar,.ear,.xls,.rtf,.properties,.java,.class,.txt,.xml,.jsp,.jsf,.exe,.dll
+HttpUtilities.MaxUploadFileBytes=500000000
+# Using UTF-8 throughout your stack is highly recommended. That includes your database driver,
+# container, and any other technologies you may be using. Failure to do this may expose you
+# to Unicode transcoding injection attacks. Use of UTF-8 does not hinder internationalization.
+HttpUtilities.ResponseContentType=text/html; charset=UTF-8
+
+
+
+#===========================================================================
+# ESAPI Executor
+# CHECKME - Not sure what this is used for, but surely it should be made OS independent.
+Executor.WorkingDirectory=C:\\Windows\\Temp
+Executor.ApprovedExecutables=C:\\Windows\\System32\\cmd.exe,C:\\Windows\\System32\\runas.exe
+
+
+#===========================================================================
+# ESAPI Logging
+# Set the application name if these logs are combined with other applications
+Logger.ApplicationName=ExampleApplication
+# If you use an HTML log viewer that does not properly HTML escape log data, you can set LogEncodingRequired to true
+Logger.LogEncodingRequired=false
+# Determines whether ESAPI should log the application name. This might be clutter in some single-server/single-app environments.
+Logger.LogApplicationName=true
+# Determines whether ESAPI should log the server IP and port. This might be clutter in some single-server environments.
+Logger.LogServerIP=true
+# LogFileName, the name of the logging file. Provide a full directory path (e.g., C:\\ESAPI\\ESAPI_logging_file) if you
+# want to place it in a specific directory.
+Logger.LogFileName=ESAPI_logging_file
+# MaxLogFileSize, the max size (in bytes) of a single log file before it cuts over to a new one (default is 10,000,000)
+Logger.MaxLogFileSize=10000000
+
+
+#===========================================================================
+# ESAPI Intrusion Detection
+#
+# Each event has a base to which .count, .interval, and .actions are added
+# The IntrusionException will fire if we receive "count" events within "interval" seconds
+# The IntrusionDetector is configurable to take the following actions: log, logout, and disable
+#  (multiple actions separated by commas are allowed, e.g. event.test.actions=log,disable)
+#
+# Custom Events
+# Names must start with "event." as the base
+# Use IntrusionDetector.addEvent( "test" ) in your code to trigger "event.test" here
+# You can also disable intrusion detection completely by changing
+# the following parameter to true
+#
+IntrusionDetector.Disable=false
+#
+IntrusionDetector.event.test.count=2
+IntrusionDetector.event.test.interval=10
+IntrusionDetector.event.test.actions=disable,log
+
+# Exception Events
+# All EnterpriseSecurityExceptions are registered automatically
+# Call IntrusionDetector.getInstance().addException(e) for Exceptions that do not extend EnterpriseSecurityException
+# Use the fully qualified classname of the exception as the base
+
+# any intrusion is an attack
+IntrusionDetector.org.owasp.esapi.errors.IntrusionException.count=1
+IntrusionDetector.org.owasp.esapi.errors.IntrusionException.interval=1
+IntrusionDetector.org.owasp.esapi.errors.IntrusionException.actions=log,disable,logout
+
+# for test purposes
+# CHECKME: Shouldn't there be something in the property name itself that designates
+#           that these are for testing???
+IntrusionDetector.org.owasp.esapi.errors.IntegrityException.count=10
+IntrusionDetector.org.owasp.esapi.errors.IntegrityException.interval=5
+IntrusionDetector.org.owasp.esapi.errors.IntegrityException.actions=log,disable,logout
+
+# rapid validation errors indicate scans or attacks in progress
+# org.owasp.esapi.errors.ValidationException.count=10
+# org.owasp.esapi.errors.ValidationException.interval=10
+# org.owasp.esapi.errors.ValidationException.actions=log,logout
+
+# sessions jumping between hosts indicates session hijacking
+IntrusionDetector.org.owasp.esapi.errors.AuthenticationHostException.count=2
+IntrusionDetector.org.owasp.esapi.errors.AuthenticationHostException.interval=10
+IntrusionDetector.org.owasp.esapi.errors.AuthenticationHostException.actions=log,logout
+
+
+#===========================================================================
+# ESAPI Validation
+#
+# The ESAPI Validator works on regular expressions with defined names. You can define names
+# either here, or you may define application specific patterns in a separate file defined below.
+# This allows enterprises to specify both organizational standards as well as application specific
+# validation rules.
+#
+Validator.ConfigurationFile=validation.properties
+
+# Validators used by ESAPI
+Validator.AccountName=^[a-zA-Z0-9]{3,20}$
+Validator.SystemCommand=^[a-zA-Z\\-\\/]{1,64}$
+Validator.RoleName=^[a-z]{1,20}$
+
+# the word TEST below should be changed to your application
+# name - only relative URLs are supported
+Validator.Redirect=^\\/test.*$
+
+# Global HTTP Validation Rules
+# Values with Base64 encoded data (e.g. encrypted state) will need at least [a-zA-Z0-9\/+=]
+Validator.HTTPScheme=^(http|https)$
+Validator.HTTPServerName=^[a-zA-Z0-9_.\\-]*$
+Validator.HTTPParameterName=^[a-zA-Z0-9_]{1,32}$
+Validator.HTTPParameterValue=^[a-zA-Z0-9.\\-\\/+=_ ]*$
+Validator.HTTPCookieName=^[a-zA-Z0-9\\-_]{1,32}$
+Validator.HTTPCookieValue=^[a-zA-Z0-9\\-\\/+=_ ]*$
+Validator.HTTPHeaderName=^[a-zA-Z0-9\\-_]{1,32}$
+Validator.HTTPHeaderValue=^[a-zA-Z0-9()\\-=\\*\\.\\?;,+\\/:&_ ]*$
+Validator.HTTPContextPath=^[a-zA-Z0-9.\\-\\/_]*$
+Validator.HTTPServletPath=^[a-zA-Z0-9.\\-\\/_]*$
+Validator.HTTPPath=^[a-zA-Z0-9.\\-_]*$
+Validator.HTTPQueryString=^[a-zA-Z0-9()\\-=\\*\\.\\?;,+\\/:&_ %]*$
+Validator.HTTPURI=^[a-zA-Z0-9()\\-=\\*\\.\\?;,+\\/:&_ ]*$
+Validator.HTTPURL=^.*$
+Validator.HTTPJSESSIONID=^[A-Z0-9]{10,30}$
+
+# Validation of file related input
+Validator.FileName=^[a-zA-Z0-9!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1,255}$
+Validator.DirectoryName=^[a-zA-Z0-9:/\\\\!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1,255}$

http://git-wip-us.apache.org/repos/asf/hbase/blob/f280c459/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index bf52503..1ab1f58 100644
--- a/pom.xml
+++ b/pom.xml
@@ -754,6 +754,7 @@
               <exclude>.svn/**</exclude>
               <exclude>**/.settings/**</exclude>
               <exclude>**/patchprocess/**</exclude>
+              <exclude>**/ESAPI.properties</exclude>
             </excludes>
           </configuration>
         </plugin>
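
For background on the Encryptor.ChooseIVMethod setting above: a minimal JCE
sketch (independent of ESAPI, not part of this commit) showing AES/CBC/PKCS5Padding
with a fresh random IV per encryption, which is the behaviour the 'random' default
configures. All class names below are standard javax.crypto/java.security APIs.

    import java.security.SecureRandom;
    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.IvParameterSpec;

    public class CbcIvExample {
      public static void main(String[] args) throws Exception {
        KeyGenerator kg = KeyGenerator.getInstance("AES");
        kg.init(128); // matches Encryptor.EncryptionKeyLength=128
        SecretKey key = kg.generateKey();

        // A fresh random IV for every encryption. The IV need not be secret,
        // but it must not be reused with the same key or attacker-chosen.
        byte[] iv = new byte[16]; // AES block size is 16 bytes
        new SecureRandom().nextBytes(iv);

        Cipher c = Cipher.getInstance("AES/CBC/PKCS5Padding");
        c.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));
        byte[] ct = c.doFinal("hello".getBytes("UTF-8"));

        // Decryption needs the same IV, typically stored alongside the ciphertext.
        c.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
        System.out.println(new String(c.doFinal(ct), "UTF-8"));
      }
    }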


[02/50] hbase git commit: HBASE-15052 Use EnvironmentEdgeManager in ReplicationSource

Posted by la...@apache.org.
HBASE-15052 Use EnvironmentEdgeManager in ReplicationSource


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4fa859d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4fa859d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4fa859d

Branch: refs/heads/branch-1.0
Commit: f4fa859d949f4cffbb5bf8ed17a64bd6c58ce586
Parents: cccf8e6
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Mon Jan 11 09:49:26 2016 -0800
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Mon Jan 11 09:49:47 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/replication/regionserver/ReplicationSource.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4fa859d/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 0d55b94..f3734b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 
@@ -395,7 +396,7 @@ public class ReplicationSource extends Thread
           sleepMultiplier = 1;
           // if there was nothing to ship and it's not an error
           // set "ageOfLastShippedOp" to <now> to indicate that we're current
-          this.metrics.setAgeOfLastShippedOp(System.currentTimeMillis());
+          this.metrics.setAgeOfLastShippedOp(EnvironmentEdgeManager.currentTime());
         }
         if (sleepForRetries("Nothing to replicate", sleepMultiplier)) {
           sleepMultiplier++;
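
The value of routing time lookups through EnvironmentEdgeManager is that the
clock becomes injectable in tests. A minimal sketch of that pattern, assuming
HBase's ManualEnvironmentEdge test utility (not part of this commit):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class ClockInjectionSketch {
      public static void main(String[] args) {
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        clock.setValue(1000L);
        EnvironmentEdgeManager.injectEdge(clock);
        // Code under test now reads the injected clock, not the wall clock.
        System.out.println(EnvironmentEdgeManager.currentTime()); // 1000
        clock.incValue(500);
        System.out.println(EnvironmentEdgeManager.currentTime()); // 1500
        EnvironmentEdgeManager.reset(); // restore the default edge
      }
    }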


[06/50] hbase git commit: HBASE-15108 TestReplicationAdmin failed on branch-1.0

Posted by la...@apache.org.
HBASE-15108 TestReplicationAdmin failed on branch-1.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ae8f0900
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ae8f0900
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ae8f0900

Branch: refs/heads/branch-1.0
Commit: ae8f0900fb49b222c8359256022035841b3eb53d
Parents: 9277293
Author: chenheng <ch...@apache.org>
Authored: Fri Jan 15 09:49:37 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Fri Jan 15 09:49:37 2016 +0800

----------------------------------------------------------------------
 .../client/replication/TestReplicationAdmin.java  | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ae8f0900/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 1a5dd33..d4cebd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -175,14 +175,28 @@ public class TestReplicationAdmin {
 
     // append table t2 to replication
     admin.appendPeerTableCFs(ID_ONE, "t2");
-    assertEquals("t2;t1", admin.getPeerTableCFs(ID_ONE));
+    String peerTablesOne = admin.getPeerTableCFs(ID_ONE);
+
+    // Different JDKs return a different sort order for the tables. (Not sure why exactly.)
+    //
+    // So instead of asserting that the string matches exactly, we
+    // assert that it contains all tables and the needed separator.
+    assertTrue("Should contain t1", peerTablesOne.contains("t1"));
+    assertTrue("Should contain t2", peerTablesOne.contains("t2"));
+    assertTrue("Should contain ; as the seperator", peerTablesOne.contains(";"));
 
     // append table column family: f1 of t3 to replication
     admin.appendPeerTableCFs(ID_ONE, "t3:f1");
-    assertEquals("t3:f1;t2;t1", admin.getPeerTableCFs(ID_ONE));
+    String peerTablesTwo = admin.getPeerTableCFs(ID_ONE);
+    assertTrue("Should contain t1", peerTablesTwo.contains("t1"));
+    assertTrue("Should contain t2", peerTablesTwo.contains("t2"));
+    assertTrue("Should contain t3:f1", peerTablesTwo.contains("t3:f1"));
+    assertTrue("Should contain ; as the seperator", peerTablesTwo.contains(";"));
     admin.removePeer(ID_ONE);
+
   }
 
+
   @Test
   public void testRemovePeerTableCFs() throws Exception {
     // Add a valid peer
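
An alternative to the contains() checks above that is order-independent but
stricter (it also rejects unexpected extra tables) is to split on the separator
and compare sets. A hypothetical helper, not part of this commit:

    import static org.junit.Assert.assertEquals;

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class OrderIndependentAssert {
      // Compares table CFs regardless of the map iteration order of the JDK.
      static void assertTableCFs(String actual, String... expected) {
        Set<String> actualSet = new HashSet<String>(Arrays.asList(actual.split(";")));
        Set<String> expectedSet = new HashSet<String>(Arrays.asList(expected));
        assertEquals(expectedSet, actualSet);
      }

      public static void main(String[] args) {
        assertTableCFs("t2;t1", "t1", "t2");                // passes in either order
        assertTableCFs("t3:f1;t2;t1", "t1", "t2", "t3:f1");
      }
    }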


[24/50] hbase git commit: HBASE-15252 Data loss when replaying wal if HDFS timeout

Posted by la...@apache.org.
HBASE-15252 Data loss when replaying wal if HDFS timeout


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21ab1843
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21ab1843
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21ab1843

Branch: refs/heads/branch-1.0
Commit: 21ab1843c524c670bab54db9a0082d3439fa7baa
Parents: e521b51
Author: zhangduo <zh...@apache.org>
Authored: Fri Feb 12 08:17:10 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Fri Feb 12 16:26:26 2016 +0800

----------------------------------------------------------------------
 .../regionserver/wal/ProtobufLogReader.java     |   3 +-
 .../hbase/regionserver/wal/TestWALReplay.java   | 113 ++++++++++++++++++-
 2 files changed, 112 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/21ab1843/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 3aba71a..61b3977 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
  * A Protobuf based WAL has the following structure:
@@ -318,7 +319,7 @@ public class ProtobufLogReader extends ReaderBase {
           }
           ProtobufUtil.mergeFrom(builder, new LimitInputStream(this.inputStream, size),
             (int)size);
-        } catch (IOException ipbe) {
+        } catch (InvalidProtocolBufferException ipbe) {
           throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; originalPosition=" +
             originalPosition + ", currentPosition=" + this.inputStream.getPos() +
             ", messageSize=" + size + ", currentAvailable=" + available).initCause(ipbe);

http://git-wip-us.apache.org/repos/asf/hbase/blob/21ab1843/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index e2f974e..12295a56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -22,9 +22,15 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
+import java.io.FilterInputStream;
 import java.io.IOException;
+import java.lang.reflect.Field;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
@@ -35,6 +41,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -86,6 +93,7 @@ import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
+import org.apache.hadoop.hdfs.DFSInputStream;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -95,6 +103,8 @@ import org.junit.Rule;
 import org.junit.rules.TestName;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 /**
  * Test replay of edits out of a WAL split.
@@ -496,7 +506,7 @@ public class TestWALReplay {
     boolean first = true;
     for (HColumnDescriptor hcd: htd.getFamilies()) {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
-      if (first ) {
+      if (first) {
         // If first, so we have at least one family w/ different seqid to rest.
         region.flushcache();
         first = false;
@@ -820,9 +830,9 @@ public class TestWALReplay {
     final Configuration newConf = HBaseConfiguration.create(this.conf);
     User user = HBaseTestingUtility.getDifferentUser(newConf,
       ".replay.wal.secondtime");
-    user.runAs(new PrivilegedExceptionAction() {
+    user.runAs(new PrivilegedExceptionAction<Void>() {
       @Override
-      public Object run() throws Exception {
+      public Void run() throws Exception {
         runWALSplit(newConf);
         FileSystem newFS = FileSystem.get(newConf);
         // 100k seems to make for about 4 flushes during HRegion#initialize.
@@ -922,6 +932,103 @@ public class TestWALReplay {
         lastestSeqNumber, editCount);
   }
 
+  /**
+   * testcase for https://issues.apache.org/jira/browse/HBASE-15252
+   */
+  @Test
+  public void testDatalossWhenInputError() throws IOException, InstantiationException,
+      IllegalAccessException {
+    final TableName tableName = TableName.valueOf("testDatalossWhenInputError");
+    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
+    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
+    deleteDir(basedir);
+    final byte[] rowName = tableName.getName();
+    final int countPerFamily = 10;
+    final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
+    HRegion region1 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
+    Path regionDir = region1.getRegionFileSystem().getRegionDir();
+    HBaseTestingUtility.closeRegionAndWAL(region1);
+
+    WAL wal = createWAL(this.conf);
+    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
+    for (HColumnDescriptor hcd : htd.getFamilies()) {
+      addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
+    }
+    // Now assert edits made it in.
+    final Get g = new Get(rowName);
+    Result result = region.get(g);
+    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
+    // Now close the region (without flush), split the log, reopen the region and assert that
+    // replay of log has the correct effect.
+    region.close(true);
+    wal.shutdown();
+
+    runWALSplit(this.conf);
+
+    // here we let the DFSInputStream throw an IOException just after the WALHeader.
+    Path editFile = WALSplitter.getSplitEditFilesSorted(this.fs, regionDir).first();
+    FSDataInputStream stream = fs.open(editFile);
+    stream.seek(ProtobufLogReader.PB_WAL_MAGIC.length);
+    Class<? extends DefaultWALProvider.Reader> logReaderClass =
+        conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
+          DefaultWALProvider.Reader.class);
+    DefaultWALProvider.Reader reader = logReaderClass.newInstance();
+    reader.init(this.fs, editFile, conf, stream);
+    final long headerLength = stream.getPos();
+    reader.close();
+    FileSystem spyFs = spy(this.fs);
+    doAnswer(new Answer<FSDataInputStream>() {
+
+      @Override
+      public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
+        FSDataInputStream stream = (FSDataInputStream) invocation.callRealMethod();
+        Field field = FilterInputStream.class.getDeclaredField("in");
+        field.setAccessible(true);
+        final DFSInputStream in = (DFSInputStream) field.get(stream);
+        DFSInputStream spyIn = spy(in);
+        doAnswer(new Answer<Integer>() {
+
+          private long pos;
+
+          @Override
+          public Integer answer(InvocationOnMock invocation) throws Throwable {
+            if (pos >= headerLength) {
+              throw new IOException("read over limit");
+            }
+            int b = (Integer) invocation.callRealMethod();
+            if (b > 0) {
+              pos += b;
+            }
+            return b;
+          }
+        }).when(spyIn).read(any(byte[].class), any(int.class), any(int.class));
+        doAnswer(new Answer<Void>() {
+
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            invocation.callRealMethod();
+            in.close();
+            return null;
+          }
+        }).when(spyIn).close();
+        field.set(stream, spyIn);
+        return stream;
+      }
+    }).when(spyFs).open(eq(editFile));
+
+    WAL wal2 = createWAL(this.conf);
+    HRegion region2;
+    try {
+      // log replay should fail due to the IOException, otherwise we may lose data.
+      region2 = HRegion.openHRegion(conf, spyFs, hbaseRootDir, hri, htd, wal2);
+      assertEquals(result.size(), region2.get(g).size());
+    } catch (IOException e) {
+      assertEquals("read over limit", e.getMessage());
+    }
+    region2 = HRegion.openHRegion(conf, fs, hbaseRootDir, hri, htd, wal2);
+    assertEquals(result.size(), region2.get(g).size());
+  }
+
   static class MockWAL extends FSHLog {
     boolean doCompleteCacheFlush = false;
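
The essence of the reader fix: only a protobuf parse failure may be treated as a
benign partial trailing record, while any other IOException (such as an HDFS read
timeout) must propagate so the split fails instead of silently dropping edits. A
condensed illustration of that distinction, not the actual ProtobufLogReader code:

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    import com.google.protobuf.InvalidProtocolBufferException;

    public class NarrowCatchSketch {
      // Turns a truncated trailing record into EOF; real I/O errors propagate.
      static byte[] readRecord(InputStream in, int size) throws IOException {
        try {
          byte[] buf = new byte[size];
          if (in.read(buf) < size) {
            throw new InvalidProtocolBufferException("truncated record");
          }
          return buf;
        } catch (InvalidProtocolBufferException ipbe) {
          // A partially written tail entry is expected after a crash: treat as EOF.
          throw (EOFException) new EOFException("Invalid PB, EOF?").initCause(ipbe);
        }
        // Any other IOException (e.g. a timeout) is NOT caught here, so WAL
        // replay fails loudly rather than losing data.
      }
    }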
 


[40/50] hbase git commit: HBASE-15720 Print row locks at the debug dump page, addendum

Posted by la...@apache.org.
HBASE-15720 Print row locks at the debug dump page, addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3e77bf3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3e77bf3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3e77bf3

Branch: refs/heads/branch-1.0
Commit: a3e77bf3664d361a5337ba6b584848eb65ceccbb
Parents: 8911195
Author: chenheng <ch...@apache.org>
Authored: Tue May 3 09:31:45 2016 +1000
Committer: chenheng <ch...@apache.org>
Committed: Tue May 3 09:31:45 2016 +1000

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e77bf3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 56d0417..8fe6126 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -105,8 +105,7 @@ public class RSDumpServlet extends StateDumpServlet {
 
   public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
     StringBuilder sb = new StringBuilder();
-    for (Region region : hrs.getOnlineRegionsLocalContext()) {
-      HRegion hRegion = (HRegion)region;
+    for (HRegion hRegion : hrs.getOnlineRegionsLocalContext()) {
       if (hRegion.getLockedRows().size() > 0) {
         for (HRegion.RowLockContext rowLockContext : hRegion.getLockedRows().values()) {
           sb.setLength(0);


[41/50] hbase git commit: HBASE-15801 Upgrade checkstyle for all branches

Posted by la...@apache.org.
HBASE-15801 Upgrade checkstyle for all branches


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/edc0a171
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/edc0a171
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/edc0a171

Branch: refs/heads/branch-1.0
Commit: edc0a171f1e449ffe2b2749f53dcbe76d067acbd
Parents: a3e77bf
Author: zhangduo <zh...@apache.org>
Authored: Mon May 9 14:42:27 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Mon May 9 15:35:14 2016 +0800

----------------------------------------------------------------------
 pom.xml | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/edc0a171/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 1ab1f58..1ced55c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -781,13 +781,18 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-checkstyle-plugin</artifactId>
-          <version>2.13</version>
+          <version>2.17</version>
           <dependencies>
             <dependency>
               <groupId>org.apache.hbase</groupId>
               <artifactId>hbase-checkstyle</artifactId>
               <version>${project.version}</version>
             </dependency>
+            <dependency>
+              <groupId>com.puppycrawl.tools</groupId>
+              <artifactId>checkstyle</artifactId>
+              <version>${checkstyle.version}</version>
+            </dependency>
           </dependencies>
           <configuration>
             <configLocation>hbase/checkstyle.xml</configLocation>
@@ -902,6 +907,11 @@
             <artifactId>hbase-checkstyle</artifactId>
             <version>${project.version}</version>
           </dependency>
+          <dependency>
+            <groupId>com.puppycrawl.tools</groupId>
+            <artifactId>checkstyle</artifactId>
+            <version>${checkstyle.version}</version>
+          </dependency>
         </dependencies>
         <configuration>
           <configLocation>hbase/checkstyle.xml</configLocation>
@@ -1174,6 +1184,7 @@
     <maven.antrun.version>1.6</maven.antrun.version>
     <jamon.plugin.version>2.3.4</jamon.plugin.version>
     <findbugs-annotations>1.3.9-1</findbugs-annotations>
+    <checkstyle.version>6.18</checkstyle.version>
     <javadoc.version>2.9</javadoc.version>
     <asciidoctor.plugin.version>1.5.2.1</asciidoctor.plugin.version>
     <!-- General Packaging -->
@@ -2455,7 +2466,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.13</version>
+        <version>2.17</version>
         <configuration>
           <configLocation>hbase/checkstyle.xml</configLocation>
           <suppressionsLocation>hbase/checkstyle-suppressions.xml</suppressionsLocation>


[09/50] hbase git commit: HBASE-15150 Fix TestDurablity in branch-1.1 (Yu Li)

Posted by la...@apache.org.
HBASE-15150 Fix TestDurablity in branch-1.1 (Yu Li)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64e22626
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64e22626
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64e22626

Branch: refs/heads/branch-1.0
Commit: 64e22626adc20162cfb5da71f0fc68797a888fa4
Parents: 886c70d
Author: stack <st...@apache.org>
Authored: Thu Jan 21 14:37:36 2016 -0800
Committer: stack <st...@apache.org>
Committed: Thu Jan 21 14:37:36 2016 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/64e22626/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8df37ff..4b8f3b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6074,7 +6074,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     }
     // Request a cache flush.  Do it outside update lock.
     if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
-    return Result.create(allKVs);
+    return increment.isReturnResults() ? Result.create(allKVs) : null;
   }
 
   /**
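
For context, the flag being honored here is set by the client. A sketch using the
standard branch-1 client API (illustrative, not part of this commit):

    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementNoResultSketch {
      static void bumpCounter(Table table) throws Exception {
        Increment inc = new Increment(Bytes.toBytes("row1"));
        inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
        inc.setReturnResults(false); // don't build or ship a Result back
        Result r = table.increment(inc);
        // With the fix above, r is now null on this fast path instead of a
        // fully materialized Result.
      }
    }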


[08/50] hbase git commit: HBASE-15147 Shell should use Admin.listTableNames() instead of Admin.listTables()

Posted by la...@apache.org.
HBASE-15147 Shell should use Admin.listTableNames() instead of Admin.listTables()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/886c70d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/886c70d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/886c70d0

Branch: refs/heads/branch-1.0
Commit: 886c70d0d95b95ddd928cd5bc1e1fc83b1de2f42
Parents: f1e1312
Author: Enis Soztutar <en...@apache.org>
Authored: Thu Jan 21 11:09:02 2016 -0800
Committer: Enis Soztutar <en...@apache.org>
Committed: Thu Jan 21 11:13:08 2016 -0800

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/886c70d0/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 4aa715f..749ec50 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -45,7 +45,7 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Returns a list of tables in hbase
     def list(regex = ".*")
-      @admin.listTables(regex).map { |t| t.getNameAsString }
+      @admin.listTableNames(regex).map { |t| t.getNameAsString }
     end
 
     #----------------------------------------------------------------------------------------------
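
The Java equivalent of the shell change, as a sketch against the branch-1 Admin
API: listTableNames() returns lightweight TableName objects, so listing no longer
fetches every table's full HTableDescriptor just to print names.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class ListTablesSketch {
      static void printTableNames(Admin admin) throws Exception {
        // Names only - no per-table schema round-trip.
        for (TableName tn : admin.listTableNames(".*")) {
          System.out.println(tn.getNameAsString());
        }
      }
    }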


[34/50] hbase git commit: HBASE-15693 Reconsider the ImportOrder rule of checkstyle

Posted by la...@apache.org.
HBASE-15693 Reconsider the ImportOrder rule of checkstyle


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b20b27e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b20b27e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b20b27e

Branch: refs/heads/branch-1.0
Commit: 0b20b27e56aeb0a9b602c39fc13bfeddad11bdee
Parents: a74c495
Author: zhangduo <zh...@apache.org>
Authored: Sun Apr 24 11:09:50 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sun Apr 24 11:10:38 2016 +0800

----------------------------------------------------------------------
 .../src/main/resources/hbase/checkstyle.xml     | 82 ++++++++++++++++----
 1 file changed, 66 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b20b27e/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
index 34fe5ec..b423095 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
@@ -32,29 +32,79 @@
 <module name="Checker">
   <module name="FileTabCharacter"/>
   <module name="TreeWalker">
+
+    <!-- Annotations Checks
+    http://checkstyle.sourceforge.net/config_annotation.html -->
+    <module name="MissingDeprecated"/>
+
+    <!-- Block Checks
+    http://checkstyle.sourceforge.net/config_blocks.html -->
+    <module name="EmptyBlock"/>
+    <module name="LeftCurly"/>
+    <module name="NeedBraces"/>
+
+    <!-- Class Design Checks
+    http://checkstyle.sourceforge.net/config_design.html -->
+    <module name="FinalClass"/>
+    <module name="HideUtilityClassConstructor"/>
+    <module name="InterfaceIsType"/>
+    <module name="VisibilityModifier">
+      <property name="packageAllowed" value="true"/>
+      <property name="protectedAllowed" value="true"/>
+    </module>
+
+    <!-- Coding Checks
+    http://checkstyle.sourceforge.net/config_coding.html -->
+    <module name="ArrayTypeStyle"/>
+    <module name="EmptyStatement"/>
+    <module name="EqualsHashCode"/>
+    <module name="IllegalInstantiation"/>
+    <module name="InnerAssignment"/>
+    <module name="MissingSwitchDefault"/>
+    <module name="NoFinalizer"/>
+
+    <!-- Import Checks
+    http://checkstyle.sourceforge.net/config_imports.html -->
     <module name="AvoidStarImport"/>
+    <module name="ImportOrder">
+      <property name="option" value="top" />
+      <property name="ordered" value="true"/>
+      <property name="sortStaticImportsAlphabetically" value="true"/>
+    </module>
     <module name="RedundantImport"/>
-    <module name="UnusedImports"/>
+    <module name="UnusedImports">
+      <property name="processJavadoc" value="true"/>
+    </module>
+
+    <!-- Javadoc Checks
+    http://checkstyle.sourceforge.net/config_javadoc.html -->
+    <module name="JavadocTagContinuationIndentation">
+      <property name="offset" value="2"/>
+    </module>
+    <module name="NonEmptyAtclauseDescription"/>
+
+    <!-- Miscellaneous Checks
+    http://checkstyle.sourceforge.net/config_misc.html -->
+    <module name="UpperEll"/>
+    <module name="Indentation">
+      <property name="basicOffset" value="2"/>
+      <property name="caseIndent" value="2"/>
+      <property name="throwsIndent" value="2"/>
+      <property name="arrayInitIndent" value="2"/>
+      <property name="lineWrappingIndentation" value="2"/>
+    </module>
+
+    <!-- Size Violation Checks
+    http://checkstyle.sourceforge.net/config_sizes.html -->
     <module name="LineLength">
       <property name="max" value="100"/>
-      <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://"/>
+      <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated"/>
     </module>
     <module name="MethodLength"/>
+
+    <!-- Whitespace Checks
+    http://checkstyle.sourceforge.net/config_whitespace.html -->
     <module name="MethodParamPad"/>
     <module name="ParenPad"/>
-    <module name="EmptyStatement"/>
-    <module name="EmptyBlock"/>
-    <module name="EqualsHashCode"/>
-    <module name="IllegalInstantiation"/>
-    <module name="InnerAssignment"/>
-    <module name="MissingSwitchDefault"/>
-    <module name="FinalClass"/>
-    <module name="HideUtilityClassConstructor"/>
-    <module name="InterfaceIsType"/>
-    <module name="VisibilityModifier"/>
-    <module name="ArrayTypeStyle"/>
-    <module name="UpperEll"/>
-    <module name="NoFinalizer"/>
-    <module name="MissingDeprecated"/>
   </module>
 </module>
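
Concretely, the new ImportOrder settings (option=top, ordered=true,
sortStaticImportsAlphabetically=true) accept a source file laid out like this
hypothetical example:

    // Static imports first ("top"), alphabetically sorted.
    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;

    // Then regular imports, in lexicographic order.
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;

    // (Body omitted; this file only illustrates the import layout.)
    public class ImportOrderExample {
    }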


[49/50] hbase git commit: HBASE-16538 Changes the way version information is stored during build. Instead of writing package-info.java with VersionAnnotation, saveVersion.sh now writes Version.java with static members.

Posted by la...@apache.org.
HBASE-16538 Changes the way version information is stored during build.
Instead of writing package-info.java with VersionAnnotation, saveVersion.sh now writes Version.java with static members.

Change-Id: I009f440fa049f409e10cb3f1c3afb483bc2aa876


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fba13a6e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fba13a6e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fba13a6e

Branch: refs/heads/branch-1.0
Commit: fba13a6ef6cfe4c56bfa528b7bbf003482dfc4a4
Parents: 8971176
Author: Apekshit Sharma <ap...@apache.org>
Authored: Thu Sep 1 18:18:48 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Fri Sep 2 16:44:47 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/VersionAnnotation.java  | 66 --------------------
 .../apache/hadoop/hbase/util/VersionInfo.java   | 34 +++-------
 hbase-common/src/saveVersion.sh                 | 14 +++--
 3 files changed, 19 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fba13a6e/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
deleted file mode 100644
index f3137ae..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.lang.annotation.*;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * A package attribute that captures the version of hbase that was compiled.
- * Copied down from hadoop.  All is same except name of interface.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.PACKAGE)
-@InterfaceAudience.Private
-public @interface VersionAnnotation {
-
-  /**
-   * Get the Hadoop version
-   * @return the version string "0.6.3-dev"
-   */
-  String version();
-
-  /**
-   * Get the username that compiled Hadoop.
-   */
-  String user();
-
-  /**
-   * Get the date when Hadoop was compiled.
-   * @return the date in unix 'date' format
-   */
-  String date();
-
-  /**
-   * Get the url for the subversion repository.
-   */
-  String url();
-
-  /**
-   * Get the subversion revision.
-   * @return the revision number as a string (eg. "451451")
-   */
-  String revision();
-
-  /**
-   * Get a checksum of the source files from which HBase was compiled.
-   * @return a string that uniquely identifies the source
-   **/
-  String srcChecksum();
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fba13a6e/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
index aadad2e..e767d80 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
@@ -21,41 +21,25 @@ package org.apache.hadoop.hbase.util;
 import org.apache.commons.logging.LogFactory;
 import java.io.PrintWriter;
 
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hbase.Version;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.VersionAnnotation;
-import org.apache.commons.logging.Log;
 
 /**
- * This class finds the package info for hbase and the VersionAnnotation
- * information.  Taken from hadoop.  Only name of annotation is different.
+ * This class finds the Version information for HBase.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class VersionInfo {
   private static final Log LOG = LogFactory.getLog(VersionInfo.class.getName());
-  private static Package myPackage;
-  private static VersionAnnotation version;
-
-  static {
-    myPackage = VersionAnnotation.class.getPackage();
-    version = myPackage.getAnnotation(VersionAnnotation.class);
-  }
-
-  /**
-   * Get the meta-data for the hbase package.
-   * @return package
-   */
-  static Package getPackage() {
-    return myPackage;
-  }
 
   /**
    * Get the hbase version.
    * @return the hbase version string, eg. "0.6.3-dev"
    */
   public static String getVersion() {
-    return version != null ? version.version() : "Unknown";
+    return Version.version;
   }
 
   /**
@@ -63,7 +47,7 @@ public class VersionInfo {
    * @return the revision number, eg. "451451"
    */
   public static String getRevision() {
-    return version != null ? version.revision() : "Unknown";
+    return Version.revision;
   }
 
   /**
@@ -71,7 +55,7 @@ public class VersionInfo {
    * @return the compilation date in unix date format
    */
   public static String getDate() {
-    return version != null ? version.date() : "Unknown";
+    return Version.date;
   }
 
   /**
@@ -79,7 +63,7 @@ public class VersionInfo {
    * @return the username of the user
    */
   public static String getUser() {
-    return version != null ? version.user() : "Unknown";
+    return Version.user;
   }
 
   /**
@@ -87,7 +71,7 @@ public class VersionInfo {
    * @return the url
    */
   public static String getUrl() {
-    return version != null ? version.url() : "Unknown";
+    return Version.url;
   }
 
   static String[] versionReport() {
@@ -104,7 +88,7 @@ public class VersionInfo {
    * @return a string that uniquely identifies the source
    **/
   public static String getSrcChecksum() {
-    return version != null ? version.srcChecksum() : "Unknown";
+    return Version.srcChecksum;
   }
 
   public static void writeTo(PrintWriter out) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/fba13a6e/hbase-common/src/saveVersion.sh
----------------------------------------------------------------------
diff --git a/hbase-common/src/saveVersion.sh b/hbase-common/src/saveVersion.sh
index 890dc5a..4c21829 100644
--- a/hbase-common/src/saveVersion.sh
+++ b/hbase-common/src/saveVersion.sh
@@ -55,13 +55,19 @@ fi
 popd
 
 mkdir -p "$outputDirectory/org/apache/hadoop/hbase"
-cat >"$outputDirectory/org/apache/hadoop/hbase/package-info.java" <<EOF
+cat >"$outputDirectory/org/apache/hadoop/hbase/Version.java" <<EOF
 /*
  * Generated by src/saveVersion.sh
  */
-@VersionAnnotation(version="$version", revision="$revision",
-                         user="$user", date="$date", url="$url",
-                         srcChecksum="$srcChecksum")
 package org.apache.hadoop.hbase;
+
+public class Version {
+  public static final String version = "$version";
+  public static final String revision = "$revision";
+  public static final String user = "$user";
+  public static final String date = "$date";
+  public static final String url = "$url";
+  public static final String srcChecksum = "$srcChecksum";
+}
 EOF
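
Callers are unaffected: the public VersionInfo accessors keep their signatures and
now simply read the generated static fields. A usage sketch:

    import org.apache.hadoop.hbase.util.VersionInfo;

    public class VersionReportSketch {
      public static void main(String[] args) {
        // Each accessor now resolves to a constant in the generated Version class.
        System.out.println("HBase " + VersionInfo.getVersion());
        System.out.println("revision " + VersionInfo.getRevision());
        System.out.println("compiled by " + VersionInfo.getUser()
            + " on " + VersionInfo.getDate());
        System.out.println("source checksum " + VersionInfo.getSrcChecksum());
      }
    }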
 


[32/50] hbase git commit: HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erronously

Posted by la...@apache.org.
HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erronously


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/52f8ad90
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/52f8ad90
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/52f8ad90

Branch: refs/heads/branch-1.0
Commit: 52f8ad902da29f1e6eed4e5f18921689d42cc993
Parents: 47baaed
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Apr 5 18:13:40 2016 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Apr 5 18:22:40 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/util/FSTableDescriptors.java    | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/52f8ad90/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 7cd2673..06eb9ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -124,11 +124,13 @@ public class FSTableDescriptors implements TableDescriptors {
     this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
+  @Override
   public void setCacheOn() throws IOException {
     this.cache.clear();
     this.usecache = true;
   }
 
+  @Override
   public void setCacheOff() throws IOException {
     this.usecache = false;
     this.cache.clear();
@@ -173,6 +175,8 @@ public class FSTableDescriptors implements TableDescriptors {
     } catch (NullPointerException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
           + tablename, e);
+    } catch (TableInfoMissingException e) {
+      // ignore. This is regular operation
     } catch (IOException ioe) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
           + tablename, ioe);


[22/50] hbase git commit: HBASE-15198 RPC client not using Codec and CellBlock for puts by default-addendum.

Posted by la...@apache.org.
HBASE-15198 RPC client not using Codec and CellBlock for puts by default-addendum.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f064d47
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f064d47
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f064d47

Branch: refs/heads/branch-1.0
Commit: 8f064d47785dac99b69cc16c50fe26a6203970f2
Parents: 62206fd
Author: anoopsjohn <an...@gmail.com>
Authored: Thu Feb 11 20:00:02 2016 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Thu Feb 11 20:00:02 2016 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/client/TestAsyncProcess.java   | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f064d47/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index ce7e2f3..cbd3ffc 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -353,6 +353,11 @@ public class TestAsyncProcess {
         byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException {
       return new RegionLocations(loc1);
     }
+
+    @Override
+    public boolean hasCellBlockSupport() {
+      return false;
+    }
   }
 
   /**


[43/50] hbase git commit: HBASE-15976 RegionServerMetricsWrapperRunnable will be failure when disable blockcache.

Posted by la...@apache.org.
HBASE-15976 RegionServerMetricsWrapperRunnable will be failure when disable blockcache.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14d0bef2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14d0bef2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14d0bef2

Branch: refs/heads/branch-1.0
Commit: 14d0bef21eaa67f6377b4456a941f46cd908b3aa
Parents: a55ef15
Author: Jingcheng Du <ji...@intel.com>
Authored: Wed Jun 29 17:16:09 2016 +0800
Committer: Jingcheng Du <ji...@intel.com>
Committed: Wed Jun 29 17:16:09 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java   | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/14d0bef2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 75dcf26..d14b135 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -413,7 +413,6 @@ class MetricsRegionServerWrapperImpl
     @Override
     synchronized public void run() {
       initBlockCache();
-      cacheStats = blockCache.getStats();
 
       HDFSBlocksDistribution hdfsBlocksDistribution =
           new HDFSBlocksDistribution();
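
When the block cache is disabled, blockCache stays null, so the unconditional
getStats() call threw a NullPointerException on every metrics tick; the
assignment belongs behind the null check that initBlockCache() performs. The
defensive shape, as an illustrative sketch with hypothetical stand-in types:

    // Illustrative only: BlockCacheLike/StatsLike stand in for HBase's
    // BlockCache and CacheStats; the real initBlockCache() is more involved.
    class MetricsSketch {
      interface BlockCacheLike { StatsLike getStats(); }
      interface StatsLike { long getHitCount(); }

      private BlockCacheLike blockCache; // null when the cache is disabled
      private StatsLike cacheStats;

      void refresh() {
        if (blockCache != null && cacheStats == null) {
          cacheStats = blockCache.getStats(); // guarded, unlike the removed line
        }
        long hits = (cacheStats != null) ? cacheStats.getHitCount() : 0L;
        System.out.println("blockCacheHitCount=" + hits);
      }
    }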


[27/50] hbase git commit: HBASE-15365 Do not write to '/tmp' in TestHBaseConfiguration

Posted by la...@apache.org.
HBASE-15365 Do not write to '/tmp' in TestHBaseConfiguration

Conflicts:
	hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4c50a56
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4c50a56
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4c50a56

Branch: refs/heads/branch-1.0
Commit: c4c50a565b84fee5bd5149bab9d5fb9df48d76ba
Parents: 2ce516b
Author: zhangduo <zh...@apache.org>
Authored: Wed Mar 2 09:37:07 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Wed Mar 2 11:21:30 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/TestHBaseConfiguration.java    | 27 ++++++++++++--------
 1 file changed, 16 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4c50a56/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index b739f36..8973fdc 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -30,6 +31,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.AfterClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -38,6 +40,13 @@ public class TestHBaseConfiguration {
 
   private static final Log LOG = LogFactory.getLog(TestHBaseConfiguration.class);
 
+  private static HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility();
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    UTIL.cleanupTestDir();
+  }
+
   @Test
   public void testGetIntDeprecated() {
     int VAL = 1, VAL2 = 2;
@@ -66,22 +75,19 @@ public class TestHBaseConfiguration {
   @Test
   public void testGetPassword() throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    conf.set(ReflectiveCredentialProviderClient.CREDENTIAL_PROVIDER_PATH,
-        "jceks://file/tmp/foo.jks");
-    ReflectiveCredentialProviderClient client =
-        new ReflectiveCredentialProviderClient();
+    conf.set(ReflectiveCredentialProviderClient.CREDENTIAL_PROVIDER_PATH, "jceks://file"
+        + new File(UTIL.getDataTestDir().toUri().getPath(), "foo.jks").getCanonicalPath());
+    ReflectiveCredentialProviderClient client = new ReflectiveCredentialProviderClient();
     if (client.isHadoopCredentialProviderAvailable()) {
-      char[] keyPass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
-      char[] storePass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
+      char[] keyPass = { 'k', 'e', 'y', 'p', 'a', 's', 's' };
+      char[] storePass = { 's', 't', 'o', 'r', 'e', 'p', 'a', 's', 's' };
       client.createEntry(conf, "ssl.keypass.alias", keyPass);
       client.createEntry(conf, "ssl.storepass.alias", storePass);
 
-      String keypass = HBaseConfiguration.getPassword(
-          conf, "ssl.keypass.alias", null);
+      String keypass = HBaseConfiguration.getPassword(conf, "ssl.keypass.alias", null);
       assertEquals(keypass, new String(keyPass));
 
-      String storepass = HBaseConfiguration.getPassword(
-          conf, "ssl.storepass.alias", null);
+      String storepass = HBaseConfiguration.getPassword(conf, "ssl.storepass.alias", null);
       assertEquals(storepass, new String(storePass));
     }
   }
@@ -165,7 +171,6 @@ public class TestHBaseConfiguration {
         getProvidersMethod = loadMethod(hadoopCredProviderFactoryClz,
             HADOOP_CRED_PROVIDER_FACTORY_GET_PROVIDERS_METHOD_NAME,
             Configuration.class);
-
         // Load Hadoop CredentialProvider
         Class<?> hadoopCredProviderClz = null;
         hadoopCredProviderClz = Class.forName(HADOOP_CRED_PROVIDER_CLASS_NAME);
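
In isolation, the pattern this commit adopts looks roughly like the sketch below. It is illustrative rather than commit code: the class name is invented and "foo.jks" is a placeholder, while HBaseCommonTestingUtility, getDataTestDir() and cleanupTestDir() are the utilities the diff actually uses.

    import static org.junit.Assert.assertTrue;

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
    import org.junit.AfterClass;
    import org.junit.Test;

    public class DataTestDirSketch {
      // A per-test-class scratch directory instead of the shared '/tmp'.
      private static final HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility();

      @AfterClass
      public static void tearDown() throws IOException {
        UTIL.cleanupTestDir(); // removes everything the test wrote
      }

      @Test
      public void buildsKeystorePathUnderTestDir() throws IOException {
        File jks = new File(UTIL.getDataTestDir().toUri().getPath(), "foo.jks");
        // This is the form the test now hands to the credential provider,
        // replacing the hard-coded "jceks://file/tmp/foo.jks".
        String providerPath = "jceks://file" + jks.getCanonicalPath();
        assertTrue(providerPath.startsWith("jceks://file"));
      }
    }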


[48/50] hbase git commit: HBASE-16518 Remove unused .arcconfig file

Posted by la...@apache.org.
HBASE-16518 Remove unused .arcconfig file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8971176d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8971176d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8971176d

Branch: refs/heads/branch-1.0
Commit: 8971176d89b1a8d45367a095c89221caf96fb3e7
Parents: cf2e2fd
Author: Gary Helmling <ga...@apache.org>
Authored: Mon Aug 29 12:28:46 2016 -0700
Committer: Gary Helmling <ga...@apache.org>
Committed: Mon Aug 29 15:14:57 2016 -0700

----------------------------------------------------------------------
 .arcconfig | 6 ------
 .gitignore | 1 -
 2 files changed, 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8971176d/.arcconfig
----------------------------------------------------------------------
diff --git a/.arcconfig b/.arcconfig
deleted file mode 100644
index 8eef934..0000000
--- a/.arcconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "project_id" : "HBaseOnGithub",
-  "conduit_uri" : "https://reviews.facebook.net/",
-  "copyright_holder" : "Apache Software Foundation",
-  "max_line_length" : 100
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8971176d/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index f9fc9f7..63c7a7b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
-/.arc_jira_lib
 /.externalToolBuilders
 .project
 *.settings/


[05/50] hbase git commit: HBASE-15095 isReturnResult=false on fast path in branch-1.1 and branch-1.0 is not respected

Posted by la...@apache.org.
HBASE-15095 isReturnResult=false on fast path in branch-1.1 and branch-1.0 is not respected


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/92772936
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/92772936
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/92772936

Branch: refs/heads/branch-1.0
Commit: 927729362f7a6cac33ad990b06314b9252dfb278
Parents: ccc8e4a
Author: chenheng <ch...@apache.org>
Authored: Wed Jan 13 11:13:35 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Wed Jan 13 11:13:35 2016 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Append.java  |  1 -
 .../apache/hadoop/hbase/client/Increment.java   | 12 ++++
 .../apache/hadoop/hbase/client/Mutation.java    |  2 +
 .../hadoop/hbase/regionserver/HRegion.java      |  4 +-
 .../client/TestIncrementsFromClientSide.java    | 62 +++++++++++---------
 5 files changed, 51 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/92772936/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 58c204b6..1bdb121 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Append extends Mutation {
-  private static final String RETURN_RESULTS = "_rr_";
   /**
    * @param returnResults
    *          True (default) if the append operation should return the results.

http://git-wip-us.apache.org/repos/asf/hbase/blob/92772936/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index af0ea56..f090cf0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -90,6 +90,18 @@ public class Increment extends Mutation implements Comparable<Row> {
   }
 
   /**
+   * @return current setting for returnResults
+   */
+  public boolean isReturnResults() {
+    byte[] v = getAttribute(RETURN_RESULTS);
+    return v == null ? true : Bytes.toBoolean(v);
+  }
+
+  public Increment setReturnResults(boolean returnResults) {
+    setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults));
+    return this;
+  }
+  /**
    * Add the specified KeyValue to this operation.
    * @param cell individual Cell
    * @return this

http://git-wip-us.apache.org/repos/asf/hbase/blob/92772936/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 2b88ffc..2444cf4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -83,6 +83,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
    */
   private static final String OP_ATTRIBUTE_TTL = "_ttl";
 
+  protected static final String RETURN_RESULTS = "_rr_";
+
   protected byte [] row = null;
   protected long ts = HConstants.LATEST_TIMESTAMP;
   protected Durability durability = Durability.USE_DEFAULT;

http://git-wip-us.apache.org/repos/asf/hbase/blob/92772936/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e655f4e..8df37ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5893,7 +5893,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
       try {
         if (this.coprocessorHost != null) {
           Result r = this.coprocessorHost.preIncrementAfterRowLock(increment);
-          if (r != null) return r;
+          if (r != null) return increment.isReturnResults() ? r : null;
         }
         // Process increments a Store/family at a time.
         long now = EnvironmentEdgeManager.currentTime();
@@ -5967,7 +5967,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     }
     // Request a cache flush.  Do it outside update lock.
     if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
-    return Result.create(allKVs);
+    return increment.isReturnResults() ? Result.create(allKVs) : null;
   }
 
   private Result slowButConsistentIncrement(Increment increment, long nonceGroup, long nonce)

http://git-wip-us.apache.org/repos/asf/hbase/blob/92772936/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
index f9461bc..77cebbd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
@@ -18,9 +18,6 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -35,12 +32,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -51,6 +45,10 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Run Increment tests that use the HBase clients; {@link HTable}.
  * 
@@ -89,30 +87,18 @@ public class TestIncrementsFromClientSide {
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         MultiRowMutationEndpoint.class.getName());
     conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
-    // We need more than one region server in this test
-    TEST_UTIL.startMiniCluster(SLAVES);
   }
 
   @Before
   public void before() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     if (this.fast) {
-      // If fast is set, set our configuration and then do a rolling restart of the one
-      // regionserver so it picks up the new config. Doing this should be faster than starting
-      // and stopping a cluster for each test.
       this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
           conf.get(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
       conf.setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
-      HRegionServer rs =
-          TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
-      TEST_UTIL.getHBaseCluster().startRegionServer();
-      rs.stop("Restart");
-      while(!rs.isStopped()) {
-        Threads.sleep(100);
-        LOG.info("Restarting " + rs);
-      }
-      TEST_UTIL.waitUntilNoRegionsInTransition(10000);
     }
+    // We need more than one region server in this test
+    TEST_UTIL.startMiniCluster(SLAVES);
   }
 
   @After
@@ -124,13 +110,6 @@ public class TestIncrementsFromClientSide {
             this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
       }
     }
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @AfterClass
-  public static void afterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -185,6 +164,35 @@ public class TestIncrementsFromClientSide {
   }
 
   @Test
+  public void testIncrementReturnValue() throws Exception {
+    LOG.info("Starting " + this.name.getMethodName());
+    final TableName TABLENAME =
+      TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+    Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+    final byte[] COLUMN = Bytes.toBytes("column");
+    Put p = new Put(ROW);
+    p.add(FAMILY, COLUMN, Bytes.toBytes(5L));
+    ht.put(p);
+
+    Increment inc = new Increment(ROW);
+    inc.addColumn(FAMILY, COLUMN, 5L);
+
+    Result r = ht.increment(inc);
+    long result = Bytes.toLong(r.getValue(FAMILY, COLUMN));
+    assertEquals(10, result);
+
+    if (this.fast) {
+      inc = new Increment(ROW);
+      inc.addColumn(FAMILY, COLUMN, 5L);
+      inc.setReturnResults(false);
+      r = ht.increment(inc);
+      assertTrue(r.getExists() == null);
+    }
+
+  }
+
+
+  @Test
   public void testIncrementInvalidArguments() throws Exception {
     LOG.info("Starting " + this.name.getMethodName());
     final TableName TABLENAME =
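
From the client side, the new mutator is used as in the hedged sketch below. It is not commit code: the helper name and the "f"/"column" names are illustrative, while setReturnResults() and the empty-result behaviour come from the diff above.

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class QuietIncrementSketch {
      /** Bump a counter without shipping the updated value back to the client. */
      static void bumpQuietly(Table table, byte[] row) throws IOException {
        Increment inc = new Increment(row);
        inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("column"), 1L);
        inc.setReturnResults(false); // the fast path honors this only after this fix
        Result r = table.increment(inc);
        // As the new test asserts, no payload comes back: r.getExists() is null.
      }
    }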


[47/50] hbase git commit: HBASE-16467 Move AbstractHBaseTool to hbase-common.

Posted by la...@apache.org.
HBASE-16467 Move AbstractHBaseTool to hbase-common.

Change-Id: Iea3a3cd91fe4ecc9ff253e2795ab885945fa6a25


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cf2e2fde
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cf2e2fde
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cf2e2fde

Branch: refs/heads/branch-1.0
Commit: cf2e2fde0d66ebc8abc773f0a70825ac4a38523c
Parents: a049e51
Author: Apekshit Sharma <ap...@apache.org>
Authored: Mon Aug 22 17:19:40 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Mon Aug 22 17:19:40 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/util/AbstractHBaseTool.java    | 203 +++++++++++++++++++
 .../hadoop/hbase/util/AbstractHBaseTool.java    | 203 -------------------
 2 files changed, 203 insertions(+), 203 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cf2e2fde/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
new file mode 100644
index 0000000..a876aef
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.cli.BasicParser;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Common base class used for HBase command-line tools. Simplifies workflow and
+ * command-line argument parsing.
+ */
+@InterfaceAudience.Private
+public abstract class AbstractHBaseTool implements Tool {
+
+  protected static final int EXIT_SUCCESS = 0;
+  protected static final int EXIT_FAILURE = 1;
+
+  private static final String SHORT_HELP_OPTION = "h";
+  private static final String LONG_HELP_OPTION = "help";
+
+  private static final Log LOG = LogFactory.getLog(AbstractHBaseTool.class);
+
+  private final Options options = new Options();
+
+  protected Configuration conf = null;
+
+  private static final Set<String> requiredOptions = new TreeSet<String>();
+
+  protected String[] cmdLineArgs = null;
+
+  /**
+   * Override this to add command-line options using {@link #addOptWithArg}
+   * and similar methods.
+   */
+  protected abstract void addOptions();
+
+  /**
+   * This method is called to process the options after they have been parsed.
+   */
+  protected abstract void processOptions(CommandLine cmd);
+
+  /** The "main function" of the tool */
+  protected abstract int doWork() throws Exception;
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public final int run(String[] args) throws IOException {
+    if (conf == null) {
+      LOG.error("Tool configuration is not initialized");
+      throw new NullPointerException("conf");
+    }
+
+    CommandLine cmd;
+    try {
+      // parse the command line arguments
+      cmd = parseArgs(args);
+      cmdLineArgs = args;
+    } catch (ParseException e) {
+      LOG.error("Error when parsing command-line arguments", e);
+      printUsage();
+      return EXIT_FAILURE;
+    }
+
+    if (cmd.hasOption(SHORT_HELP_OPTION) || cmd.hasOption(LONG_HELP_OPTION) ||
+        !sanityCheckOptions(cmd)) {
+      printUsage();
+      return EXIT_FAILURE;
+    }
+
+    processOptions(cmd);
+
+    int ret = EXIT_FAILURE;
+    try {
+      ret = doWork();
+    } catch (Exception e) {
+      LOG.error("Error running command-line tool", e);
+      return EXIT_FAILURE;
+    }
+    return ret;
+  }
+
+  private boolean sanityCheckOptions(CommandLine cmd) {
+    boolean success = true;
+    for (String reqOpt : requiredOptions) {
+      if (!cmd.hasOption(reqOpt)) {
+        LOG.error("Required option -" + reqOpt + " is missing");
+        success = false;
+      }
+    }
+    return success;
+  }
+
+  protected CommandLine parseArgs(String[] args) throws ParseException {
+    options.addOption(SHORT_HELP_OPTION, LONG_HELP_OPTION, false, "Show usage");
+    addOptions();
+    CommandLineParser parser = new BasicParser();
+    return parser.parse(options, args);
+  }
+
+  protected void printUsage() {
+    printUsage("bin/hbase " + getClass().getName() + " <options>", "Options:", "");
+  }
+
+  protected void printUsage(final String usageStr, final String usageHeader,
+      final String usageFooter) {
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.setWidth(120);
+    helpFormatter.printHelp(usageStr, usageHeader, options, usageFooter);
+  }
+
+  protected void addRequiredOptWithArg(String opt, String description) {
+    requiredOptions.add(opt);
+    addOptWithArg(opt, description);
+  }
+
+  protected void addRequiredOptWithArg(String shortOpt, String longOpt, String description) {
+    requiredOptions.add(longOpt);
+    addOptWithArg(shortOpt, longOpt, description);
+  }
+
+  protected void addOptNoArg(String opt, String description) {
+    options.addOption(opt, false, description);
+  }
+
+  protected void addOptNoArg(String shortOpt, String longOpt, String description) {
+    options.addOption(shortOpt, longOpt, false, description);
+  }
+
+  protected void addOptWithArg(String opt, String description) {
+    options.addOption(opt, true, description);
+  }
+
+  protected void addOptWithArg(String shortOpt, String longOpt, String description) {
+    options.addOption(shortOpt, longOpt, true, description);
+  }
+
+  /**
+   * Parse a number and enforce a range.
+   */
+  public static long parseLong(String s, long minValue, long maxValue) {
+    long l = Long.parseLong(s);
+    if (l < minValue || l > maxValue) {
+      throw new IllegalArgumentException("The value " + l
+          + " is out of range [" + minValue + ", " + maxValue + "]");
+    }
+    return l;
+  }
+
+  public static int parseInt(String s, int minValue, int maxValue) {
+    return (int) parseLong(s, minValue, maxValue);
+  }
+
+  /** Call this from the concrete tool class's main function. */
+  protected void doStaticMain(String args[]) {
+    int ret;
+    try {
+      ret = ToolRunner.run(HBaseConfiguration.create(), this, args);
+    } catch (Exception ex) {
+      LOG.error("Error running command-line tool", ex);
+      ret = EXIT_FAILURE;
+    }
+    System.exit(ret);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/cf2e2fde/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
deleted file mode 100644
index a876aef..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * Common base class used for HBase command-line tools. Simplifies workflow and
- * command-line argument parsing.
- */
-@InterfaceAudience.Private
-public abstract class AbstractHBaseTool implements Tool {
-
-  protected static final int EXIT_SUCCESS = 0;
-  protected static final int EXIT_FAILURE = 1;
-
-  private static final String SHORT_HELP_OPTION = "h";
-  private static final String LONG_HELP_OPTION = "help";
-
-  private static final Log LOG = LogFactory.getLog(AbstractHBaseTool.class);
-
-  private final Options options = new Options();
-
-  protected Configuration conf = null;
-
-  private static final Set<String> requiredOptions = new TreeSet<String>();
-
-  protected String[] cmdLineArgs = null;
-
-  /**
-   * Override this to add command-line options using {@link #addOptWithArg}
-   * and similar methods.
-   */
-  protected abstract void addOptions();
-
-  /**
-   * This method is called to process the options after they have been parsed.
-   */
-  protected abstract void processOptions(CommandLine cmd);
-
-  /** The "main function" of the tool */
-  protected abstract int doWork() throws Exception;
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public final int run(String[] args) throws IOException {
-    if (conf == null) {
-      LOG.error("Tool configuration is not initialized");
-      throw new NullPointerException("conf");
-    }
-
-    CommandLine cmd;
-    try {
-      // parse the command line arguments
-      cmd = parseArgs(args);
-      cmdLineArgs = args;
-    } catch (ParseException e) {
-      LOG.error("Error when parsing command-line arguments", e);
-      printUsage();
-      return EXIT_FAILURE;
-    }
-
-    if (cmd.hasOption(SHORT_HELP_OPTION) || cmd.hasOption(LONG_HELP_OPTION) ||
-        !sanityCheckOptions(cmd)) {
-      printUsage();
-      return EXIT_FAILURE;
-    }
-
-    processOptions(cmd);
-
-    int ret = EXIT_FAILURE;
-    try {
-      ret = doWork();
-    } catch (Exception e) {
-      LOG.error("Error running command-line tool", e);
-      return EXIT_FAILURE;
-    }
-    return ret;
-  }
-
-  private boolean sanityCheckOptions(CommandLine cmd) {
-    boolean success = true;
-    for (String reqOpt : requiredOptions) {
-      if (!cmd.hasOption(reqOpt)) {
-        LOG.error("Required option -" + reqOpt + " is missing");
-        success = false;
-      }
-    }
-    return success;
-  }
-
-  protected CommandLine parseArgs(String[] args) throws ParseException {
-    options.addOption(SHORT_HELP_OPTION, LONG_HELP_OPTION, false, "Show usage");
-    addOptions();
-    CommandLineParser parser = new BasicParser();
-    return parser.parse(options, args);
-  }
-
-  protected void printUsage() {
-    printUsage("bin/hbase " + getClass().getName() + " <options>", "Options:", "");
-  }
-
-  protected void printUsage(final String usageStr, final String usageHeader,
-      final String usageFooter) {
-    HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.setWidth(120);
-    helpFormatter.printHelp(usageStr, usageHeader, options, usageFooter);
-  }
-
-  protected void addRequiredOptWithArg(String opt, String description) {
-    requiredOptions.add(opt);
-    addOptWithArg(opt, description);
-  }
-
-  protected void addRequiredOptWithArg(String shortOpt, String longOpt, String description) {
-    requiredOptions.add(longOpt);
-    addOptWithArg(shortOpt, longOpt, description);
-  }
-
-  protected void addOptNoArg(String opt, String description) {
-    options.addOption(opt, false, description);
-  }
-
-  protected void addOptNoArg(String shortOpt, String longOpt, String description) {
-    options.addOption(shortOpt, longOpt, false, description);
-  }
-
-  protected void addOptWithArg(String opt, String description) {
-    options.addOption(opt, true, description);
-  }
-
-  protected void addOptWithArg(String shortOpt, String longOpt, String description) {
-    options.addOption(shortOpt, longOpt, true, description);
-  }
-
-  /**
-   * Parse a number and enforce a range.
-   */
-  public static long parseLong(String s, long minValue, long maxValue) {
-    long l = Long.parseLong(s);
-    if (l < minValue || l > maxValue) {
-      throw new IllegalArgumentException("The value " + l
-          + " is out of range [" + minValue + ", " + maxValue + "]");
-    }
-    return l;
-  }
-
-  public static int parseInt(String s, int minValue, int maxValue) {
-    return (int) parseLong(s, minValue, maxValue);
-  }
-
-  /** Call this from the concrete tool class's main function. */
-  protected void doStaticMain(String args[]) {
-    int ret;
-    try {
-      ret = ToolRunner.run(HBaseConfiguration.create(), this, args);
-    } catch (Exception ex) {
-      LOG.error("Error running command-line tool", ex);
-      ret = EXIT_FAILURE;
-    }
-    System.exit(ret);
-  }
-
-}
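
For context on what moved: a typical consumer follows the contract of the class listed above. The sketch below is illustrative (the tool name and the "-t" option are invented); every overridden member and inherited helper appears in the file as shown, and the move to hbase-common presumably lets such tools live in modules that do not depend on hbase-server.

    import org.apache.commons.cli.CommandLine;
    import org.apache.hadoop.hbase.util.AbstractHBaseTool;

    // Hypothetical tool illustrating the AbstractHBaseTool contract.
    public class RowCountTool extends AbstractHBaseTool {
      private String tableName;

      @Override
      protected void addOptions() {
        addRequiredOptWithArg("t", "table", "Table to operate on");
      }

      @Override
      protected void processOptions(CommandLine cmd) {
        tableName = cmd.getOptionValue("t");
      }

      @Override
      protected int doWork() throws Exception {
        System.out.println("Would scan table " + tableName); // real work goes here
        return EXIT_SUCCESS;
      }

      public static void main(String[] args) {
        // Wires up HBaseConfiguration, argument parsing and System.exit.
        new RowCountTool().doStaticMain(args);
      }
    }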


[04/50] hbase git commit: HBASE-15085 IllegalStateException was thrown when scanning on bulkloaded HFiles (Victor Xu)

Posted by la...@apache.org.
HBASE-15085 IllegalStateException was thrown when scanning on bulkloaded
HFiles (Victor Xu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccc8e4a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccc8e4a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccc8e4a2

Branch: refs/heads/branch-1.0
Commit: ccc8e4a23862a2c44752e479cbf1f1bba1814511
Parents: 556741b
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Jan 12 14:36:48 2016 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Jan 12 14:40:27 2016 +0530

----------------------------------------------------------------------
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |  6 +++
 .../mapreduce/TestLoadIncrementalHFiles.java    | 46 ++++++++++++++++++++
 .../apache/hadoop/hbase/util/HFileTestUtil.java | 25 +++++++++--
 3 files changed, 73 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ccc8e4a2/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 23b927b..093e30b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -859,6 +860,11 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   }
 
   private static boolean shouldCopyHFileMetaKey(byte[] key) {
+    // skip encoding to keep hfile meta consistent with data block info, see HBASE-15085
+    if (Bytes.equals(key, HFileDataBlockEncoder.DATA_BLOCK_ENCODING)) {
+      return false;
+    }
+
     return !HFile.isReservedFileInfoKey(key);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccc8e4a2/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
index 96fc9bb..ed75e5e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -471,6 +472,51 @@ public class TestLoadIncrementalHFiles {
     assertEquals(1000, rowCount);
   }
 
+  @Test
+  public void testSplitStoreFileWithNoneToNone() throws IOException {
+    testSplitStoreFileWithDifferentEncoding(DataBlockEncoding.NONE, DataBlockEncoding.NONE);
+  }
+
+  @Test
+  public void testSplitStoreFileWithEncodedToEncoded() throws IOException {
+    testSplitStoreFileWithDifferentEncoding(DataBlockEncoding.DIFF, DataBlockEncoding.DIFF);
+  }
+
+  @Test
+  public void testSplitStoreFileWithEncodedToNone() throws IOException {
+    testSplitStoreFileWithDifferentEncoding(DataBlockEncoding.DIFF, DataBlockEncoding.NONE);
+  }
+
+  @Test
+  public void testSplitStoreFileWithNoneToEncoded() throws IOException {
+    testSplitStoreFileWithDifferentEncoding(DataBlockEncoding.NONE, DataBlockEncoding.DIFF);
+  }
+
+  private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
+      DataBlockEncoding cfEncoding) throws IOException {
+    Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
+    FileSystem fs = util.getTestFileSystem();
+    Path testIn = new Path(dir, "testhfile");
+    HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
+    familyDesc.setDataBlockEncoding(cfEncoding);
+    HFileTestUtil.createHFileWithDataBlockEncoding(
+        util.getConfiguration(), fs, testIn, bulkloadEncoding,
+        FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
+
+    Path bottomOut = new Path(dir, "bottom.out");
+    Path topOut = new Path(dir, "top.out");
+
+    LoadIncrementalHFiles.splitStoreFile(
+        util.getConfiguration(), testIn,
+        familyDesc, Bytes.toBytes("ggg"),
+        bottomOut,
+        topOut);
+
+    int rowCount = verifyHFile(bottomOut);
+    rowCount += verifyHFile(topOut);
+    assertEquals(1000, rowCount);
+  }
+
   private int verifyHFile(Path p) throws IOException {
     Configuration conf = util.getConfiguration();
     HFile.Reader reader = HFile.createReader(

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccc8e4a2/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
index c2c938f..028937c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -57,7 +58,21 @@ public class HFileTestUtil {
       FileSystem fs, Path path,
       byte[] family, byte[] qualifier,
       byte[] startKey, byte[] endKey, int numRows) throws IOException {
-    createHFile(configuration, fs, path, family, qualifier, startKey, endKey,
+      createHFile(configuration, fs, path, DataBlockEncoding.NONE, family, qualifier,
+        startKey, endKey, numRows, false);
+  }
+
+  /**
+   * Create an HFile with the given number of rows between a given
+   * start key and end key @ family:qualifier.  The value will be the key value.
+   * This file will use the given data block encoding algorithm.
+   */
+  public static void createHFileWithDataBlockEncoding(
+      Configuration configuration,
+      FileSystem fs, Path path, DataBlockEncoding encoding,
+      byte[] family, byte[] qualifier,
+      byte[] startKey, byte[] endKey, int numRows) throws IOException {
+      createHFile(configuration, fs, path, encoding, family, qualifier, startKey, endKey,
         numRows, false);
   }
 
@@ -71,7 +86,8 @@ public class HFileTestUtil {
       FileSystem fs, Path path,
       byte[] family, byte[] qualifier,
       byte[] startKey, byte[] endKey, int numRows) throws IOException {
-    createHFile(configuration, fs, path, family, qualifier, startKey, endKey, numRows, true);
+      createHFile(configuration, fs, path, DataBlockEncoding.NONE, family, qualifier,
+        startKey, endKey, numRows, true);
   }
 
   /**
@@ -82,11 +98,12 @@ public class HFileTestUtil {
    */
   public static void createHFile(
       Configuration configuration,
-      FileSystem fs, Path path,
+      FileSystem fs, Path path, DataBlockEncoding encoding,
       byte[] family, byte[] qualifier,
       byte[] startKey, byte[] endKey, int numRows, boolean withTag) throws IOException {
     HFileContext meta = new HFileContextBuilder()
         .withIncludesTags(withTag)
+        .withDataBlockEncoding(encoding)
         .build();
     HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
         .withPath(fs, path)
@@ -150,4 +167,4 @@ public class HFileTestUtil {
       }
     }
   }
-}
\ No newline at end of file
+}
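
The regression test above drives the new HFileTestUtil overload; in isolation the call looks like the sketch below (the family, qualifier and row range are illustrative values mirroring the test; the signature is the one added in this diff). Splitting such a file for a family with a different configured encoding previously copied the file's DATA_BLOCK_ENCODING meta key verbatim, leaving the meta inconsistent with the re-written blocks; shouldCopyHFileMetaKey now skips that key.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.HFileTestUtil;

    public final class EncodedHFileSketch {
      /** Write a DIFF-encoded HFile with 1000 rows in f:q for a bulkload test. */
      static void writeEncodedHFile(Configuration conf, FileSystem fs, Path path)
          throws IOException {
        HFileTestUtil.createHFileWithDataBlockEncoding(conf, fs, path,
            DataBlockEncoding.DIFF, Bytes.toBytes("f"), Bytes.toBytes("q"),
            Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
      }
    }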


[20/50] hbase git commit: HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213

Posted by la...@apache.org.
HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b9c3419d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b9c3419d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b9c3419d

Branch: refs/heads/branch-1.0
Commit: b9c3419d483493a29b4980cc2b38ca629d80a598
Parents: eb9af81
Author: stack <st...@apache.org>
Authored: Mon Feb 8 08:43:11 2016 -0800
Committer: stack <st...@apache.org>
Committed: Mon Feb 8 10:14:57 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HRegion.java      | 143 +------------------
 ...tIncrementFromClientSideWithCoprocessor.java |   5 -
 .../client/TestIncrementsFromClientSide.java    |  95 ++----------
 .../hbase/regionserver/TestAtomicOperation.java |  62 +++-----
 .../hbase/regionserver/TestRegionIncrement.java |  24 +---
 5 files changed, 30 insertions(+), 299 deletions(-)
----------------------------------------------------------------------
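
For operators tracking configuration keys: the setting removed here used to be enabled as in the snippet below (illustrative; the key string is the one deleted from HRegion). After this commit the flag is a no-op, since increments always take the single consistent path shown in the diff that follows.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class ObsoleteIncrementFlag {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Obsolete after HBASE-15224: no longer selects a separate fast path.
        conf.setBoolean("hbase.increment.fast.but.narrow.consistency", true);
      }
    }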


http://git-wip-us.apache.org/repos/asf/hbase/blob/b9c3419d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0c5dfe7..8aed3a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -221,16 +221,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
       "hbase.hregion.scan.loadColumnFamiliesOnDemand";
 
   /**
-   * Set region to take the fast increment path. Constraint is that caller can only access the
-   * Cell via Increment; intermixing Increment with other Mutations will give indeterminate
-   * results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will get the latest increment
-   * or an Increment of zero will do the same.
-   */
-  public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-      "hbase.increment.fast.but.narrow.consistency";
-  private final boolean incrementFastButNarrowConsistency;
-
-  /**
    * This is the global default value for durability. All tables/mutations not
    * defining a durability or using USE_DEFAULT will default to this value.
    */
@@ -712,10 +702,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
           false :
           conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
               HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
-
-    // See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is about.
-    this.incrementFastButNarrowConsistency =
-      this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
   }
 
   void setHTableSpecificConf() {
@@ -5840,139 +5826,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     startRegionOperation(Operation.INCREMENT);
     this.writeRequestsCount.increment();
     try {
-      // Which Increment is it? Narrow increment-only consistency or slow (default) and general
-      // row-wide consistency.
-
-      // So, difference between fastAndNarrowConsistencyIncrement and slowButConsistentIncrement is
-      // that the former holds the row lock until the sync completes; this allows us to reason that
-      // there are no other writers afoot when we read the current increment value. The row lock
-      // means that we do not need to wait on mvcc reads to catch up to writes before we proceed
-      // with the read, the root of the slowdown seen in HBASE-14460. The fast-path also does not
-      // wait on mvcc to complete before returning to the client. We also reorder the write so that
-      // the update of memstore happens AFTER sync returns; i.e. the write pipeline does less
-      // zigzagging now.
-      //
-      // See the comment on INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY
-      // for the constraints that apply when you take this code path; it is correct but only if
-      // Increments are used mutating an Increment Cell; mixing concurrent Put+Delete and Increment
-      // will yield indeterminate results.
-      return this.incrementFastButNarrowConsistency?
-        fastAndNarrowConsistencyIncrement(increment, nonceGroup, nonce):
-        slowButConsistentIncrement(increment, nonceGroup, nonce);
+      return doIncrement(increment, nonceGroup, nonce);
     } finally {
       if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
       closeRegionOperation(Operation.INCREMENT);
     }
   }
 
-  /**
-   * The bulk of this method is a bulk-and-paste of the slowButConsistentIncrement but with some
-   * reordering to enable the fast increment (reordering allows us to also drop some state
-   * carrying Lists and variables so the flow here is more straight-forward). We copy-and-paste
-   * because cannot break down the method further into smaller pieces. Too much state. Will redo
-   * in trunk and tip of branch-1 to undo duplication here and in append, checkAnd*, etc. For why
-   * this route is 'faster' than the alternative slowButConsistentIncrement path, see the comment
-   * in calling method.
-   * @return Resulting increment
-   * @throws IOException
-   */
-  private Result fastAndNarrowConsistencyIncrement(Increment increment, long nonceGroup,
-      long nonce)
-  throws IOException {
-    long accumulatedResultSize = 0;
-    RowLock rowLock = null;
-    WALKey walKey = null;
-    // This is all kvs accumulated during this increment processing. Includes increments where the
-    // increment is zero: i.e. client just wants to get current state of the increment w/o
-    // changing it. These latter increments by zero are NOT added to the WAL.
-    List<Cell> allKVs = new ArrayList<Cell>(increment.size());
-    Durability effectiveDurability = getEffectiveDurability(increment.getDurability());
-    long txid = 0;
-    rowLock = getRowLock(increment.getRow());
-    try {
-      lock(this.updatesLock.readLock());
-      try {
-        if (this.coprocessorHost != null) {
-          Result r = this.coprocessorHost.preIncrementAfterRowLock(increment);
-          if (r != null) return increment.isReturnResults() ? r : null;
-        }
-        // Process increments a Store/family at a time.
-        long now = EnvironmentEdgeManager.currentTime();
-        final boolean writeToWAL = effectiveDurability != Durability.SKIP_WAL;
-        WALEdit walEdits = null;
-        // Accumulate edits for memstore to add later after we've added to WAL.
-        Map<Store, List<Cell>> forMemStore = new HashMap<Store, List<Cell>>();
-        for (Map.Entry<byte [], List<Cell>> entry: increment.getFamilyCellMap().entrySet()) {
-          byte [] columnFamilyName = entry.getKey();
-          List<Cell> increments = entry.getValue();
-          Store store = this.stores.get(columnFamilyName);
-          // Do increment for this store; be sure to 'sort' the increments first so increments
-          // match order in which we get back current Cells when we get.
-          List<Cell> results = applyIncrementsToColumnFamily(increment, columnFamilyName,
-              sort(increments, store.getComparator()), now,
-              MultiVersionConsistencyControl.NO_WRITE_NUMBER, allKVs,
-              IsolationLevel.READ_UNCOMMITTED);
-          if (!results.isEmpty()) {
-            forMemStore.put(store, results);
-            // Prepare WAL updates
-            if (writeToWAL) {
-              if (walEdits == null) walEdits = new WALEdit();
-              walEdits.getCells().addAll(results);
-            }
-          }
-        }
-
-        // Actually write to WAL now. If walEdits is non-empty, we write the WAL.
-        if (walEdits != null && !walEdits.isEmpty()) {
-          // Using default cluster id, as this can only happen in the originating cluster.
-          // A slave cluster receives the final value (not the delta) as a Put. We use HLogKey
-          // here instead of WALKey directly to support legacy coprocessors.
-          walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(),
-            this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, nonceGroup, nonce);
-          txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(),
-             walKey, walEdits, getSequenceId(), true, null/*walEdits has the List to apply*/);
-        } else {
-          // Append a faked WALEdit in order for SKIP_WAL updates to get mvccNum assigned
-          walKey = this.appendEmptyEdit(this.wal, null/*walEdits has the List to apply*/);
-        }
-
-        if (txid != 0) syncOrDefer(txid, effectiveDurability);
-
-        // Tell MVCC about the new sequenceid.
-        WriteEntry we = mvcc.beginMemstoreInsertWithSeqNum(walKey.getSequenceId());
-
-        // Now write to memstore.
-        for (Map.Entry<Store, List<Cell>> entry: forMemStore.entrySet()) {
-          Store store = entry.getKey();
-          List<Cell> results = entry.getValue();
-          if (store.getFamily().getMaxVersions() == 1) {
-            // Upsert if VERSIONS for this CF == 1. Use write sequence id rather than read point
-            // when doing fast increment.
-            accumulatedResultSize += store.upsert(results, walKey.getSequenceId());
-          } else {
-            // Otherwise keep older versions around
-            for (Cell cell: results) {
-              Pair<Long, Cell> ret = store.add(cell);
-              accumulatedResultSize += ret.getFirst();
-            }
-          }
-        }
-
-        // Tell mvcc this write is complete.
-        this.mvcc.advanceMemstore(we);
-      } finally {
-        this.updatesLock.readLock().unlock();
-      }
-    } finally {
-      rowLock.release();
-    }
-    // Request a cache flush.  Do it outside update lock.
-    if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
-    return increment.isReturnResults() ? Result.create(allKVs) : null;
-  }
-
-  private Result slowButConsistentIncrement(Increment increment, long nonceGroup, long nonce)
-  throws IOException {
+  private Result doIncrement(Increment increment, long nonceGroup, long nonce) throws IOException {
     RowLock rowLock = null;
     WriteEntry writeEntry = null;
     WALKey walKey = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b9c3419d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
index a67cc45..c9bc7c2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
@@ -34,16 +34,11 @@ import org.junit.experimental.categories.Category;
  */
 @Category(LargeTests.class)
 public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFromClientSide {
-  public TestIncrementFromClientSideWithCoprocessor(final boolean fast) {
-    super(fast);
-  }
-
   @Before
   public void before() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
     conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
-    super.before();
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/b9c3419d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
index 77cebbd..188fb66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
@@ -18,47 +18,37 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertTrue;
 
 /**
  * Run Increment tests that use the HBase clients; {@link HTable}.
- * 
+ *
  * Test is parameterized to run the slow and fast increment code paths. If fast, in the @before, we
  * do a rolling restart of the single regionserver so that it can pick up the go fast configuration.
  * Doing it this way should be faster than starting/stopping a cluster per test.
- * 
+ *
  * Test takes a long time because spin up a cluster between each run -- ugh.
  */
-@RunWith(Parameterized.class)
 @Category(LargeTests.class)
 @SuppressWarnings ("deprecation")
 public class TestIncrementsFromClientSide {
@@ -69,47 +59,18 @@ public class TestIncrementsFromClientSide {
   // This test depends on there being only one slave running at a time. See the @Before
   // method where we do rolling restart.
   protected static int SLAVES = 1;
-  private String oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY;
   @Rule public TestName name = new TestName();
-  @Parameters(name = "fast={0}")
   public static Collection<Object []> data() {
     return Arrays.asList(new Object[] {Boolean.FALSE}, new Object [] {Boolean.TRUE});
   }
-  private final boolean fast;
-
-  public TestIncrementsFromClientSide(final boolean fast) {
-    this.fast = fast;
-  }
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-        MultiRowMutationEndpoint.class.getName());
-    conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
+    TEST_UTIL.startMiniCluster();
   }
 
-  @Before
-  public void before() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    if (this.fast) {
-      this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-          conf.get(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
-      conf.setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
-    }
-    // We need more than one region server in this test
-    TEST_UTIL.startMiniCluster(SLAVES);
-  }
-
-  @After
-  public void after() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    if (this.fast) {
-      if (this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY != null) {
-        conf.set(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY,
-            this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
-      }
-    }
+  @AfterClass
+  public static void afterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -130,7 +91,6 @@ public class TestIncrementsFromClientSide {
     ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
 
     Get get = new Get(ROW);
-    if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     Result r = ht.get(get);
     assertEquals(1, r.size());
     assertEquals(5, Bytes.toLong(r.getValue(FAMILY, COLUMN)));
@@ -164,35 +124,6 @@ public class TestIncrementsFromClientSide {
   }
 
   @Test
-  public void testIncrementReturnValue() throws Exception {
-    LOG.info("Starting " + this.name.getMethodName());
-    final TableName TABLENAME =
-      TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
-    Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
-    final byte[] COLUMN = Bytes.toBytes("column");
-    Put p = new Put(ROW);
-    p.add(FAMILY, COLUMN, Bytes.toBytes(5L));
-    ht.put(p);
-
-    Increment inc = new Increment(ROW);
-    inc.addColumn(FAMILY, COLUMN, 5L);
-
-    Result r = ht.increment(inc);
-    long result = Bytes.toLong(r.getValue(FAMILY, COLUMN));
-    assertEquals(10, result);
-
-    if (this.fast) {
-      inc = new Increment(ROW);
-      inc.addColumn(FAMILY, COLUMN, 5L);
-      inc.setReturnResults(false);
-      r = ht.increment(inc);
-      assertTrue(r.getExists() == null);
-    }
-
-  }
-
-
-  @Test
   public void testIncrementInvalidArguments() throws Exception {
     LOG.info("Starting " + this.name.getMethodName());
     final TableName TABLENAME =
@@ -267,7 +198,6 @@ public class TestIncrementsFromClientSide {
 
     // Verify expected results
     Get get = new Get(ROW);
-    if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     Result r = ht.get(get);
     Cell [] kvs = r.rawCells();
     assertEquals(3, kvs.length);
@@ -309,7 +239,6 @@ public class TestIncrementsFromClientSide {
 
     // Verify expected results
     Get get = new Get(ROW);
-    if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     Result r = ht.get(get);
     Cell[] kvs = r.rawCells();
     assertEquals(3, kvs.length);
@@ -332,7 +261,7 @@ public class TestIncrementsFromClientSide {
     assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 2);
     assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 2);
     assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
-    
+
     ht.close();
   }
 
@@ -371,7 +300,6 @@ public class TestIncrementsFromClientSide {
 
     // Verify expected results
     Get get = new Get(ROW);
-    if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     Result r = ht.get(get);
     Cell [] kvs = r.rawCells();
     assertEquals(5, kvs.length);
@@ -389,7 +317,6 @@ public class TestIncrementsFromClientSide {
     ht.increment(inc);
     // Verify
     get = new Get(ROWS[0]);
-    if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     r = ht.get(get);
     kvs = r.rawCells();
     assertEquals(QUALIFIERS.length, kvs.length);
@@ -438,4 +365,4 @@ public class TestIncrementsFromClientSide {
   public static String filterStringSoTableNameSafe(final String str) {
     return str.replaceAll("\\[fast\\=(.*)\\]", ".FAST.is.$1");
   }
-}
\ No newline at end of file
+}
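
For reference, a minimal sketch of the class-level cluster lifecycle this test class now
uses (standard HBaseTestingUtility API; the class name here is illustrative):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ExampleMiniClusterTest {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void beforeClass() throws Exception {
    // One mini cluster for the whole class, instead of per-test configuration juggling.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void afterClass() throws Exception {
    // Torn down once, after every test in the class has run.
    TEST_UTIL.shutdownMiniCluster();
  }
}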

http://git-wip-us.apache.org/repos/asf/hbase/blob/b9c3419d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index f2ea717..74d8cba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -93,11 +92,11 @@ public class TestAtomicOperation {
   static final byte [] row = Bytes.toBytes("rowA");
   static final byte [] row2 = Bytes.toBytes("rowB");
 
-  @Before 
+  @Before
   public void setup() {
     tableName = Bytes.toBytes(name.getMethodName());
   }
-  
+
   @After
   public void teardown() throws IOException {
     if (region != null) {
@@ -107,7 +106,7 @@ public class TestAtomicOperation {
   }
   //////////////////////////////////////////////////////////////////////////////
   // New tests that don't spin up a mini cluster but rather just test the
-  // individual code pieces in the HRegion. 
+  // individual code pieces in the HRegion.
   //////////////////////////////////////////////////////////////////////////////
 
   /**
@@ -135,29 +134,10 @@ public class TestAtomicOperation {
   }
 
   /**
-   * Test multi-threaded increments. Take the fast but narrow consistency path through HRegion.
-   */
-  @Test
-  public void testIncrementMultiThreadsFastPath() throws IOException {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    String oldValue = conf.get(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
-    conf.setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, true);
-    try {
-      testIncrementMultiThreads(true);
-    } finally {
-      if (oldValue != null) conf.set(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, oldValue);
-    }
-  }
-
-  /**
-   * Test multi-threaded increments. Take the slow but consistent path through HRegion.
+   * Test multi-threaded increments.
    */
   @Test
-  public void testIncrementMultiThreadsSlowPath() throws IOException {
-    testIncrementMultiThreads(false);
-  }
-
-  private void testIncrementMultiThreads(final boolean fast) throws IOException {
+  public void testIncrementMultiThreads() throws IOException {
     LOG.info("Starting test testIncrementMultiThreads");
     // run with mixed column families (1 and 3 versions)
     initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
@@ -187,23 +167,19 @@ public class TestAtomicOperation {
       } catch (InterruptedException e) {
       }
     }
-
-    assertICV(row, fam1, qual1, expectedTotal, fast);
-    assertICV(row, fam1, qual2, expectedTotal*2, fast);
-    assertICV(row, fam2, qual3, expectedTotal*3, fast);
-    LOG.info("testIncrementMultiThreads successfully verified that total is " +
-             expectedTotal);
+    assertICV(row, fam1, qual1, expectedTotal);
+    assertICV(row, fam1, qual2, expectedTotal*2);
+    assertICV(row, fam2, qual3, expectedTotal*3);
+    LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal);
   }
 
 
   private void assertICV(byte [] row,
                          byte [] familiy,
                          byte[] qualifier,
-                         long amount,
-                         boolean fast) throws IOException {
+                         long amount) throws IOException {
     // run a get and see?
     Get get = new Get(row);
-    if (fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     get.addColumn(familiy, qualifier);
     Result result = region.get(get);
     assertEquals(1, result.size());
@@ -242,7 +218,6 @@ public class TestAtomicOperation {
     private final HRegion region;
     private final int numIncrements;
     private final int amount;
-    private final boolean fast;
 
 
     public Incrementer(HRegion region, int threadNumber, int amount, int numIncrements) {
@@ -250,8 +225,6 @@ public class TestAtomicOperation {
       this.region = region;
       this.numIncrements = numIncrements;
       this.amount = amount;
-      this.fast = region.getBaseConf().
-          getBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
       setDaemon(true);
     }
 
@@ -306,8 +279,8 @@ public class TestAtomicOperation {
 
               Get g = new Get(row);
               Result result = region.get(g);
-              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length); 
-              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length); 
+              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length);
+              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length);
             } catch (IOException e) {
               e.printStackTrace();
               failures.incrementAndGet();
@@ -539,7 +512,7 @@ public class TestAtomicOperation {
       this.failures = failures;
     }
   }
-  
+
   private static CountDownLatch latch = new CountDownLatch(1);
   private enum TestStep {
     INIT,                  // initial put of 10 to set value of the cell
@@ -551,11 +524,11 @@ public class TestAtomicOperation {
   }
   private static volatile TestStep testStep = TestStep.INIT;
   private final String family = "f1";
-     
+
   /**
    * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
-   * MVCC. 
-   * 
+   * MVCC.
+   *
    * Moved into TestAtomicOperation from its original location, TestHBase7051
    */
   @Test
@@ -571,7 +544,6 @@ public class TestAtomicOperation {
     Put put = new Put(Bytes.toBytes("r1"));
     put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
     puts[0] = put;
-    
     region.batchMutate(puts);
     MultithreadedTestUtil.TestContext ctx =
       new MultithreadedTestUtil.TestContext(conf);
@@ -645,9 +617,7 @@ public class TestAtomicOperation {
       }
       return new WrappedRowLock(super.getRowLockInternal(row, waitForLock));
     }
-    
     public class WrappedRowLock extends RowLock {
-
       private WrappedRowLock(RowLock rowLock) {
         super(rowLock.context);
       }
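
For context, the removed branches leaned on the client-side isolation-level API; a minimal
sketch of that call (row value illustrative):

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadUncommittedSketch {
  static Get fastPathGet() {
    Get get = new Get(Bytes.toBytes("rowA"));
    // The "fast but narrow consistency" increment path wrote outside MVCC, so a
    // verifying read had to opt out of read-committed visibility explicitly.
    get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    return get;
  }
}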

http://git-wip-us.apache.org/repos/asf/hbase/blob/b9c3419d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
index 92285a8..c9cb90f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
@@ -43,15 +43,11 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
 
 
 /**
@@ -64,38 +60,20 @@ import org.junit.runners.Parameterized.Parameters;
  * prove atomicity on row.
  */
 @Category(MediumTests.class)
-@RunWith(Parameterized.class)
 public class TestRegionIncrement {
   private static final Log LOG = LogFactory.getLog(TestRegionIncrement.class);
   @Rule public TestName name = new TestName();
   @Rule public final TestRule timeout =
       CategoryBasedTimeout.builder().withTimeout(this.getClass()).
         withLookingForStuckThread(true).build();
-  private static HBaseTestingUtility TEST_UTIL;
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final static byte [] INCREMENT_BYTES = Bytes.toBytes("increment");
   private static final int THREAD_COUNT = 10;
   private static final int INCREMENT_COUNT = 10000;
-
-  @Parameters(name = "fast={0}")
   public static Collection<Object []> data() {
     return Arrays.asList(new Object[] {Boolean.FALSE}, new Object [] {Boolean.TRUE});
   }
 
-  private final boolean fast;
-
-  public TestRegionIncrement(final boolean fast) {
-    this.fast = fast;
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    TEST_UTIL = HBaseTestingUtility.createLocalHTU();
-    if (this.fast) {
-      TEST_UTIL.getConfiguration().
-        setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
-    }
-  }
-
   @After
   public void tearDown() throws Exception {
     TEST_UTIL.cleanupTestDir();
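
The boilerplate being dropped here is JUnit 4's parameterized runner; in general form it
looked like this (class and test names illustrative):

import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ExampleParameterizedTest {
  @Parameters(name = "fast={0}")
  public static Collection<Object[]> data() {
    return Arrays.asList(new Object[] {Boolean.FALSE}, new Object[] {Boolean.TRUE});
  }

  private final boolean fast;

  public ExampleParameterizedTest(boolean fast) {
    this.fast = fast;
  }

  @Test
  public void testSomething() {
    // Runs once per entry returned by data(), with this.fast bound accordingly.
  }
}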


[33/50] hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Posted by la...@apache.org.
HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Conflicts:
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a74c495f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a74c495f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a74c495f

Branch: refs/heads/branch-1.0
Commit: a74c495f32ff5446377b2b5d6d4d190ca2f00457
Parents: 52f8ad9
Author: chenheng <ch...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Thu Apr 21 15:00:27 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/ClusterConnection.java      |  3 +++
 .../hadoop/hbase/client/ConnectionAdapter.java      |  6 ++++++
 .../hadoop/hbase/client/ConnectionManager.java      |  3 ++-
 .../apache/hadoop/hbase/client/ConnectionUtils.java |  1 +
 .../java/org/apache/hadoop/hbase/client/HTable.java |  3 +++
 .../hadoop/hbase/client/TestFromClientSide.java     | 16 +++++++++++++++-
 6 files changed, 30 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 9ceb112..40c4462 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -83,6 +83,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations location);
+
   /**
    * Allows flushing the region cache of all locations that pertain to
    * <code>tableName</code>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index d67df2a..fd56692 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -238,6 +238,12 @@ abstract class ConnectionAdapter implements ClusterConnection {
     wrappedConnection.clearRegionCache(tableName);
   }
 
+
+  @Override
+  public void cacheLocation(TableName tableName, RegionLocations location) {
+    wrappedConnection.cacheLocation(tableName, location);
+  }
+
   @Override
   public void deleteCachedRegionLocation(HRegionLocation location) {
     wrappedConnection.deleteCachedRegionLocation(location);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 78fb17f..ede3157 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1296,7 +1296,8 @@ class ConnectionManager {
      * @param tableName The table name.
      * @param location the new location
      */
-    private void cacheLocation(final TableName tableName, final RegionLocations location) {
+    @Override
+    public void cacheLocation(final TableName tableName, final RegionLocations location) {
       metaCache.cacheLocation(tableName, location);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index b939b17..cd6b0e0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -24,6 +24,7 @@ import java.util.concurrent.ExecutorService;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 418183e..533cbab 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -686,6 +686,9 @@ public class HTable implements HTableInterface, RegionLocator {
     for (Entry<HRegionInfo, ServerName> entry : locations.entrySet()) {
       regions.add(new HRegionLocation(entry.getKey(), entry.getValue()));
     }
+    if (regions.size() > 0) {
+      connection.cacheLocation(tableName, new RegionLocations(regions));
+    }
     return regions;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index b175c7a..4a0f37b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -44,7 +44,6 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicReference;
-
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -6132,4 +6131,19 @@ public class TestFromClientSide {
       }
     }
   }
+
+  @Test
+  public void testRegionCache() throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionCache"));
+    HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
+    htd.addFamily(fam);
+    byte[][] KEYS = HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE;
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    admin.createTable(htd, KEYS);
+    RegionLocator locator = admin.getConnection().getRegionLocator(htd.getTableName());
+    List<HRegionLocation> results = locator.getAllRegionLocations();
+    int number = ((ConnectionManager.HConnectionImplementation)admin.getConnection())
+      .getNumberOfCachedRegionLocations(htd.getTableName());
+    assertEquals(results.size(), number);
+  }
 }


[31/50] hbase git commit: HBASE-15582 SnapshotManifestV1 too verbose when there are no regions

Posted by la...@apache.org.
HBASE-15582 SnapshotManifestV1 too verbose when there are no regions


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/47baaed0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/47baaed0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/47baaed0

Branch: refs/heads/branch-1.0
Commit: 47baaed0c7185e8cccfcf9df7ea5b17268b94f37
Parents: d5eb7560
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Fri Apr 1 20:55:21 2016 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 1 21:00:32 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/47baaed0/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index 137acf3..6f00692 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -116,7 +116,7 @@ public class SnapshotManifestV1 {
       final SnapshotDescription desc) throws IOException {
     FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
     if (regions == null) {
-      LOG.info("No regions under directory:" + snapshotDir);
+      LOG.debug("No regions under directory:" + snapshotDir);
       return null;
     }
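
An empty snapshot directory is a normal condition, hence the demotion to debug. A common
companion idiom (not part of this patch, shown only as a sketch) guards the message so the
string is never built when debug logging is off:

    if (LOG.isDebugEnabled()) {
      LOG.debug("No regions under directory:" + snapshotDir);
    }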
 


[37/50] hbase git commit: HBASE-15720 Print row locks at the debug dump page

Posted by la...@apache.org.
HBASE-15720 Print row locks at the debug dump page

Conflicts:
    hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a837182b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a837182b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a837182b

Branch: refs/heads/branch-1.0
Commit: a837182b79df1eb1bf7c50418d54d9881ea26509
Parents: 719993e
Author: chenheng <ch...@apache.org>
Authored: Thu Apr 28 12:18:32 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Thu Apr 28 12:25:06 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HRegion.java      | 17 +++++++++++++++-
 .../hbase/regionserver/RSDumpServlet.java       | 21 +++++++++++++++++++-
 2 files changed, 36 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a837182b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8aed3a6..18a6684 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -268,7 +268,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
   //////////////////////////////////////////////////////////////////////////////
   // Members
   //////////////////////////////////////////////////////////////////////////////
-
   // map from a locked row to the context for that lock including:
   // - CountDownLatch for threads waiting on that row
   // - the thread that owns the lock (allow reentrancy)
@@ -3862,6 +3861,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     return this.stores.get(column);
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+    return lockedRows;
+  }
+
   /**
   * Return HStore instance. Does not do any copy: as the number of stores is limited, we
    *  iterate on the list.
@@ -6774,6 +6777,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
         latch.countDown();
       }
     }
+
+    @Override
+    public String toString() {
+      return "RowLockContext{" +
+        "row=" + row +
+        ", count=" + lockCount +
+        ", threadName=" + thread.getName() +
+        '}';
+    }
+
   }
 
   /**
@@ -6867,4 +6880,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
   public RegionSplitPolicy getSplitPolicy() {
     return this.splitPolicy;
   }
+
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a837182b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..56d0417 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
     out.println(LINE);
     TaskMonitor.get().dumpAsText(out);
 
+    out.println("\n\nRowLocks:");
+    out.println(LINE);
+    dumpRowLock(hrs, out);
+
     out.println("\n\nExecutors:");
     out.println(LINE);
     dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
     out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+    StringBuilder sb = new StringBuilder();
+    for (Region region : hrs.getOnlineRegionsLocalContext()) {
+      HRegion hRegion = (HRegion)region;
+      if (hRegion.getLockedRows().size() > 0) {
+        for (HRegion.RowLockContext rowLockContext : hRegion.getLockedRows().values()) {
+          sb.setLength(0);
+          sb.append(hRegion.getTableDesc().getTableName()).append(",")
+            .append(hRegion.getRegionInfo().getEncodedName()).append(",");
+          sb.append(rowLockContext.toString());
+          out.println(sb.toString());
+        }
+      }
+    }
+  }
+
   public static void dumpQueue(HRegionServer hrs, PrintWriter out)
       throws IOException {
     if (hrs.compactSplitThread != null) {
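
Given the StringBuilder logic above and RowLockContext#toString from the HRegion hunk, each
held lock is dumped as one line of the form (all values hypothetical):

    t1,5e1a2b3c4d5e6f708192a3b4c5d6e7f8,RowLockContext{row=rowA, count=1, threadName=handler-3}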


[17/50] hbase git commit: HBASE-15213 Fix increment performance regression caused by HBASE-8763 on branch-1.0 (Junegunn Choi)

Posted by la...@apache.org.
HBASE-15213 Fix increment performance regression caused by HBASE-8763 on branch-1.0 (Junegunn Choi)

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c555d36
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c555d36
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c555d36

Branch: refs/heads/branch-1.0
Commit: 6c555d36cd9928e44281b3280e57dd5f98b63fc8
Parents: 30eb2fb
Author: stack <st...@apache.org>
Authored: Fri Feb 5 13:28:16 2016 -0800
Committer: stack <st...@apache.org>
Committed: Fri Feb 5 18:03:28 2016 -0800

----------------------------------------------------------------------
 .../MultiVersionConsistencyControl.java         | 31 +++++++++++++-------
 1 file changed, 20 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c555d36/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java
index fffd7c0..8b9f41b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.LinkedHashSet;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -39,8 +39,8 @@ public class MultiVersionConsistencyControl {
   private final Object readWaiters = new Object();
 
   // This is the pending queue of writes.
-  private final LinkedList<WriteEntry> writeQueue =
-      new LinkedList<WriteEntry>();
+  private final LinkedHashSet<WriteEntry> writeQueue =
+      new LinkedHashSet<WriteEntry>();
 
   /**
    * Default constructor. Initializes the memstoreRead/Write points to 0.
@@ -100,7 +100,14 @@ public class MultiVersionConsistencyControl {
    * @return WriteEntry a WriteEntry instance with the passed in curSeqNum
    */
   public WriteEntry beginMemstoreInsertWithSeqNum(long curSeqNum) {
+    return beginMemstoreInsertWithSeqNum(curSeqNum, false);
+  }
+
+  private WriteEntry beginMemstoreInsertWithSeqNum(long curSeqNum, boolean complete) {
     WriteEntry e = new WriteEntry(curSeqNum);
+    if (complete) {
+      e.markCompleted();
+    }
     synchronized (writeQueue) {
       writeQueue.add(e);
       return e;
@@ -153,11 +160,11 @@ public class MultiVersionConsistencyControl {
       e.markCompleted();
 
       while (!writeQueue.isEmpty()) {
-        WriteEntry queueFirst = writeQueue.getFirst();
+        WriteEntry queueFirst = writeQueue.iterator().next();
         if (queueFirst.isCompleted()) {
           // Using Max because Edit complete in WAL sync order not arriving order
           nextReadValue = Math.max(nextReadValue, queueFirst.getWriteNumber());
-          writeQueue.removeFirst();
+          writeQueue.remove(queueFirst);
         } else {
           break;
         }
@@ -199,27 +206,31 @@ public class MultiVersionConsistencyControl {
    * Wait for all previous MVCC transactions complete
    */
   public void waitForPreviousTransactionsComplete() {
-    WriteEntry w = beginMemstoreInsert();
+    WriteEntry w = beginMemstoreInsertWithSeqNum(NO_WRITE_NUMBER, true);
     waitForPreviousTransactionsComplete(w);
   }
 
   public void waitForPreviousTransactionsComplete(WriteEntry waitedEntry) {
     boolean interrupted = false;
     WriteEntry w = waitedEntry;
+    w.markCompleted();
 
     try {
       WriteEntry firstEntry = null;
       do {
         synchronized (writeQueue) {
-          // writeQueue won't be empty at this point, the following is just a safety check
           if (writeQueue.isEmpty()) {
             break;
           }
-          firstEntry = writeQueue.getFirst();
+          firstEntry = writeQueue.iterator().next();
           if (firstEntry == w) {
             // all previous in-flight transactions are done
             break;
           }
+          // WriteEntry already was removed from the queue by another handler
+          if (!writeQueue.contains(w)) {
+            break;
+          }
           try {
             writeQueue.wait(0);
           } catch (InterruptedException ie) {
@@ -231,9 +242,7 @@ public class MultiVersionConsistencyControl {
         }
       } while (firstEntry != null);
     } finally {
-      if (w != null) {
-        advanceMemstore(w);
-      }
+      advanceMemstore(w);
     }
     if (interrupted) {
       Thread.currentThread().interrupt();
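
The data-structure swap is the heart of the fix: LinkedHashSet keeps the FIFO iteration
order the old LinkedList gave, but adds O(1) contains/remove, which the new "already removed
by another handler" check depends on. A standalone sketch of those properties:

import java.util.LinkedHashSet;

public class WriteQueueSketch {
  public static void main(String[] args) {
    LinkedHashSet<String> queue = new LinkedHashSet<String>();
    queue.add("w1");
    queue.add("w2");
    queue.add("w3");
    // Iteration order is insertion order, so the head is still "first in".
    System.out.println(queue.iterator().next());   // prints w1
    // Constant-time removal from anywhere in the set; a LinkedList would scan.
    queue.remove("w2");
    // Constant-time membership test, as used in waitForPreviousTransactionsComplete.
    System.out.println(queue.contains("w2"));      // prints false
  }
}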


[42/50] hbase git commit: HBASE-15895 Remove unmaintained jenkins build analysis tool.

Posted by la...@apache.org.
HBASE-15895 Remove unmaintained jenkins build analysis tool.

Signed-off-by: stack <st...@apache.org>
Signed-off-by: Dima Spivak <ds...@cloudera.com>
Signed-off-by: Elliott Clark <el...@fb.com>
Signed-off-by: Apekshit <ap...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a55ef158
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a55ef158
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a55ef158

Branch: refs/heads/branch-1.0
Commit: a55ef158661d24a3567823205213bb56fdfc1164
Parents: edc0a17
Author: Sean Busbey <bu...@apache.org>
Authored: Thu May 26 14:23:15 2016 -0700
Committer: Sean Busbey <bu...@apache.org>
Committed: Fri May 27 16:43:45 2016 -0700

----------------------------------------------------------------------
 dev-support/jenkins-tools/README.md             |  67 -----
 dev-support/jenkins-tools/buildstats/pom.xml    |  87 -------
 .../BuildResultWithTestCaseDetails.java         |  49 ----
 .../devtools/buildstats/HistoryReport.java      |  88 -------
 .../devtools/buildstats/TestCaseResult.java     |  61 -----
 .../devtools/buildstats/TestResultHistory.java  | 260 -------------------
 .../hbase/devtools/buildstats/TestSuite.java    |  47 ----
 dev-support/jenkins-tools/pom.xml               |  36 ---
 8 files changed, 695 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/README.md
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/README.md b/dev-support/jenkins-tools/README.md
deleted file mode 100644
index 9e1905f..0000000
--- a/dev-support/jenkins-tools/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-jenkins-tools
-=============
-
-A tool which pulls test case results from a Jenkins server. It displays the union of failed
-test cases from the last 15 runs recorded on the Jenkins server (15 by default; the actual
-number can be lower depending on availability) and tracks how each of them performed across
-those runs (passed, not run, or failed).
-
-Pre-requirement(run under folder jenkins-tools)
-       Please download jenkins-client from https://github.com/cosmin/jenkins-client
-       1) git clone git://github.com/cosmin/jenkins-client.git
-       2) make sure the dependency jenkins-client version in ./buildstats/pom.xml matches the 
-          downloaded jenkins-client(current value is 0.1.6-SNAPSHOT)
-       
-Build command(run under folder jenkins-tools):
-
-       mvn clean package
-
-Usage are: 
-
-       java -jar ./buildstats/target/buildstats.jar <Jenkins HTTP URL> <Job Name> [number of last most recent jobs to check]
-
-Sample commands are:
-
-       java -jar ./buildstats/target/buildstats.jar https://builds.apache.org HBase-TRUNK
-
-Sample output(where 1 means "PASSED", 0 means "NOT RUN AT ALL", -1 means "FAILED"):
-
-Failed Test Cases              3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3632 3633 3634 3635
-
-org.apache.hadoop.hbase.catalog.testmetareadereditor.testretrying    1    1   -1    0    1    1    1    1   -1    0    1    1    1    1
-org.apache.hadoop.hbase.client.testadmin.testdeleteeditunknowncolumnfamilyandortable    0    1    1    1   -1    0    1    1    0    1    1    1    1    1
-org.apache.hadoop.hbase.client.testfromclientsidewithcoprocessor.testclientpoolthreadlocal    1    1    1    1    1    1    1    1    0    1    1   -1    0    1
-org.apache.hadoop.hbase.client.testhcm.testregioncaching    1    1   -1    0    1    1   -1    0   -1    0   -1    0    1    1
-org.apache.hadoop.hbase.client.testmultiparallel.testflushcommitswithabort    1    1    1    1    1    1    1    1   -1    0    1    1    1    1
-org.apache.hadoop.hbase.client.testscannertimeout.test3686a    1    1    1    1    1    1    1    1   -1    0    1    1    1    1
-org.apache.hadoop.hbase.coprocessor.example.testrowcountendpoint.org.apache.hadoop.hbase.coprocessor.example.testrowcountendpoint    0   -1    0   -1    0    0    0   -1    0    0    0    0    0    0
-org.apache.hadoop.hbase.coprocessor.example.testzookeeperscanpolicyobserver.org.apache.hadoop.hbase.coprocessor.example.testzookeeperscanpolicyobserver    0   -1    0   -1    0    0    0   -1    0    0    0    0    0    0
-org.apache.hadoop.hbase.master.testrollingrestart.testbasicrollingrestart    1    1    1    1   -1    0    1    1    1    1    1    1   -1    0
-org.apache.hadoop.hbase.regionserver.testcompactionstate.testmajorcompaction    1    1   -1    0    1    1    1    1    1    1    1    1    1    1
-org.apache.hadoop.hbase.regionserver.testcompactionstate.testminorcompaction    1    1   -1    0    1    1    1    1    1    1    1    1    1    1
-org.apache.hadoop.hbase.replication.testreplication.loadtesting    1    1    1    1    1    1    1    1    1   -1    0    1    1    1
-org.apache.hadoop.hbase.rest.client.testremoteadmin.org.apache.hadoop.hbase.rest.client.testremoteadmin    0    0    0    0    0    0    0    0   -1    0    0    0    0    0
-org.apache.hadoop.hbase.rest.client.testremotetable.org.apache.hadoop.hbase.rest.client.testremotetable    0    0    0    0    0    0    0    0   -1    0    0    0    0    0
-org.apache.hadoop.hbase.security.access.testtablepermissions.testbasicwrite    0    1    1    1    1    1    1    1    1    1    1    1    1   -1
-org.apache.hadoop.hbase.testdrainingserver.testdrainingserverwithabort    1    1    1    1    1   -1    0    1    1    1    1    1   -1    0
-org.apache.hadoop.hbase.util.testhbasefsck.testregionshouldnotbedeployed    1    1    1    1    1    1   -1    0   -1    0   -1   -1    0   -1
-
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/buildstats/pom.xml
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/buildstats/pom.xml b/dev-support/jenkins-tools/buildstats/pom.xml
deleted file mode 100644
index 4149dc7..0000000
--- a/dev-support/jenkins-tools/buildstats/pom.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-	<modelVersion>4.0.0</modelVersion>
-
-	<groupId>org.apache.hbase</groupId>
-	<artifactId>buildstats</artifactId>
-	<version>1.0</version>
-	<packaging>jar</packaging>
-	<name>buildstats</name>
-
-	<properties>
-		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-	</properties>
-
-	<dependencies>
-		<dependency>
-			<groupId>com.offbytwo.jenkins</groupId>
-			<artifactId>jenkins-client</artifactId>
-			<version>0.1.6-SNAPSHOT</version>
-		</dependency>
-	</dependencies>
-
-	<build>
-		<plugins>
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-jar-plugin</artifactId>
-				<version>2.4</version>
-				<inherited>true</inherited>
-				<configuration>
-					<archive>
-						<manifest>
-							<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
-							<addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
-						</manifest>
-					</archive>
-				</configuration>
-			</plugin>
-	
-			<plugin>
-				<artifactId>maven-assembly-plugin</artifactId>
-				<configuration>
-					<archive>
-						<manifest>
-							<addClasspath>true</addClasspath>
-							<mainClass>org.apache.hadoop.hbase.devtools.buildstats.TestResultHistory</mainClass>
-						</manifest>
-					</archive>
-					<descriptorRefs>
-						<descriptorRef>jar-with-dependencies</descriptorRef>
-					</descriptorRefs>
-					<finalName>buildstats</finalName>
-					<appendAssemblyId>false</appendAssemblyId>
-				</configuration>
-				<executions>
-					<execution>
-						<id>make-my-jar-with-dependencies</id>
-						<phase>package</phase>
-						<goals>
-							<goal>single</goal>
-						</goals>
-					</execution>
-				</executions>
-			</plugin>
-	
-		</plugins>
-	</build>
-</project>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java b/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java
deleted file mode 100644
index ad3f0e3..0000000
--- a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.devtools.buildstats;
-
-import com.offbytwo.jenkins.model.BaseModel;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-public class BuildResultWithTestCaseDetails extends BaseModel {
-
-  List<TestSuite> suites;
-
-  /* default constructor needed for Jackson */
-  public BuildResultWithTestCaseDetails() {
-    this(new ArrayList<TestSuite>());
-  }
-
-  public BuildResultWithTestCaseDetails(List<TestSuite> s) {
-    this.suites = s;
-  }
-
-  public BuildResultWithTestCaseDetails(TestSuite... s) {
-    this(Arrays.asList(s));
-  }
-
-  public List<TestSuite> getSuites() {
-    return suites;
-  }
-
-  public void setSuites(List<TestSuite> s) {
-    suites = s;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java b/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java
deleted file mode 100644
index 80671b2..0000000
--- a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.devtools.buildstats;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.Set;
-
-public class HistoryReport {
-  private List<Integer> buildsWithTestResults;
-  private Map<String, int[]> historyResults;
-  private Map<Integer, Set<String>> skippedTests;
-
-  public HistoryReport() {
-    buildsWithTestResults = new ArrayList<Integer>();
-    this.historyResults = new HashMap<String, int[]>();
-  }
-
-  public Map<String, int[]> getHistoryResults() {
-    return this.historyResults;
-  }
-
-  public Map<Integer, Set<String>> getSkippedTests() {
-    return this.skippedTests;
-  }
-
-  public List<Integer> getBuildsWithTestResults() {
-    return this.buildsWithTestResults;
-  }
-
-  public void setBuildsWithTestResults(List<Integer> src) {
-    this.buildsWithTestResults = src;
-  }
-
-  public void setHistoryResults(Map<String, int[]> src, Map<Integer, Set<String>> skippedTests) {
-    this.skippedTests = skippedTests;
-    this.historyResults = src;
-  }
-
-  public void printReport() {
-    System.out.printf("%-30s", "Failed Test Cases Stats");
-    for (Integer i : getBuildsWithTestResults()) {
-      System.out.printf("%5d", i);
-    }
-    System.out.println("\n========================================================");
-    SortedSet<String> keys = new TreeSet<String>(getHistoryResults().keySet());
-    for (String failedTestCase : keys) {
-      System.out.println();
-      int[] resultHistory = getHistoryResults().get(failedTestCase);
-      System.out.print(failedTestCase);
-      for (int i = 0; i < resultHistory.length; i++) {
-        System.out.printf("%5d", resultHistory[i]);
-      }
-    }
-    System.out.println();
-
-    if (skippedTests == null) return;
-
-    System.out.printf("\n%-30s\n", "Skipped Test Cases Stats");
-    for (Integer i : getBuildsWithTestResults()) {
-      Set<String> tmpSkippedTests = skippedTests.get(i);
-      if (tmpSkippedTests == null || tmpSkippedTests.isEmpty()) continue;
-      System.out.printf("======= %d skipped(Or don't have) following test suites =======\n", i);
-      for (String skippedTestcase : tmpSkippedTests) {
-        System.out.println(skippedTestcase);
-      }
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java b/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java
deleted file mode 100644
index e476cb9..0000000
--- a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.devtools.buildstats;
-
-public class TestCaseResult {
-  private String className;
-  private int failedSince;
-  private String name;
-  private String status;
-
-  public String getName() {
-    return name;
-  }
-
-  public String getClassName() {
-    return className;
-  }
-
-  public int failedSince() {
-    return failedSince;
-  }
-
-  public String getStatus() {
-    return status;
-  }
-
-  public void setName(String s) {
-    name = s;
-  }
-
-  public void setClassName(String s) {
-    className = s;
-  }
-
-  public void setFailedSince(int s) {
-    failedSince = s;
-  }
-
-  public void setStatus(String s) {
-    status = s;
-  }
-
-  public String getFullName() {
-    return (this.className + "." + this.name).toLowerCase();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java b/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java
deleted file mode 100644
index 0270f91..0000000
--- a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.devtools.buildstats;
-
-import com.offbytwo.jenkins.JenkinsServer;
-import com.offbytwo.jenkins.client.JenkinsHttpClient;
-import com.offbytwo.jenkins.model.*;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.*;
-
-public class TestResultHistory {
-  public final static String STATUS_REGRESSION = "REGRESSION";
-  public final static String STATUS_FAILED = "FAILED";
-  public final static String STATUS_PASSED = "PASSED";
-  public final static String STATUS_FIXED = "FIXED";
-  public static int BUILD_HISTORY_NUM = 15;
-
-  private JenkinsHttpClient client;
-  private String jobName;
-
-  public TestResultHistory(String apacheHTTPURL, String jobName, String userName, String passWord)
-      throws URISyntaxException {
-    this.client = new JenkinsHttpClient(new URI(apacheHTTPURL), userName, passWord);
-    this.jobName = jobName;
-  }
-
-  public static void main(String[] args) {
-
-    if (args.length < 2) {
-      printUsage();
-      return;
-    }
-
-    String apacheHTTPUrl = args[0];
-    String jobName = args[1];
-    if (args.length > 2) {
-      int tmpHistoryJobNum = -1;
-      try {
-        tmpHistoryJobNum = Integer.parseInt(args[2]);
-      } catch (NumberFormatException ex) {
-        // ignore
-      }
-      if (tmpHistoryJobNum > 0) {
-        BUILD_HISTORY_NUM = tmpHistoryJobNum;
-      }
-    }
-
-    try {
-      TestResultHistory buildHistory = new TestResultHistory(apacheHTTPUrl, jobName, "", "");
-      HistoryReport report = buildHistory.getReport();
-      // display result in console
-      report.printReport();
-    } catch (Exception ex) {
-      System.out.println("Got unexpected exception: " + ex.getMessage());
-    }
-  }
-
-  protected static void printUsage() {
-    System.out.println("<Jenkins HTTP URL> <Job Name> [Number of Historical Jobs to Check]");
-    System.out.println("Sample Input: \"https://builds.apache.org\" "
-        + "\"HBase-TRUNK-on-Hadoop-2.0.0\" ");
-  }
-
-  public HistoryReport getReport() {
-    HistoryReport report = new HistoryReport();
-
-    List<Integer> buildWithTestResults = new ArrayList<Integer>();
-    Map<String, int[]> failureStats = new HashMap<String, int[]>();
-
-    try {
-      JenkinsServer jenkins = new JenkinsServer(this.client);
-      Map<String, Job> jobs = jenkins.getJobs();
-      JobWithDetails job = jobs.get(jobName.toLowerCase()).details();
-
-      // build test case failures stats for the past 10 builds
-      Build lastBuild = job.getLastBuild();
-      int startingBuildNumber =
-          (lastBuild.getNumber() - BUILD_HISTORY_NUM > 0) ? lastBuild.getNumber()
-              - BUILD_HISTORY_NUM + 1 : 1;
-
-      Map<Integer, HashMap<String, String>> executedTestCases =
-          new HashMap<Integer, HashMap<String, String>>();
-      Map<Integer, Set<String>> skippedTestCases = new TreeMap<Integer, Set<String>>();
-      Set<String> allExecutedTestCases = new HashSet<String>();
-      Map<Integer, Set<String>> normalizedTestSet = new HashMap<Integer, Set<String>>();
-      String buildUrl = lastBuild.getUrl();
-      for (int i = startingBuildNumber; i <= lastBuild.getNumber(); i++) {
-        HashMap<String, String> buildExecutedTestCases = new HashMap<String, String>(2048);
-        String curBuildUrl = buildUrl.replaceFirst("/" + lastBuild.getNumber(), "/" + i);
-        List<String> failedCases = null;
-        try {
-          failedCases = getBuildFailedTestCases(curBuildUrl, buildExecutedTestCases);
-          buildWithTestResults.add(i);
-        } catch (Exception ex) {
-          // can't get result so skip it
-          continue;
-        }
-        executedTestCases.put(i, buildExecutedTestCases);
-        HashSet<String> tmpSet = new HashSet<String>();
-        for (String tmpTestCase : buildExecutedTestCases.keySet()) {
-          allExecutedTestCases.add(tmpTestCase.substring(0, tmpTestCase.lastIndexOf(".")));
-          tmpSet.add(tmpTestCase.substring(0, tmpTestCase.lastIndexOf(".")));
-        }
-        normalizedTestSet.put(i, tmpSet);
-
-        // set test result failed cases of current build
-        for (String curFailedTestCase : failedCases) {
-          if (failureStats.containsKey(curFailedTestCase)) {
-            int[] testCaseResultArray = failureStats.get(curFailedTestCase);
-            testCaseResultArray[i - startingBuildNumber] = -1;
-          } else {
-            int[] testResult = new int[BUILD_HISTORY_NUM];
-            testResult[i - startingBuildNumber] = -1;
-            // refill previous build test results for newly failed test case
-            for (int k = startingBuildNumber; k < i; k++) {
-              HashMap<String, String> tmpBuildExecutedTestCases = executedTestCases.get(k);
-              if (tmpBuildExecutedTestCases != null
-                  && tmpBuildExecutedTestCases.containsKey(curFailedTestCase)) {
-                String statusStr = tmpBuildExecutedTestCases.get(curFailedTestCase);
-                testResult[k - startingBuildNumber] = convertStatusStringToInt(statusStr);
-              }
-            }
-            failureStats.put(curFailedTestCase, testResult);
-          }
-
-        }
-
-        // set test result for previous failed test cases
-        for (String curTestCase : failureStats.keySet()) {
-          if (!failedCases.contains(curTestCase) && buildExecutedTestCases.containsKey(curTestCase)) {
-            String statusVal = buildExecutedTestCases.get(curTestCase);
-            int[] testCaseResultArray = failureStats.get(curTestCase);
-            testCaseResultArray[i - startingBuildNumber] = convertStatusStringToInt(statusVal);
-          }
-        }
-      }
-
-      // check which test suits skipped
-      for (int i = startingBuildNumber; i <= lastBuild.getNumber(); i++) {
-        Set<String> skippedTests = new HashSet<String>();
-        HashMap<String, String> tmpBuildExecutedTestCases = executedTestCases.get(i);
-        if (tmpBuildExecutedTestCases == null || tmpBuildExecutedTestCases.isEmpty()) continue;
-        // normalize test case names
-        Set<String> tmpNormalizedTestCaseSet = normalizedTestSet.get(i);
-        for (String testCase : allExecutedTestCases) {
-          if (!tmpNormalizedTestCaseSet.contains(testCase)) {
-            skippedTests.add(testCase);
-          }
-        }
-        skippedTestCases.put(i, skippedTests);
-      }
-
-      report.setBuildsWithTestResults(buildWithTestResults);
-      for (String failedTestCase : failureStats.keySet()) {
-        int[] resultHistory = failureStats.get(failedTestCase);
-        int[] compactHistory = new int[buildWithTestResults.size()];
-        int index = 0;
-        for (Integer i : buildWithTestResults) {
-          compactHistory[index] = resultHistory[i - startingBuildNumber];
-          index++;
-        }
-        failureStats.put(failedTestCase, compactHistory);
-      }
-
-      report.setHistoryResults(failureStats, skippedTestCases);
-
-    } catch (Exception ex) {
-      System.out.println(ex);
-      ex.printStackTrace();
-    }
-
-    return report;
-  }
-
-  /**
-   * @param statusVal Jenkins test case status string (e.g. PASSED, FAILED, REGRESSION)
-   * @return 1 for PASSED, -1 for FAILED or REGRESSION, 0 otherwise (e.g. SKIPPED)
-   */
-  static int convertStatusStringToInt(String statusVal) {
-
-    if (statusVal.equalsIgnoreCase(STATUS_REGRESSION) || statusVal.equalsIgnoreCase(STATUS_FAILED)) {
-      return -1;
-    } else if (statusVal.equalsIgnoreCase(STATUS_PASSED)) {
-      return 1;
-    }
-
-    return 0;
-  }
-
-  /**
-   * Get the failed test cases of a build
-   * @param buildURL Jenkins build job URL
-   * @param executedTestCases map populated with each executed test case name and its status
-   * @return list of failed test case names
-   */
-  List<String> getBuildFailedTestCases(String buildURL, HashMap<String, String> executedTestCases)
-      throws IOException {
-    List<String> result = new ArrayList<String>();
-
-    String apiPath =
-        urlJoin(buildURL,
-          "testReport?depth=10&tree=suites[cases[className,name,status,failedSince]]");
-
-    List<TestSuite> suites = client.get(apiPath, BuildResultWithTestCaseDetails.class).getSuites();
-
-    result = getTestSuiteFailedTestcase(suites, executedTestCases);
-
-    return result;
-  }
-
-  private List<String> getTestSuiteFailedTestcase(List<TestSuite> suites,
-      HashMap<String, String> executedTestCases) {
-    List<String> result = new ArrayList<String>();
-
-    if (suites == null) {
-      return result;
-    }
-
-    for (TestSuite curTestSuite : suites) {
-      for (TestCaseResult curTestCaseResult : curTestSuite.getCases()) {
-        if (curTestCaseResult.getStatus().equalsIgnoreCase(STATUS_FAILED)
-            || curTestCaseResult.getStatus().equalsIgnoreCase(STATUS_REGRESSION)) {
-          // failed test case
-          result.add(curTestCaseResult.getFullName());
-        }
-        executedTestCases.put(curTestCaseResult.getFullName(), curTestCaseResult.getStatus());
-      }
-    }
-
-    return result;
-  }
-
-  String urlJoin(String path1, String path2) {
-    if (!path1.endsWith("/")) {
-      path1 += "/";
-    }
-    if (path2.startsWith("/")) {
-      path2 = path2.substring(1);
-    }
-    return path1 + path2;
-  }
-}
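
For readers skimming the removed tool: the two small helpers above are self-contained. A
minimal sketch of how they compose, with a placeholder Jenkins URL (nothing below is part
of this commit):

    String base = "http://jenkins.example.org/job/HBase-TRUNK";   // placeholder URL
    String api  = urlJoin(base, "/1234/testReport");
    // -> "http://jenkins.example.org/job/HBase-TRUNK/1234/testReport"
    int passed  = convertStatusStringToInt("PASSED");      // 1
    int failed  = convertStatusStringToInt("REGRESSION");  // -1
    int skipped = convertStatusStringToInt("SKIPPED");     // 0 (the fall-through case)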

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java b/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java
deleted file mode 100644
index b8a7624..0000000
--- a/dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.devtools.buildstats;
-
-import com.offbytwo.jenkins.model.BaseModel;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-public class TestSuite extends BaseModel {
-  List<TestCaseResult> cases;
-
-  public TestSuite() {
-    this(new ArrayList<TestCaseResult>());
-  }
-
-  public TestSuite(List<TestCaseResult> s) {
-    this.cases = s;
-  }
-
-  public TestSuite(TestCaseResult... s) {
-    this(Arrays.asList(s));
-  }
-
-  public List<TestCaseResult> getCases() {
-    return cases;
-  }
-
-  public void setCases(List<TestCaseResult> s) {
-    cases = s;
-  }
-}
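
The class above is a plain bean, presumably filled from the Jenkins testReport JSON by the
jenkins-client library's binding. A minimal sketch of direct use, assuming a TestCaseResult
`tc` obtained elsewhere (the sibling model class deleted in this same commit):

    TestSuite suite = new TestSuite(tc);          // varargs constructor shown above
    for (TestCaseResult c : suite.getCases()) {
      // inspect className/name/status on each executed case
    }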

http://git-wip-us.apache.org/repos/asf/hbase/blob/a55ef158/dev-support/jenkins-tools/pom.xml
----------------------------------------------------------------------
diff --git a/dev-support/jenkins-tools/pom.xml b/dev-support/jenkins-tools/pom.xml
deleted file mode 100644
index 952b29a..0000000
--- a/dev-support/jenkins-tools/pom.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-	<modelVersion>4.0.0</modelVersion>
-	
-	<groupId>org.apache.hbase</groupId>
-	<artifactId>jenkins-tools</artifactId>
-	<version>1.0</version>
-	<packaging>pom</packaging>
-	
-	<modules>
-		<module>jenkins-client</module>
-		<module>buildstats</module>
-	</modules>
-</project>


[15/50] hbase git commit: Amend HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

Posted by la...@apache.org.
Amend HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

Fixes for newly introduced FindBugs warnings


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f42fcd6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f42fcd6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f42fcd6

Branch: refs/heads/branch-1.0
Commit: 7f42fcd6f9db1decd06dfb9484dd22c5704dd917
Parents: b43442c
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Feb 4 16:17:41 2016 -0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Feb 4 17:00:23 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java    | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f42fcd6/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 983153f..3ad3a20 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -297,7 +297,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
       if (Ids.ANYONE_ID_UNSAFE.equals(id)) {
         if (perms != Perms.READ) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
+            LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
               id, perms, Perms.READ));
           }
           return false;
@@ -312,7 +312,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
         if (name.equals(hbaseUser)) {
           if (perms != Perms.ALL) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
+              LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
                 id, perms, Perms.ALL));
             }
             return false;
@@ -348,7 +348,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
             } else {
               if (LOG.isDebugEnabled()) {
                 LOG.debug(String.format(
-                  "superuser '%s' does not have correct permissions: have %0x, want %0x",
+                  "superuser '%s' does not have correct permissions: have 0x%x, want 0x%x",
                   acl.getId().getId(), acl.getPerms(), Perms.ALL));
               }
             }
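
The substance of the fix is a format-string correction: in Java, "%0x" is the zero-pad flag
with no width attached to the 'x' conversion, which fails at run time instead of printing
hex, while "0x%x" emits a literal "0x" prefix followed by the value in hex. A standalone
illustration (not from the patch):

    int perms = 0x1f;
    // Before: '0' is a zero-pad flag and requires a width, so this line throws
    // (MissingFormatWidthException) instead of logging the permissions.
    // String.format("have %0x, want %0x", perms, 0x1);
    // After: literal "0x", then the hex digits.
    System.out.println(String.format("have 0x%x, want 0x%x", perms, 0x1));
    // -> have 0x1f, want 0x1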


[18/50] hbase git commit: HBASE-15157 Add *PerformanceTest for Append, CheckAnd*

Posted by la...@apache.org.
HBASE-15157 Add *PerformanceTest for Append, CheckAnd*


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d901960
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d901960
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d901960

Branch: refs/heads/branch-1.0
Commit: 7d90196059c236b05b3a4df0f1ff6bdb39069e10
Parents: 6c555d3
Author: stack <st...@apache.org>
Authored: Fri Feb 5 11:18:42 2016 -0800
Committer: stack <st...@apache.org>
Committed: Fri Feb 5 20:39:23 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/IncrementPerformanceTest.java  | 129 ----------------
 .../hadoop/hbase/PerformanceEvaluation.java     | 146 ++++++++++++++++++-
 2 files changed, 141 insertions(+), 134 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d901960/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
deleted file mode 100644
index bf3a44f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-// import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.yammer.metrics.Metrics;
-import com.yammer.metrics.core.MetricName;
-import com.yammer.metrics.core.Timer;
-import com.yammer.metrics.core.TimerContext;
-import com.yammer.metrics.stats.Snapshot;
-
-/**
- * Simple increments performance test. Run it from main against a live cluster; it presumes
- * the table already exists. Defaults are a zk ensemble of localhost:2181, a tableName of
- * 'tableName', a column family name of 'columnFamilyName', 80 threads, and 10000 increments
- * per thread. To change any of these configs, pass -DNAME=VALUE, as in
- * -DtableName="newTableName". It prints the configuration it runs with at startup and
- * latency percentiles at the end.
- */
-public class IncrementPerformanceTest implements Tool {
-  private static final Log LOG = LogFactory.getLog(IncrementPerformanceTest.class);
-  private static final byte [] QUALIFIER = new byte [] {'q'};
-  private Configuration conf;
-  private final MetricName metricName = new MetricName(this.getClass(), "increment");
-  private static final String TABLENAME = "tableName";
-  private static final String COLUMN_FAMILY = "columnFamilyName";
-  private static final String THREAD_COUNT = "threadCount";
-  private static final int DEFAULT_THREAD_COUNT = 80;
-  private static final String INCREMENT_COUNT = "incrementCount";
-  private static final int DEFAULT_INCREMENT_COUNT = 10000;
-
-  IncrementPerformanceTest() {}
-
-  public int run(final String [] args) throws Exception {
-    Configuration conf = getConf();
-    final TableName tableName = TableName.valueOf(conf.get(TABLENAME), TABLENAME);
-    final byte [] columnFamilyName = Bytes.toBytes(conf.get(COLUMN_FAMILY, COLUMN_FAMILY));
-    int threadCount = conf.getInt(THREAD_COUNT, DEFAULT_THREAD_COUNT);
-    final int incrementCount = conf.getInt(INCREMENT_COUNT, DEFAULT_INCREMENT_COUNT);
-    LOG.info("Running test with " + HConstants.ZOOKEEPER_QUORUM + "=" +
-      getConf().get(HConstants.ZOOKEEPER_QUORUM) + ", tableName=" + tableName +
-      ", columnFamilyName=" + columnFamilyName + ", threadCount=" + threadCount +
-      ", incrementCount=" + incrementCount);
-
-    ExecutorService service = Executors.newFixedThreadPool(threadCount);
-    Set<Future<?>> futures = new HashSet<Future<?>>();
-    final AtomicInteger integer = new AtomicInteger(0); // needed a simple "final" counter
-    while (integer.incrementAndGet() <= threadCount) {
-      futures.add(service.submit(new Runnable() {
-        @Override
-        public void run() {
-          HTable table;
-          try {
-            // ConnectionFactory.createConnection(conf).getTable(TableName.valueOf(TABLE_NAME));
-            table = new HTable(getConf(), tableName.getName());
-          } catch (Exception e) {
-            throw new RuntimeException(e);
-          }
-          Timer timer = Metrics.newTimer(metricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
-          for (int i = 0; i < incrementCount; i++) {
-            byte[] row = Bytes.toBytes(i);
-            TimerContext context = timer.time();
-            try {
-              table.incrementColumnValue(row, columnFamilyName, QUALIFIER, 1l);
-            } catch (IOException e) {
-              // swallow..it's a test.
-            } finally {
-              context.stop();
-            }
-          }
-        }
-      }));
-    }
-
-    for(Future<?> future : futures) future.get();
-    service.shutdown();
-    Snapshot s = Metrics.newTimer(this.metricName,
-        TimeUnit.MILLISECONDS, TimeUnit.SECONDS).getSnapshot();
-    LOG.info(String.format("75th=%s, 95th=%s, 99th=%s", s.get75thPercentile(),
-        s.get95thPercentile(), s.get99thPercentile()));
-    return 0;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return this.conf;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  public static void main(String[] args) throws Exception {
-    System.exit(ToolRunner.run(HBaseConfiguration.create(), new IncrementPerformanceTest(), args));
-  }
-}
\ No newline at end of file
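
The standalone tool above is folded into PerformanceEvaluation below. For reference, a
sketch of the client-side increment idioms it exercised, assuming a Table `table` and
byte[] constants FAMILY and QUALIFIER are in scope:

    // Single-column shortcut.
    long newValue = table.incrementColumnValue(Bytes.toBytes(0), FAMILY, QUALIFIER, 1L);

    // Increment object, as the new PerformanceEvaluation IncrementTest below uses.
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(FAMILY, QUALIFIER, 1L);
    Result r = table.increment(inc);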

http://git-wip-us.apache.org/repos/asf/hbase/blob/7d901960/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 17f00ad..2e2b243 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -52,19 +52,24 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterAllFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -167,7 +172,17 @@ public class PerformanceEvaluation extends Configured implements Tool {
       "Run scan test (read every row)");
     addCommandDescriptor(FilteredScanTest.class, "filterScan",
       "Run scan test using a filter to find a specific row based on it's value " +
-        "(make sure to use --rows=20)");
+      "(make sure to use --rows=20)");
+    addCommandDescriptor(IncrementTest.class, "increment",
+      "Increment on each row; clients overlap on keyspace so some concurrent operations");
+    addCommandDescriptor(AppendTest.class, "append",
+      "Append on each row; clients overlap on keyspace so some concurrent operations");
+    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
+      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
+    addCommandDescriptor(CheckAndPutTest.class, "checkAndPut",
+      "CheckAndPut on each row; clients overlap on keyspace so some concurrent operations");
+    addCommandDescriptor(CheckAndDeleteTest.class, "checkAndDelete",
+      "CheckAndDelete on each row; clients overlap on keyspace so some concurrent operations");
   }
 
   /**
@@ -1057,13 +1072,22 @@ public class PerformanceEvaluation extends Configured implements Tool {
       return (System.nanoTime() - startTime) / 1000000;
     }
 
+    int getStartRow() {
+      return opts.startRow;
+    }
+
+    int getLastRow() {
+      return getStartRow() + opts.perClientRunRows;
+    }
+
     /**
      * Provides an extension point for tests that don't want a per row invocation.
      */
     void testTimed() throws IOException, InterruptedException {
-      int lastRow = opts.startRow + opts.perClientRunRows;
+      int startRow = getStartRow();
+      int lastRow = getLastRow();
       // Report on completion of 1/10th of total.
-      for (int i = opts.startRow; i < lastRow; i++) {
+      for (int i = startRow; i < lastRow; i++) {
         if (i % everyN != 0) continue;
         long startTime = System.nanoTime();
         TraceScope scope = Trace.startSpan("test row", traceSampler);
@@ -1074,10 +1098,11 @@ public class PerformanceEvaluation extends Configured implements Tool {
         }
         latency.update((System.nanoTime() - startTime) / 1000);
         if (status != null && i > 0 && (i % getReportingPeriod()) == 0) {
-          status.setStatus(generateStatus(opts.startRow, i, lastRow));
+          status.setStatus(generateStatus(startRow, i, lastRow));
         }
       }
     }
+
     /**
      * report percentiles of latency
      * @throws IOException
@@ -1419,7 +1444,116 @@ public class PerformanceEvaluation extends Configured implements Tool {
       Result r = testScanner.next();
       updateValueSize(r);
     }
+  }
+
+  /**
+   * Base class for operations that are CAS-like; they read a value and then set it based on what
+   * they read. Increment, append, checkAndPut, etc. fall into this category.
+   *
+   * <p>These operations also want some concurrency going on. Usually these tests operate in their
+   * own part of the key range; in CASTableTest we have them all overlap on the same key space,
+   * via the getStartRow and getLastRow overrides.
+   */
+  static abstract class CASTableTest extends TableTest {
+    private final byte [] qualifier;
+    CASTableTest(Connection con, TestOptions options, Status status) {
+      super(con, options, status);
+      qualifier = Bytes.toBytes(this.getClass().getSimpleName());
+    }
+
+    byte [] getQualifier() {
+      return this.qualifier;
+    }
+
+    @Override
+    int getStartRow() {
+      return 0;
+    }
+
+    @Override
+    int getLastRow() {
+      return opts.perClientRunRows;
+    }
+  }
+
+  static class IncrementTest extends CASTableTest {
+    IncrementTest(Connection con, TestOptions options, Status status) {
+      super(con, options, status);
+    }
+
+    @Override
+    void testRow(final int i) throws IOException {
+      Increment increment = new Increment(format(i));
+      increment.addColumn(FAMILY_NAME, getQualifier(), 1l);
+      updateValueSize(this.table.increment(increment));
+    }
+  }
+
+  static class AppendTest extends CASTableTest {
+    AppendTest(Connection con, TestOptions options, Status status) {
+      super(con, options, status);
+    }
+
+    @Override
+    void testRow(final int i) throws IOException {
+      byte [] bytes = format(i);
+      Append append = new Append(bytes);
+      append.add(FAMILY_NAME, getQualifier(), bytes);
+      updateValueSize(this.table.append(append));
+    }
+  }
+
+  static class CheckAndMutateTest extends CASTableTest {
+    CheckAndMutateTest(Connection con, TestOptions options, Status status) {
+      super(con, options, status);
+    }
+
+    @Override
+    void testRow(final int i) throws IOException {
+      byte [] bytes = format(i);
+      // Put a known value so when we go to check it, it is there.
+      Put put = new Put(bytes);
+      put.addColumn(FAMILY_NAME, getQualifier(), bytes);
+      this.table.put(put);
+      RowMutations mutations = new RowMutations(bytes);
+      mutations.add(put);
+      this.table.checkAndMutate(bytes, FAMILY_NAME, getQualifier(), CompareOp.EQUAL, bytes,
+          mutations);
+    }
+  }
 
+  static class CheckAndPutTest extends CASTableTest {
+    CheckAndPutTest(Connection con, TestOptions options, Status status) {
+      super(con, options, status);
+    }
+
+    @Override
+    void testRow(final int i) throws IOException {
+      byte [] bytes = format(i);
+      // Put a known value so when we go to check it, it is there.
+      Put put = new Put(bytes);
+      put.addColumn(FAMILY_NAME, getQualifier(), bytes);
+      this.table.put(put);
+      this.table.checkAndPut(bytes, FAMILY_NAME, getQualifier(), CompareOp.EQUAL, bytes, put);
+    }
+  }
+
+  static class CheckAndDeleteTest extends CASTableTest {
+    CheckAndDeleteTest(Connection con, TestOptions options, Status status) {
+      super(con, options, status);
+    }
+
+    @Override
+    void testRow(final int i) throws IOException {
+      byte [] bytes = format(i);
+      // Put a known value so when we go to check it, it is there.
+      Put put = new Put(bytes);
+      put.addColumn(FAMILY_NAME, getQualifier(), bytes);
+      this.table.put(put);
+      Delete delete = new Delete(put.getRow());
+      delete.addColumn(FAMILY_NAME, getQualifier());
+      this.table.checkAndDelete(bytes, FAMILY_NAME, getQualifier(), CompareOp.EQUAL, bytes, delete);
+    }
   }
 
   static class SequentialReadTest extends TableTest {
@@ -1723,8 +1857,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
       "clients (and HRegionServers)");
     System.err.println("                 running: 1 <= value <= 500");
     System.err.println("Examples:");
-    System.err.println(" To run a single evaluation client:");
+    System.err.println(" To run a single client doing the default 1M sequentialWrites:");
     System.err.println(" $ bin/hbase " + className + " sequentialWrite 1");
+    System.err.println(" To run 10 clients doing increments over ten rows:");
+    System.err.println(" $ bin/hbase " + className + " --rows=10 --nomapred increment 10");
   }
 
   /**
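
The new CAS-style commands all follow the same check-then-mutate client pattern. A minimal
sketch, assuming a Table `table` plus byte[] row, FAMILY, QUAL and val are in scope:

    // Seed a known value so the check can succeed.
    Put put = new Put(row);
    put.addColumn(FAMILY, QUAL, val);
    table.put(put);
    // Apply the Put only if the cell still equals `val`; returns whether it applied.
    boolean applied = table.checkAndPut(row, FAMILY, QUAL, CompareOp.EQUAL, val, put);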


[03/50] hbase git commit: HBASE-15083 Gets from Multiactions are not counted in metrics for gets

Posted by la...@apache.org.
HBASE-15083 Gets from Multiactions are not counted in metrics for gets


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/556741b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/556741b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/556741b9

Branch: refs/heads/branch-1.0
Commit: 556741b9ea031edadd982e098ee36ee88dd0df62
Parents: f4fa859
Author: chenheng <ch...@apache.org>
Authored: Tue Jan 12 14:32:55 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Tue Jan 12 14:32:55 2016 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/regionserver/RSRpcServices.java | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/556741b9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9c6de0f..2af42fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -541,8 +541,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       try {
         Result r = null;
         if (action.hasGet()) {
-          Get get = ProtobufUtil.toGet(action.getGet());
-          r = region.get(get);
+          long before = EnvironmentEdgeManager.currentTime();
+          try {
+            Get get = ProtobufUtil.toGet(action.getGet());
+            r = region.get(get);
+          } finally {
+            if (regionServer.metricsRegionServer != null) {
+              regionServer.metricsRegionServer.updateGet(
+                EnvironmentEdgeManager.currentTime() - before);
+            }
+          }
         } else if (action.hasServiceCall()) {
           resultOrExceptionBuilder = ResultOrException.newBuilder();
           try {
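
The shape of the fix is the usual try/finally latency idiom: start the clock, do the get,
and record the elapsed time even when the get throws. A hedged sketch with placeholder
doGet()/recordGetLatency() methods (the patch itself uses EnvironmentEdgeManager and the
region server's metrics object):

    long before = System.nanoTime();
    try {
      doGet();   // stands in for region.get(get)
    } finally {
      // record even on failure, so errored gets are not invisible in the metrics
      recordGetLatency((System.nanoTime() - before) / 1000000);
    }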


[23/50] hbase git commit: HBASE-15129 Set default value for hbase.fs.tmp.dir rather than fully depend on hbase-default.xml (Yu Li)

Posted by la...@apache.org.
HBASE-15129 Set default value for hbase.fs.tmp.dir rather than fully depend on hbase-default.xml (Yu Li)

Conflicts:
	hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e521b51e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e521b51e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e521b51e

Branch: refs/heads/branch-1.0
Commit: e521b51e406e7a60d50b82d1fb3361e1a9c5880a
Parents: 8f064d4
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Feb 2 16:18:26 2016 -0800
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Thu Feb 11 09:22:33 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java | 8 ++++++--
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java    | 5 +++++
 .../apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java    | 5 ++++-
 .../hadoop/hbase/mapreduce/TestHFileOutputFormat2.java       | 3 ++-
 4 files changed, 17 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e521b51e/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
index 04bfbb5..5af6891 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
@@ -18,9 +18,10 @@
  */
 package org.apache.hadoop.hbase.security;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 
 @InterfaceAudience.Private
@@ -37,6 +38,9 @@ public class SecureBulkLoadUtil {
   }
 
   public static Path getBaseStagingDir(Configuration conf) {
-    return new Path(conf.get(BULKLOAD_STAGING_DIR));
+    String hbaseTmpFsDir =
+        conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+          HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+    return new Path(conf.get(BULKLOAD_STAGING_DIR, hbaseTmpFsDir));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e521b51e/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 68680b8..f1f3e1a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1187,6 +1187,11 @@ public final class HConstants {
   public static final String ZK_SERVER_KERBEROS_PRINCIPAL =
       "hbase.zookeeper.server.kerberos.principal";  
 
+  /** Config key for hbase temporary directory in hdfs */
+  public static final String TEMPORARY_FS_DIRECTORY_KEY = "hbase.fs.tmp.dir";
+  public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/"
+      + System.getProperty("user.name") + "/hbase-staging";
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e521b51e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 41a540b..7230f3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -568,7 +568,10 @@ public class HFileOutputFormat2
     Configuration conf = job.getConfiguration();
     // create the partitions file
     FileSystem fs = FileSystem.get(conf);
-    Path partitionsPath = new Path(conf.get("hbase.fs.tmp.dir"), "partitions_" + UUID.randomUUID());
+    String hbaseTmpFsDir =
+        conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+          HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+    Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
     fs.makeQualified(partitionsPath);
     writePartitions(conf, partitionsPath, splitPoints);
     fs.deleteOnExit(partitionsPath);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e521b51e/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index a8d86e9..ce3b232 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -333,7 +333,8 @@ public class TestHFileOutputFormat2  {
   @Test
   public void testJobConfiguration() throws Exception {
     Configuration conf = new Configuration(this.util.getConfiguration());
-    conf.set("hbase.fs.tmp.dir", util.getDataTestDir("testJobConfiguration").toString());
+    conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration")
+        .toString());
     Job job = new Job(conf);
     job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
     HTable table = Mockito.mock(HTable.class);
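
The pattern, in isolation: read the tmp-dir key with a compiled-in default instead of
trusting hbase-default.xml to be on the classpath. A minimal sketch (the Path value is
illustrative only):

    Configuration conf = HBaseConfiguration.create();
    String hbaseTmpFsDir = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
    // Consumers (bulk load staging dir, partitions file, ...) fall back to it:
    Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());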


[16/50] hbase git commit: HBASE-15218 On RS crash and replay of WAL, loosing all Tags in Cells (Anoop Sam John)

Posted by la...@apache.org.
HBASE-15218 On RS crash and replay of WAL, loosing all Tags in Cells (Anoop Sam John)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/30eb2fb2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/30eb2fb2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/30eb2fb2

Branch: refs/heads/branch-1.0
Commit: 30eb2fb265759f8df8fd6729a3cd137d841631f1
Parents: 7f42fcd
Author: stack <st...@apache.org>
Authored: Fri Feb 5 10:09:13 2016 -0800
Committer: stack <st...@apache.org>
Committed: Fri Feb 5 10:10:26 2016 -0800

----------------------------------------------------------------------
 .../regionserver/wal/SecureWALCellCodec.java    |  6 +++---
 .../hbase/regionserver/wal/WALCellCodec.java    |  4 ++--
 ...ibilityLabelsWithDefaultVisLabelService.java | 22 ++++++++++++++++++++
 3 files changed, 27 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/30eb2fb2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
index 69181e5..603496f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.codec.KeyValueCodec;
+import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 import org.apache.hadoop.hbase.io.crypto.Decryptor;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.crypto.Encryptor;
@@ -60,7 +60,7 @@ public class SecureWALCellCodec extends WALCellCodec {
     this.decryptor = decryptor;
   }
 
-  static class EncryptedKvDecoder extends KeyValueCodec.KeyValueDecoder {
+  static class EncryptedKvDecoder extends KeyValueCodecWithTags.KeyValueDecoder {
 
     private Decryptor decryptor;
     private byte[] iv;
@@ -142,7 +142,7 @@ public class SecureWALCellCodec extends WALCellCodec {
 
   }
 
-  static class EncryptedKvEncoder extends KeyValueCodec.KeyValueEncoder {
+  static class EncryptedKvEncoder extends KeyValueCodecWithTags.KeyValueEncoder {
 
     private Encryptor encryptor;
     private final ThreadLocal<byte[]> iv = new ThreadLocal<byte[]>() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/30eb2fb2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 9389479..05d733d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.codec.BaseDecoder;
 import org.apache.hadoop.hbase.codec.BaseEncoder;
 import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.codec.KeyValueCodec;
+import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 import org.apache.hadoop.hbase.io.util.Dictionary;
 import org.apache.hadoop.hbase.io.util.StreamUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -342,7 +342,7 @@ public class WALCellCodec implements Codec {
   @Override
   public Decoder getDecoder(InputStream is) {
     return (compression == null)
-        ? new KeyValueCodec.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression);
+        ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/30eb2fb2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
index 3cca329..5f2505c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
@@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -214,4 +215,25 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili
     };
     SUPERUSER.runAs(action);
   }
+
+  @Test(timeout = 60 * 1000)
+  public void testVisibilityLabelsOnWALReplay() throws Exception {
+    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
+    try (Table table = createTableAndWriteDataWithLabels(tableName,
+        "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE);) {
+      List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster()
+          .getRegionServerThreads();
+      for (RegionServerThread rsThread : regionServerThreads) {
+        rsThread.getRegionServer().abort("Aborting ");
+      }
+      // Start one new RS
+      RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
+      waitForLabelsRegionAvailability(rs.getRegionServer());
+      Scan s = new Scan();
+      s.setAuthorizations(new Authorizations(SECRET));
+      ResultScanner scanner = table.getScanner(s);
+      Result[] next = scanner.next(3);
+      assertTrue(next.length == 1);
+    }
+  }
 }
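
The core of the fix: the WAL cell codec was encoding and decoding edits via the
tag-dropping KeyValueCodec, so tags (visibility labels, ACLs) vanished on crash recovery.
A hedged round-trip sketch, assuming a Cell `taggedCell` that carries tags:

    Codec codec = new KeyValueCodecWithTags();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(out);
    encoder.write(taggedCell);
    encoder.flush();
    Codec.Decoder decoder =
        codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    decoder.advance();
    Cell roundTripped = decoder.current();  // tags preserved; plain KeyValueCodec drops them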


[07/50] hbase git commit: Updated CHANGES.txt for 1.0.3RC1

Posted by la...@apache.org.
Updated CHANGES.txt for 1.0.3RC1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1e1312f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1e1312f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1e1312f

Branch: refs/heads/branch-1.0
Commit: f1e1312f9790a7c40f6a4b5a1bab2ea1dd559890
Parents: ae8f090
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Jan 19 18:57:25 2016 -0800
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Jan 19 18:57:25 2016 -0800

----------------------------------------------------------------------
 CHANGES.txt | 34 +++++++++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1e1312f/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index bf7c7d4..102b681 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,8 +1,9 @@
 HBase Change Log
 
-Release Notes - HBase - Version 1.0.3 - 11/23/2015
+Release Notes - HBase - Version 1.0.3 - 01/26/2016
 
 ** Sub-task
+    * [HBASE-14221] - Reduce the number of time row comparison is done in a Scan
     * [HBASE-14428] - Upgrade our surefire-plugin from 2.18 to 2.18.1
     * [HBASE-14513] - TestBucketCache runs obnoxious 1k threads in a unit test
     * [HBASE-14519] - Purge TestFavoredNodeAssignmentHelper, a test for an abandoned feature that can hang
@@ -16,6 +17,11 @@ Release Notes - HBase - Version 1.0.3 - 11/23/2015
     * [HBASE-14655] - Narrow the scope of doAs() calls to region observer notifications for compaction
     * [HBASE-14657] - Remove unneeded API from EncodedSeeker
     * [HBASE-14709] - Parent change breaks graceful_stop.sh on a cluster
+    * [HBASE-15031] - Fix merge of MVCC and SequenceID performance regression in branch-1.0
+    * [HBASE-15095] - isReturnResult=false  on fast path in branch-1.1 and branch-1.0 is not respected
+
+** Brainstorming
+    * [HBASE-14869] - Better request latency and size histograms
 
 ** Bug
     * [HBASE-13143] - TestCacheOnWrite is flaky and needs a diet
@@ -45,10 +51,9 @@ Release Notes - HBase - Version 1.0.3 - 11/23/2015
     * [HBASE-14474] - DeadLock in RpcClientImpl.Connection.close() 
     * [HBASE-14475] - Region split requests are always audited with "hbase" user rather than request user
     * [HBASE-14489] - postScannerFilterRow consumes a lot of CPU
-    * [HBASE-14491] - ReplicationSource#countDistinctRowKeys code logic is not correct
     * [HBASE-14492] - Increase REST server header buffer size from 8k to 64k
     * [HBASE-14494] - Wrong usage messages on shell commands
-    * [HBASE-14501] - NPE in replication with TDE
+    * [HBASE-14501] - NPE in replication when HDFS transparent encryption is enabled.
     * [HBASE-14510] - Can not set coprocessor from Shell after HBASE-14224
     * [HBASE-14545] - TestMasterFailover often times out
     * [HBASE-14577] - HBase shell help for scan and returning a column family has a typo
@@ -67,7 +72,25 @@ Release Notes - HBase - Version 1.0.3 - 11/23/2015
     * [HBASE-14705] - Javadoc for KeyValue constructor is not correct.
     * [HBASE-14733] - Minor typo in alter_namespace.rb
     * [HBASE-14759] - Avoid using Math.abs when selecting SyncRunner in FSHLog
+    * [HBASE-14761] - Deletes with and without visibility expression do not delete the matching mutation
     * [HBASE-14768] - bin/graceful_stop.sh logs nothing as a balancer state to be stored
+    * [HBASE-14799] - Commons-collections object deserialization remote command execution vulnerability 
+    * [HBASE-14806] - Missing sources.jar for several modules when building HBase
+    * [HBASE-14822] - Renewing leases of scanners doesn't work
+    * [HBASE-14840] - Sink cluster reports data replication request as success though the data is not replicated
+    * [HBASE-14904] - Mark Base[En|De]coder LimitedPrivate and fix binary compat issue
+    * [HBASE-14923] - VerifyReplication should not mask the exception during result comparison 
+    * [HBASE-14930] - check_compatibility.sh needs smarter exit codes
+    * [HBASE-14936] - CombinedBlockCache should overwrite CacheStats#rollMetricsPeriod()
+    * [HBASE-14940] - Make our unsafe based ops more safe
+    * [HBASE-14968] - ConcurrentModificationException in region close resulting in the region staying in closing state
+    * [HBASE-14989] - Implementation of Mutation.getWriteToWAL() is backwards
+    * [HBASE-15035] - bulkloading hfiles with tags that require splits do not preserve tags
+    * [HBASE-15052] - Use EnvironmentEdgeManager in ReplicationSource 
+    * [HBASE-15083] - Gets from Multiactions are not counted in metrics for gets.
+    * [HBASE-15085] - IllegalStateException was thrown when scanning on bulkloaded HFiles
+    * [HBASE-15104] - Occasional failures due to NotServingRegionException in IT tests
+    * [HBASE-15108] - TestReplicationAdmin failed on branch-1.0 
 
 ** Improvement
     * [HBASE-14261] - Enhance Chaos Monkey framework by adding zookeeper and datanode fault injections.
@@ -78,15 +101,20 @@ Release Notes - HBase - Version 1.0.3 - 11/23/2015
     * [HBASE-14588] - Stop accessing test resources from within src folder
     * [HBASE-14643] - Avoid Splits from once again opening a closed reader for fetching the first and last key
     * [HBASE-14715] - Add javadocs to DelegatingRetryingCallable
+    * [HBASE-14730] - region server needs to log warnings when there are attributes configured for cells with hfile v2
     * [HBASE-14780] - Integration Tests that run with ChaosMonkey need to specify CFs
 
 ** Task
     * [HBASE-14290] - Spin up less threads in tests
     * [HBASE-14318] - make_rc.sh should purge/re-resolve dependencies from local repository
     * [HBASE-14361] - ReplicationSink should create Connection instances lazily
+    * [HBASE-14516] - categorize hadoop-compat tests
+    * [HBASE-15033] - Backport test-patch.sh and zombie-detector.sh from master to branch-1.0/1.1
 
 ** Test
     * [HBASE-14344] - Add timeouts to TestHttpServerLifecycle
+    * [HBASE-14758] - Add UT case for unchecked error/exception thrown in AsyncProcess#sendMultiAction
+    * [HBASE-14839] - [branch-1] Backport test categories so that patch backport is easier
 
 
 Release Notes - HBase - Version 1.0.2 - 08/31/2015


[38/50] hbase git commit: Revert "Label the new methods on Table introduced by HBASE-15645 as InterfaceAudience.Private"

Posted by la...@apache.org.
Revert "Label the new methods on Table introduced by HBASE-15645 as InterfaceAudience.Private"

This reverts commit 719993e0fe2b132b75a3689267ae4adff364b6aa.

bad commit message


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd5c5fb3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd5c5fb3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd5c5fb3

Branch: refs/heads/branch-1.0
Commit: fd5c5fb3887914183a1510f5972e50d9365e02f5
Parents: a837182
Author: Sean Busbey <bu...@apache.org>
Authored: Fri Apr 29 08:44:28 2016 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Fri Apr 29 08:44:28 2016 -0500

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/client/Table.java      | 4 ----
 1 file changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd5c5fb3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index ee742b2..8c6169d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -606,13 +606,11 @@ public interface Table extends Closeable {
    * early and throw SocketTimeoutException.
    * @param operationTimeout the total timeout of each operation in millisecond.
    */
-  @InterfaceAudience.Private
   public void setOperationTimeout(int operationTimeout);
 
   /**
   * Get timeout (millisecond) of each operation in this Table instance.
    */
-  @InterfaceAudience.Private
   public int getOperationTimeout();
 
   /**
@@ -622,12 +620,10 @@ public interface Table extends Closeable {
    * retries exhausted or operation timeout reached.
    * @param rpcTimeout the timeout of each rpc request in millisecond.
    */
-  @InterfaceAudience.Private
   public void setRpcTimeout(int rpcTimeout);
 
   /**
    * Get timeout (millisecond) of each rpc request in this Table instance.
    */
-  @InterfaceAudience.Private
   public int getRpcTimeout();
 }
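
For context, the methods the revert strips the Private label from are the per-Table
timeout knobs. A minimal usage sketch, assuming an open Connection `connection`:

    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      table.setRpcTimeout(2000);         // cap each RPC attempt at 2s
      table.setOperationTimeout(10000);  // cap the whole retried operation at 10s
      table.get(new Get(Bytes.toBytes("row")));
    }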


[19/50] hbase git commit: HBASE-15214 Valid mutate Ops fail with RPC Codec in use and region moves across.

Posted by la...@apache.org.
HBASE-15214 Valid mutate Ops fail with RPC Codec in use and region moves across.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eb9af81f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eb9af81f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eb9af81f

Branch: refs/heads/branch-1.0
Commit: eb9af81f67ecc95b1b5fe2ee70f5142c1b29bc50
Parents: 7d90196
Author: anoopsjohn <an...@gmail.com>
Authored: Sat Feb 6 12:35:09 2016 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Sat Feb 6 12:35:09 2016 +0530

----------------------------------------------------------------------
 .../hadoop/hbase/protobuf/ProtobufUtil.java     | 18 +++++-------
 .../hbase/regionserver/RSRpcServices.java       | 30 ++++++++++++++++++--
 .../hadoop/hbase/client/TestMultiParallel.java  |  4 +++
 3 files changed, 38 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/eb9af81f/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index db7b074..47acfde 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -541,7 +541,7 @@ public final class ProtobufUtil {
     MutationType type = proto.getMutateType();
     assert type == MutationType.PUT: type.name();
     long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP;
-    Put put = null;
+    Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
     int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
     if (cellCount > 0) {
       // The proto has metadata only and the data is separate to be found in the cellScanner.
@@ -561,9 +561,7 @@ public final class ProtobufUtil {
         put.add(cell);
       }
     } else {
-      if (proto.hasRow()) {
-        put = new Put(proto.getRow().asReadOnlyByteBuffer(), timestamp);
-      } else {
+      if (put == null) {
         throw new IllegalArgumentException("row cannot be null");
       }
       // The proto has the metadata and the data itself
@@ -640,12 +638,8 @@ public final class ProtobufUtil {
   throws IOException {
     MutationType type = proto.getMutateType();
     assert type == MutationType.DELETE : type.name();
-    byte [] row = proto.hasRow()? proto.getRow().toByteArray(): null;
-    long timestamp = HConstants.LATEST_TIMESTAMP;
-    if (proto.hasTimestamp()) {
-      timestamp = proto.getTimestamp();
-    }
-    Delete delete = null;
+    long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
+    Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null;
     int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
     if (cellCount > 0) {
       // The proto has metadata only and the data is separate to be found in the cellScanner.
@@ -668,7 +662,9 @@ public final class ProtobufUtil {
         delete.addDeleteMarker(cell);
       }
     } else {
-      delete = new Delete(row, timestamp);
+      if (delete == null) {
+        throw new IllegalArgumentException("row cannot be null");
+      }
       for (ColumnValue column: proto.getColumnValueList()) {
         byte[] family = column.getFamily().toByteArray();
         for (QualifierValue qv: column.getQualifierValueList()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/eb9af81f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 2af42fe..2f4104c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -74,7 +74,6 @@ import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -152,11 +151,9 @@ import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.DNS;
@@ -1838,6 +1835,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } catch (IOException e) {
         regionActionResultBuilder.setException(ResponseConverter.buildException(e));
         responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
+        if (cellScanner != null) {
+          skipCellsForMutations(regionAction.getActionList(), cellScanner);
+        }
         continue;  // For this region it's a failure.
       }
 
@@ -1884,6 +1884,30 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     return responseBuilder.build();
   }
 
+  private void skipCellsForMutations(List<ClientProtos.Action> actions, CellScanner cellScanner) {
+    for (ClientProtos.Action action : actions) {
+      skipCellsForMutation(action, cellScanner);
+    }
+  }
+
+  private void skipCellsForMutation(ClientProtos.Action action, CellScanner cellScanner) {
+    try {
+      if (action.hasMutation()) {
+        MutationProto m = action.getMutation();
+        if (m.hasAssociatedCellCount()) {
+          for (int i = 0; i < m.getAssociatedCellCount(); i++) {
+            cellScanner.advance();
+          }
+        }
+      }
+    } catch (IOException e) {
+      // No need to handle this individual Mutation-level issue. Anyway this entire RegionAction
+      // is marked as failed since we could not find the Region here. On the client side the
+      // top-level RegionAction exception will be considered first.
+      LOG.error("Error while skipping Cells in CellScanner for invalid Region Mutations", e);
+    }
+  }
+
   /**
    * Mutate data in a table.
    *

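To see why the skip is needed, a hedged sketch of the accounting: a single CellScanner backs every action in a multi() request, so the cells belonging to a failed RegionAction must still be consumed or every subsequent action would read the wrong cells:

    int cellsToSkip = 0;
    for (ClientProtos.Action action : regionAction.getActionList()) {
      if (action.hasMutation() && action.getMutation().hasAssociatedCellCount()) {
        cellsToSkip += action.getMutation().getAssociatedCellCount();
      }
    }
    for (int i = 0; i < cellsToSkip; i++) {
      cellScanner.advance();   // equivalent to skipCellsForMutations() above
    }
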
http://git-wip-us.apache.org/repos/asf/hbase/blob/eb9af81f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 61cb16a..8cbd7e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -35,11 +35,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.codec.KeyValueCodec;
 import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -73,6 +75,8 @@ public class TestMultiParallel {
     //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
     //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
     //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
+    UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
+        KeyValueCodec.class.getCanonicalName());
     UTIL.startMiniCluster(slaves);
     HTable t = UTIL.createTable(TEST_TABLE, Bytes.toBytes(FAMILY));
     UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));


[26/50] hbase git commit: HBASE-15274 ClientSideRegionScanner's reaction to Scan#setBatch is not consistent between HBase versions (Youngjoon Kim)

Posted by la...@apache.org.
HBASE-15274 ClientSideRegionScanner's reaction to Scan#setBatch is not consistent between HBase versions (Youngjoon Kim)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ce516b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ce516b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ce516b0

Branch: refs/heads/branch-1.0
Commit: 2ce516b0fff93a4ee84bb53f9623c495d8dc3d13
Parents: f280c45
Author: Enis Soztutar <en...@apache.org>
Authored: Wed Feb 17 17:41:13 2016 -0800
Committer: Enis Soztutar <en...@apache.org>
Committed: Wed Feb 17 17:41:13 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ce516b0/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 9cb9494..f9dacdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -71,8 +71,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
   @Override
   public Result next() throws IOException {
     values.clear();
-
-    scanner.nextRaw(values, -1); // pass -1 as limit so that we see the whole row.
+    scanner.nextRaw(values);
     if (values.isEmpty()) {
       //we are done
       return null;

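A hedged illustration of the behavior change (constructor arguments abbreviated): with the one-argument nextRaw(), a batch limit configured on the Scan is honored again instead of being overridden by the hard-coded -1:

    Scan scan = new Scan();
    scan.setBatch(10);  // each Result now carries at most 10 cells, as in other HBase versions
    ClientSideRegionScanner scanner =
        new ClientSideRegionScanner(conf, fs, rootDir, htd, hri, scan, null);
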

[45/50] hbase git commit: HBASE-14345 Consolidate printUsage in IntegrationTestLoadAndVerify (Reid Chan)

Posted by la...@apache.org.
HBASE-14345 Consolidate printUsage in IntegrationTestLoadAndVerify (Reid Chan)

Change-Id: Iac3e3bbd3c0b8be848ded1c481334675789bd4cc


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89b432d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89b432d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89b432d4

Branch: refs/heads/branch-1.0
Commit: 89b432d452d81843f725466c5cd581599710d488
Parents: fb9a648
Author: Reid <re...@outlook.com>
Authored: Wed Aug 10 09:58:40 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Wed Aug 10 09:58:40 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/test/IntegrationTestLoadAndVerify.java | 12 +++++++-----
 .../IntegrationTestWithCellVisibilityLoadAndVerify.java |  4 ++--
 2 files changed, 9 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/89b432d4/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index 8a0181c..1930a9c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -398,10 +398,12 @@ public void cleanUpCluster() throws Exception {
     getTestingUtil(getConf()).deleteTable(htd.getTableName());
   }
 
-  public void usage() {
-    System.err.println(this.getClass().getSimpleName() + " [-Doptions] <load|verify|loadAndVerify>");
+  @Override
+  public void printUsage() {
+    printUsage(this.getClass().getSimpleName() + " <options>"
+        + " [-Doptions] <load|verify|loadAndVerify>", "Options", "");
+    System.err.println("");
     System.err.println("  Loads a table with row dependencies and verifies the dependency chains");
-    System.err.println("Options");
     System.err.println("  -Dloadmapper.table=<name>        Table to write/verify (default autogen)");
     System.err.println("  -Dloadmapper.backrefs=<n>        Number of backreferences per row (default 50)");
     System.err.println("  -Dloadmapper.num_to_write=<n>    Number of rows per mapper (default 100,000 per mapper)");
@@ -419,7 +421,7 @@ public void cleanUpCluster() throws Exception {
 
     String[] args = cmd.getArgs();
     if (args == null || args.length < 1 || args.length > 1) {
-      usage();
+      printUsage();
       throw new RuntimeException("Incorrect Number of args.");
     }
     toRun = args[0];
@@ -442,7 +444,7 @@ public void cleanUpCluster() throws Exception {
       doVerify= true;
     } else {
       System.err.println("Invalid argument " + toRun);
-      usage();
+      printUsage();
       return 1;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/89b432d4/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
index e68cb38..1c7fc62 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
@@ -346,7 +346,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
   }
 
   @Override
-  public void usage() {
+  public void printUsage() {
     System.err.println(this.getClass().getSimpleName() + " -u usera,userb [-Doptions]");
     System.err.println("  Loads a table with cell visibilities and verifies with Authorizations");
     System.err.println("Options");
@@ -386,7 +386,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
   protected void processOptions(CommandLine cmd) {
     List args = cmd.getArgList();
     if (args.size() > 0) {
-      usage();
+      printUsage();
       throw new RuntimeException("No args expected.");
     }
     // We always want loadAndVerify action


[50/50] hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

Posted by la...@apache.org.
HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a55842a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a55842a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a55842a0

Branch: refs/heads/branch-1.0
Commit: a55842a0a86040545eff6692317191acb84032ae
Parents: fba13a6
Author: chenheng <ch...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Wed Sep 7 16:04:18 2016 +0800

----------------------------------------------------------------------
 .../test/IntegrationTestBigLinkedList.java      | 34 ++++++++++++++------
 1 file changed, 24 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a55842a0/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 99be272..b0c5371 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -239,6 +239,11 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
     private static final Log LOG = LogFactory.getLog(Generator.class);
 
+    public static final String USAGE =  "Usage : " + Generator.class.getSimpleName() +
+            " <num mappers> <num nodes per map> <tmp output dir> [<width> <wrap multiplier> \n" +
+            "where <num nodes per map> should be a multiple of width*wrap multiplier, " +
+            "25M by default \n";
+
     static class GeneratorInputFormat extends InputFormat<BytesWritable,NullWritable> {
       static class GeneratorInputSplit extends InputSplit implements Writable {
         @Override
@@ -461,21 +466,20 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
     @Override
     public int run(String[] args) throws Exception {
       if (args.length < 3) {
-        System.out.println("Usage : " + Generator.class.getSimpleName() +
-            " <num mappers> <num nodes per map> <tmp output dir> [<width> <wrap multiplier>]");
-        System.out.println("   where <num nodes per map> should be a multiple of " +
-            " width*wrap multiplier, 25M by default");
-        return 0;
+        System.err.println(USAGE);
+        return 1;
       }
 
       int numMappers = Integer.parseInt(args[0]);
       long numNodes = Long.parseLong(args[1]);
       Path tmpOutput = new Path(args[2]);
       Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
-      Integer wrapMuplitplier = (args.length < 5) ? null : Integer.parseInt(args[4]);
-      return run(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+      Integer wrapMultiplier = (args.length < 5) ? null : Integer.parseInt(args[4]);
+      return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
     }
 
+
+
     protected void createSchema() throws IOException {
       Configuration conf = getConf();
       Admin admin = new HBaseAdmin(conf);
@@ -575,12 +579,22 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
     }
 
     public int run(int numMappers, long numNodes, Path tmpOutput,
-        Integer width, Integer wrapMuplitplier) throws Exception {
-      int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+        Integer width, Integer wrapMultiplier) throws Exception {
+      // width/wrapMultiplier may be null when omitted on the command line; fall back to
+      // the documented defaults (width 1M, wrap multiplier 25) before the sanity check.
+      long wrap = (long) (width == null ? 1000000 : width)
+          * (wrapMultiplier == null ? 25 : wrapMultiplier);
+      if (wrap < numNodes && numNodes % wrap != 0) {
+        /**
+         *  numNodes should be a multiple of width*wrapMultiplier.
+         *  If numNodes less than wrap, wrap will be set to be equal with numNodes,
+         *  See {@link GeneratorMapper#setup(Mapper.Context)}
+         * */
+        System.err.println(USAGE);
+        return 1;
+      }
+      int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
       if (ret > 0) {
         return ret;
       }
-      return runGenerator(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+      return runGenerator(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
     }
   }
 

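A minimal sketch of the new pre-flight check, using the documented defaults (width 1,000,000 and wrap multiplier 25, i.e. 25M) as assumed values:

    long numNodes = 30000000L;   // 30M nodes per map
    long wrap = 1000000L * 25;   // width * wrapMultiplier = 25M
    if (wrap < numNodes && numNodes % wrap != 0) {
      // 30M is not a multiple of 25M, so the Generator now prints USAGE and exits
      System.err.println(USAGE);
    }
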

[11/50] hbase git commit: HBASE-15153 Apply checkFamilies addendum on increment to 1.1 and 1.0

Posted by la...@apache.org.
HBASE-15153 Apply checkFamilies addendum on increment to 1.1 and 1.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ed2dbda5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ed2dbda5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ed2dbda5

Branch: refs/heads/branch-1.0
Commit: ed2dbda596480753b3e9eedd025d962c3a2cd968
Parents: 24002e2
Author: stack <st...@apache.org>
Authored: Fri Jan 22 09:44:39 2016 -0800
Committer: stack <st...@apache.org>
Committed: Fri Jan 22 09:45:11 2016 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ed2dbda5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 4b8f3b6..0c5dfe7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5836,6 +5836,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     checkReadOnly();
     checkResources();
     checkRow(increment.getRow(), "increment");
+    checkFamilies(increment.getFamilyCellMap().keySet());
     startRegionOperation(Operation.INCREMENT);
     this.writeRequestsCount.increment();
     try {
@@ -5850,7 +5851,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
       // wait on mvcc to complete before returning to the client. We also reorder the write so that
       // the update of memstore happens AFTER sync returns; i.e. the write pipeline does less
       // zigzagging now.
-      // 
+      //
       // See the comment on INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY
       // for the constraints that apply when you take this code path; it is correct but only if
       // Increments are used mutating an Increment Cell; mixing concurrent Put+Delete and Increment

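A hedged usage sketch: with checkFamilies() in the increment path, a request against an undeclared column family now fails up front with NoSuchColumnFamilyException rather than deeper inside the region operation (table and family names are illustrative):

    Increment inc = new Increment(Bytes.toBytes("row1"));
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
    table.increment(inc);   // family "cf" is validated before startRegionOperation()
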

[28/50] hbase git commit: HBASE-13963 Do not leak jdk.tools dependency from hbase-annotations

Posted by la...@apache.org.
HBASE-13963 Do not leak jdk.tools dependency from hbase-annotations

Signed-off-by: Sean Busbey <bu...@apache.org>

Conflicts:
	hbase-common/pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7947de62
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7947de62
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7947de62

Branch: refs/heads/branch-1.0
Commit: 7947de62853794354dc423c72d1496081af0ef16
Parents: c4c50a5
Author: Gábor Lipták <gl...@gmail.com>
Authored: Wed Jun 24 22:10:13 2015 -0400
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Mar 8 10:11:14 2016 -0800

----------------------------------------------------------------------
 hbase-client/pom.xml         | 6 ++++++
 hbase-common/pom.xml         | 6 ++++++
 hbase-examples/pom.xml       | 6 ++++++
 hbase-hadoop2-compat/pom.xml | 6 ++++++
 hbase-protocol/pom.xml       | 6 ++++++
 hbase-rest/pom.xml           | 6 ++++++
 hbase-testing-util/pom.xml   | 6 ++++++
 7 files changed, 42 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-client/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 770b60c..bce9030 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -109,6 +109,12 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-annotations</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-common/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 260b268..746ad93 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -225,6 +225,12 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-annotations</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 24af59e..426b6ed 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -264,6 +264,12 @@ if we can combine these profiles somehow -->
              <dependency>
                  <groupId>org.apache.hadoop</groupId>
                  <artifactId>hadoop-annotations</artifactId>
+                 <exclusions>
+                   <exclusion>
+                     <groupId>jdk.tools</groupId>
+                     <artifactId>jdk.tools</artifactId>
+                   </exclusion>
+                 </exclusions>
              </dependency>
              <dependency>
                  <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-hadoop2-compat/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 5198e5e..6f7c859 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -143,6 +143,12 @@ limitations under the License.
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-annotations</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-protocol/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index b805a96..eb8ec96 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -110,6 +110,12 @@
       <dependency>
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-annotations</artifactId>
+        <exclusions>
+          <exclusion>
+            <groupId>jdk.tools</groupId>
+            <artifactId>jdk.tools</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <!-- General dependencies -->
       <dependency>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-rest/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index e75d215..ea60655 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -189,6 +189,12 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-annotations</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7947de62/hbase-testing-util/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index e7249d4..2fdbdaf 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -59,6 +59,12 @@
             <artifactId>hbase-annotations</artifactId>
             <type>test-jar</type>
             <scope>compile</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>jdk.tools</groupId>
+                    <artifactId>jdk.tools</artifactId>
+                </exclusion>
+            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.hbase</groupId>


[36/50] hbase git commit: Label the new methods on Table introduced by HBASE-15645 as InterfaceAudience.Private

Posted by la...@apache.org.
Label the new methods on Table introduced by HBASE-15645 as InterfaceAudience.Private

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/719993e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/719993e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/719993e0

Branch: refs/heads/branch-1.0
Commit: 719993e0fe2b132b75a3689267ae4adff364b6aa
Parents: 48f158f
Author: Phil Yang <ud...@gmail.com>
Authored: Wed Apr 27 11:21:17 2016 +0800
Committer: stack <st...@apache.org>
Committed: Wed Apr 27 10:07:52 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/client/Table.java      | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/719993e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 8c6169d..ee742b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -606,11 +606,13 @@ public interface Table extends Closeable {
    * early and throw SocketTimeoutException.
    * @param operationTimeout the total timeout of each operation in millisecond.
    */
+  @InterfaceAudience.Private
   public void setOperationTimeout(int operationTimeout);
 
   /**
    * Get timeout (millisecond) of each operation for in Table instance.
    */
+  @InterfaceAudience.Private
   public int getOperationTimeout();
 
   /**
@@ -620,10 +622,12 @@ public interface Table extends Closeable {
    * retries exhausted or operation timeout reached.
    * @param rpcTimeout the timeout of each rpc request in millisecond.
    */
+  @InterfaceAudience.Private
   public void setRpcTimeout(int rpcTimeout);
 
   /**
    * Get timeout (millisecond) of each rpc request in this Table instance.
    */
+  @InterfaceAudience.Private
   public int getRpcTimeout();
 }

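Since the setters are now audience-private, a client would normally size these budgets through Configuration instead; a hedged sketch using the standard keys:

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.operation.timeout", 60000);  // total per-operation budget, ms
    conf.setInt("hbase.rpc.timeout", 10000);               // per-RPC budget, ms
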

[12/50] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

Posted by la...@apache.org.
HBASE-15019 Replication stuck when HDFS is restarted.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c42beaa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c42beaa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c42beaa

Branch: refs/heads/branch-1.0
Commit: 9c42beaa3423e1476aa87e56f59168ed5ce0f461
Parents: ed2dbda
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Jan 21 00:05:57 2016 -0600
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Thu Jan 28 09:49:54 2016 -0800

----------------------------------------------------------------------
 .../regionserver/ReplicationSource.java         | 30 +++++++++++--
 .../hbase/util/LeaseNotRecoveredException.java  | 47 ++++++++++++++++++++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  5 ++-
 3 files changed, 78 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c42beaa/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f3734b2..c542502 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -53,8 +53,11 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 
 import com.google.common.collect.Lists;
@@ -587,6 +590,11 @@ public class ReplicationSource extends Thread
          // TODO What happens if the log is missing in both places?
         }
       }
+    } catch (LeaseNotRecoveredException lnre) {
+      // HBASE-15019 the WAL was not closed due to some hiccup.
+      LOG.warn(peerClusterZnode + " Try to recover the WAL lease " + currentPath, lnre);
+      recoverLease(conf, currentPath);
+      this.reader = null;
     } catch (IOException ioe) {
       if (ioe instanceof EOFException && isCurrentLogEmpty()) return true;
       LOG.warn(this.peerClusterZnode + " Got: ", ioe);
@@ -606,6 +614,22 @@ public class ReplicationSource extends Thread
     return true;
   }
 
+  private void recoverLease(final Configuration conf, final Path path) {
+    try {
+      final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
+      FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
+      fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
+        @Override
+        public boolean progress() {
+          LOG.debug("recover WAL lease: " + path);
+          return isActive();
+        }
+      });
+    } catch (IOException e) {
+      LOG.warn("unable to recover lease for WAL: " + path, e);
+    }
+  }
+
   /*
    * Checks whether the current log file is empty, and it is not a recovered queue. This is to
    * handle scenario when in an idle cluster, there is no entry in the current log and we keep on
@@ -857,9 +881,9 @@ public class ReplicationSource extends Thread
      * @param p path to split
      * @return start time
      */
-    private long getTS(Path p) {
-      String[] parts = p.getName().split("\\.");
-      return Long.parseLong(parts[parts.length-1]);
+    private static long getTS(Path p) {
+      int tsIndex = p.getName().lastIndexOf('.') + 1;
+      return Long.parseLong(p.getName().substring(tsIndex));
     }
   }
 
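For context, a hedged sketch of what the new getTS() computes; the result matches the old split("\\.") form (the start timestamp is always the final dot-separated suffix, even when the encoded server name contains dots) while avoiding the regex and array allocation:

    Path p = new Path("host-1.example.com%2C16020%2C1453246052587.1453246118101");
    String name = p.getName();
    long ts = Long.parseLong(name.substring(name.lastIndexOf('.') + 1)); // 1453246118101
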

http://git-wip-us.apache.org/repos/asf/hbase/blob/9c42beaa/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
new file mode 100644
index 0000000..ca769b8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
@@ -0,0 +1,47 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Thrown when the lease was expected to be recovered,
+ * but the file can't be opened.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class LeaseNotRecoveredException extends HBaseIOException {
+  public LeaseNotRecoveredException() {
+    super();
+  }
+
+  public LeaseNotRecoveredException(String message) {
+    super(message);
+  }
+
+  public LeaseNotRecoveredException(String message, Throwable cause) {
+      super(message, cause);
+  }
+
+  public LeaseNotRecoveredException(Throwable cause) {
+      super(cause);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9c42beaa/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 4ef320a..02e8a75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.wal.WAL.Reader;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 
 // imports for things that haven't moved from regionserver.wal yet.
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
@@ -334,8 +335,10 @@ public class WALFactory {
                 throw iioe;
               }
             }
+            throw new LeaseNotRecoveredException(e);
+          } else {
+            throw e;
           }
-          throw e;
         }
       }
     } catch (IOException ie) {


[29/50] hbase git commit: HBASE-15430 Failed taking snapshot - Manifest proto-message too large (JunHo Cho)

Posted by la...@apache.org.
HBASE-15430 Failed taking snapshot - Manifest proto-message too large (JunHo Cho)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8a5820b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8a5820b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8a5820b

Branch: refs/heads/branch-1.0
Commit: d8a5820becfb776ba7d9f5597e56563ca67dc87e
Parents: 7947de6
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Mar 16 08:52:02 2016 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Mar 16 09:15:21 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  17 ++-
 .../hbase/snapshot/SnapshotTestingUtils.java    |  18 ++-
 .../hbase/snapshot/TestSnapshotManifest.java    | 143 +++++++++++++++++++
 3 files changed, 172 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d8a5820b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 38ccf08..f688e79 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.hbase.snapshot;
 
-import java.io.IOException;
+import com.google.protobuf.CodedInputStream;
+
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -30,7 +32,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -38,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
@@ -64,7 +66,9 @@ import org.apache.hadoop.hbase.util.Threads;
 public class SnapshotManifest {
   private static final Log LOG = LogFactory.getLog(SnapshotManifest.class);
 
-  private static final String DATA_MANIFEST_NAME = "data.manifest";
+  public static final String SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY = "snapshot.manifest.size.limit";
+
+  public static final String DATA_MANIFEST_NAME = "data.manifest";
 
   private List<SnapshotRegionManifest> regionManifests;
   private SnapshotDescription desc;
@@ -74,6 +78,7 @@ public class SnapshotManifest {
   private final Configuration conf;
   private final Path workingDir;
   private final FileSystem fs;
+  private int manifestSizeLimit;
 
   private SnapshotManifest(final Configuration conf, final FileSystem fs,
       final Path workingDir, final SnapshotDescription desc,
@@ -83,6 +88,8 @@ public class SnapshotManifest {
     this.workingDir = workingDir;
     this.conf = conf;
     this.fs = fs;
+
+    this.manifestSizeLimit = conf.getInt(SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 64 * 1024 * 1024);
   }
 
   /**
@@ -430,7 +437,9 @@ public class SnapshotManifest {
     FSDataInputStream in = null;
     try {
       in = fs.open(new Path(workingDir, DATA_MANIFEST_NAME));
-      return SnapshotDataManifest.parseFrom(in);
+      CodedInputStream cin = CodedInputStream.newInstance(in);
+      cin.setSizeLimit(manifestSizeLimit);
+      return SnapshotDataManifest.parseFrom(cin);
     } catch (FileNotFoundException e) {
       return null;
     } finally {

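Operationally, a cluster whose data.manifest outgrows the 64 MB default can raise the new limit before opening the snapshot; a hedged sketch (the 128 MB value is chosen only for illustration):

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("snapshot.manifest.size.limit", 128 * 1024 * 1024);
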
http://git-wip-us.apache.org/repos/asf/hbase/blob/d8a5820b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 89754b7..7390b7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -546,16 +546,30 @@ public class SnapshotTestingUtils {
       return createSnapshot(snapshotName, tableName, SnapshotManifestV1.DESCRIPTOR_VERSION);
     }
 
+    public SnapshotBuilder createSnapshotV1(final String snapshotName, final String tableName,
+        final int numRegions) throws IOException {
+      return createSnapshot(snapshotName, tableName, numRegions, SnapshotManifestV1.DESCRIPTOR_VERSION);
+    }
+
     public SnapshotBuilder createSnapshotV2(final String snapshotName, final String tableName)
         throws IOException {
       return createSnapshot(snapshotName, tableName, SnapshotManifestV2.DESCRIPTOR_VERSION);
     }
 
+    public SnapshotBuilder createSnapshotV2(final String snapshotName, final String tableName,
+        final int numRegions) throws IOException {
+      return createSnapshot(snapshotName, tableName, numRegions, SnapshotManifestV2.DESCRIPTOR_VERSION);
+    }
+
     private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
         final int version) throws IOException {
-      HTableDescriptor htd = createHtd(tableName);
+      return createSnapshot(snapshotName, tableName, TEST_NUM_REGIONS, version);
+    }
 
-      RegionData[] regions = createTable(htd, TEST_NUM_REGIONS);
+    private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
+        final int numRegions, final int version) throws IOException {
+      HTableDescriptor htd = createHtd(tableName);
+      RegionData[] regions = createTable(htd, numRegions);
 
       SnapshotDescription desc = SnapshotDescription.newBuilder()
         .setTable(htd.getNameAsString())

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8a5820b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
new file mode 100644
index 0000000..870bfd9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestSnapshotManifest {
+  private final Log LOG = LogFactory.getLog(getClass());
+
+  private static final String TABLE_NAME_STR = "testSnapshotManifest";
+  private static final TableName TABLE_NAME = TableName.valueOf(TABLE_NAME_STR);
+  private static final int TEST_NUM_REGIONS = 16000;
+
+  private static HBaseTestingUtility TEST_UTIL;
+  private Configuration conf;
+  private FileSystem fs;
+  private Path rootDir;
+  private Path snapshotDir;
+  private SnapshotDescription snapshotDesc;
+
+  @Before
+  public void setup() throws Exception {
+    TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+
+    rootDir = TEST_UTIL.getDataTestDir(TABLE_NAME_STR);
+    fs = TEST_UTIL.getTestFileSystem();
+    conf = TEST_UTIL.getConfiguration();
+
+    SnapshotTestingUtils.SnapshotMock snapshotMock =
+      new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir);
+    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder =
+      snapshotMock.createSnapshotV2("snapshot", TABLE_NAME_STR, 0);
+    snapshotDir = builder.commit();
+    snapshotDesc = builder.getSnapshotDescription();
+
+    SnapshotDataManifest.Builder dataManifestBuilder =
+      SnapshotDataManifest.newBuilder();
+    byte[] startKey = null;
+    byte[] stopKey = null;
+    for (int i = 1; i <= TEST_NUM_REGIONS; i++) {
+      stopKey = Bytes.toBytes(String.format("%016d", i));
+      HRegionInfo regionInfo = new HRegionInfo(TABLE_NAME, startKey, stopKey, false);
+      SnapshotRegionManifest.Builder dataRegionManifestBuilder =
+        SnapshotRegionManifest.newBuilder();
+
+      for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
+        SnapshotRegionManifest.FamilyFiles.Builder family =
+            SnapshotRegionManifest.FamilyFiles.newBuilder();
+        family.setFamilyName(ByteStringer.wrap(hcd.getName()));
+        for (int j = 0; j < 100; ++j) {
+          SnapshotRegionManifest.StoreFile.Builder sfManifest =
+            SnapshotRegionManifest.StoreFile.newBuilder();
+          sfManifest.setName(String.format("%032d", i));
+          sfManifest.setFileSize((1 + i) * (1 + i) * 1024);
+          family.addStoreFiles(sfManifest.build());
+        }
+        dataRegionManifestBuilder.addFamilyFiles(family.build());
+      }
+
+      dataRegionManifestBuilder.setRegionInfo(HRegionInfo.convert(regionInfo));
+      dataManifestBuilder.addRegionManifests(dataRegionManifestBuilder.build());
+
+      startKey = stopKey;
+    }
+
+    dataManifestBuilder.setTableSchema(builder.getTableDescriptor().convert());
+
+    SnapshotDataManifest dataManifest = dataManifestBuilder.build();
+    writeDataManifest(dataManifest);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fs.delete(rootDir,true);
+  }
+
+  @Test
+  public void testReadSnapshotManifest() throws IOException {
+    try {
+      SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
+      fail("fail to test snapshot manifest because message size is too small.");
+    } catch (InvalidProtocolBufferException ipbe) {
+      try {
+        conf.setInt(SnapshotManifest.SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 128 * 1024 * 1024);
+        SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
+        LOG.info("open snapshot manifest succeed.");
+      } catch (InvalidProtocolBufferException ipbe2) {
+        fail("fail to take snapshot because Manifest proto-message too large.");
+      }
+    }
+  }
+
+  private void writeDataManifest(final SnapshotDataManifest manifest)
+      throws IOException {
+    FSDataOutputStream stream = fs.create(new Path(snapshotDir, SnapshotManifest.DATA_MANIFEST_NAME));
+    try {
+      manifest.writeTo(stream);
+    } finally {
+      stream.close();
+    }
+  }
+}


[10/50] hbase git commit: HBASE-15152 Automatically include prefix-tree module in MR jobs if present

Posted by la...@apache.org.
HBASE-15152 Automatically include prefix-tree module in MR jobs if present


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/24002e23
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/24002e23
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/24002e23

Branch: refs/heads/branch-1.0
Commit: 24002e23623472d9e6d5e5a63c834eb442873bf1
Parents: 64e2262
Author: Jonathan M Hsieh <jm...@apache.org>
Authored: Thu Jan 21 07:25:00 2016 -0800
Committer: Jonathan M Hsieh <jm...@apache.org>
Committed: Thu Jan 21 19:01:54 2016 -0800

----------------------------------------------------------------------
 .../hbase/mapreduce/TableMapReduceUtil.java       | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/24002e23/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 410e411..ce273f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -705,7 +705,7 @@ public class TableMapReduceUtil {
    * Add HBase and its dependencies (only) to the job configuration.
    * <p>
    * This is intended as a low-level API, facilitating code reuse between this
-   * class and its mapred counterpart. It also of use to extenral tools that
+   * class and its mapred counterpart. It is also of use to external tools that
    * need to build a MapReduce job that interacts with HBase but want
    * fine-grained control over the jars shipped to the cluster.
    * </p>
@@ -714,6 +714,21 @@ public class TableMapReduceUtil {
    * @see <a href="https://issues.apache.org/jira/browse/PIG-3285">PIG-3285</a>
    */
   public static void addHBaseDependencyJars(Configuration conf) throws IOException {
+
+    // PrefixTreeCodec is part of the hbase-prefix-tree module. If not included in MR jobs jar
+    // dependencies, MR jobs that write encoded hfiles will fail.
+    // We use reflection here so as to prevent a circular module dependency.
+    // TODO - if we extract the MR into a module, make it depend on hbase-prefix-tree.
+    Class prefixTreeCodecClass = null;
+    try {
+      prefixTreeCodecClass =
+          Class.forName("org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec");
+    } catch (ClassNotFoundException e) {
+      // this will show up in unit tests but should not show in real deployments
+      LOG.warn("The hbase-prefix-tree module jar containing PrefixTreeCodec is not present." +
+          "  Continuing without it.");
+    }
+
     addDependencyJars(conf,
       // explicitly pull a class from each module
       org.apache.hadoop.hbase.HConstants.class,                      // hbase-common
@@ -721,6 +736,7 @@ public class TableMapReduceUtil {
       org.apache.hadoop.hbase.client.Put.class,                      // hbase-client
       org.apache.hadoop.hbase.CompatibilityFactory.class,            // hbase-hadoop-compat
       org.apache.hadoop.hbase.mapreduce.TableMapper.class,           // hbase-server
+      prefixTreeCodecClass, //  hbase-prefix-tree (if null will be skipped)
       // pull necessary dependencies
       org.apache.zookeeper.ZooKeeper.class,
       io.netty.channel.Channel.class,

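A hedged sketch of the caller's side (job name illustrative): any job that goes through addHBaseDependencyJars() now ships the prefix-tree jar automatically whenever the module is on the classpath:

    Job job = Job.getInstance(conf, "hfile-writer");
    TableMapReduceUtil.addHBaseDependencyJars(job.getConfiguration());
    // PrefixTreeCodec's jar is added when resolvable, silently skipped otherwise
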

[21/50] hbase git commit: HBASE-15198 RPC client not using Codec and CellBlock for puts by default.

Posted by la...@apache.org.
HBASE-15198 RPC client not using Codec and CellBlock for puts by default.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/62206fd9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/62206fd9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/62206fd9

Branch: refs/heads/branch-1.0
Commit: 62206fd9af286a2a40a7567debf890efa255b361
Parents: b9c3419
Author: anoopsjohn <an...@gmail.com>
Authored: Thu Feb 11 16:41:39 2016 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Thu Feb 11 16:41:39 2016 +0530

----------------------------------------------------------------------
 .../hadoop/hbase/client/ClusterConnection.java  |  6 +++++
 .../hadoop/hbase/client/ConnectionAdapter.java  |  5 ++++
 .../hadoop/hbase/client/ConnectionManager.java  |  5 ++++
 .../hbase/client/MultiServerCallable.java       |  8 ++-----
 .../hadoop/hbase/ipc/AbstractRpcClient.java     |  5 ++++
 .../org/apache/hadoop/hbase/ipc/RpcClient.java  |  6 +++++
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  4 ----
 .../hadoop/hbase/protobuf/RequestConverter.java | 11 ++++++---
 .../security/access/TestAccessController.java   | 25 --------------------
 9 files changed, 37 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 05d5c63..9ceb112 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -300,4 +300,10 @@ public interface ClusterConnection extends HConnection {
    * @return the configured client backoff policy
    */
   ClientBackoffPolicy getBackoffPolicy();
+
+  /**
+   * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+   *         supports cell blocks.
+   */
+  boolean hasCellBlockSupport();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 99da1be..d67df2a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -464,4 +464,9 @@ abstract class ConnectionAdapter implements ClusterConnection {
   public ClientBackoffPolicy getBackoffPolicy() {
     return wrappedConnection.getBackoffPolicy();
   }
+
+  @Override
+  public boolean hasCellBlockSupport() {
+    return wrappedConnection.hasCellBlockSupport();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 5b5ffa1..78fb17f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -2522,6 +2522,11 @@ class ConnectionManager {
     public boolean isManaged() {
       return managed;
     }
+
+    @Override
+    public boolean hasCellBlockSupport() {
+      return this.rpcClient.hasCellBlockSupport();
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 8d63105..382a8b6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -142,11 +141,8 @@ class MultiServerCallable<R> extends RegionServerCallable<MultiResponse> {
     // This is not exact -- the configuration could have changed on us after connection was set up
     // but it will do for now.
     HConnection connection = getConnection();
-    if (connection == null) return true; // Default is to do cellblocks.
-    Configuration configuration = connection.getConfiguration();
-    if (configuration == null) return true;
-    String codec = configuration.get(HConstants.RPC_CODEC_CONF_KEY, "");
-    return codec != null && codec.length() > 0;
+    if (!(connection instanceof ClusterConnection)) return true; // Default is to do cellblocks.
+    return ((ClusterConnection) connection).hasCellBlockSupport();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index df43f6f..df4ad46 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -139,6 +139,11 @@ public abstract class AbstractRpcClient implements RpcClient {
     }
   }
 
+  @Override
+  public boolean hasCellBlockSupport() {
+    return this.codec != null;
+  }
+
   /**
    * Encapsulate the ugly casting and RuntimeException conversion in private method.
    * @param conf configuration

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
index 4ededd2..4b17655 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
@@ -75,4 +75,10 @@ import java.io.Closeable;
    * using this client.
    */
   @Override public void close();
+
+  /**
+   * @return true when this client uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+   *         supports cell blocks.
+   */
+  boolean hasCellBlockSupport();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 47acfde..b6da835 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -1182,10 +1182,6 @@ public final class ProtobufUtil {
         valueBuilder.setValue(ByteStringer.wrap(
             cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
         valueBuilder.setTimestamp(cell.getTimestamp());
-        if(cell.getTagsLength() > 0) {
-          valueBuilder.setTags(ByteStringer.wrap(cell.getTagsArray(), cell.getTagsOffset(),
-              cell.getTagsLength()));
-        }
         if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) {
           KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte());
           valueBuilder.setDeleteType(toDeleteType(keyValueType));
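
[Editor's sketch] Dropping this block means tags attached to a client-side Cell no longer survive conversion into the protobuf Mutation, the path the removed TestAccessController test below was exercising. A sketch of the now-ineffective pattern (row, family, qualifier, value, and aclBytes are hypothetical; the constructors match the removed test):

    Put putWithDoomedTag(byte[] row, byte[] family, byte[] qualifier,
        byte[] value, byte[] aclBytes) throws IOException {
      KeyValue kv = new KeyValue(row, family, qualifier,
          HConstants.LATEST_TIMESTAMP, value,
          new Tag[] { new Tag(AccessControlLists.ACL_TAG_TYPE, aclBytes) });
      // The ACL tag is silently dropped when this Put is converted with
      // toMutation(); it no longer reaches the server through this path.
      return new Put(row).add(kv);
    }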

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 917a5a2..e46acef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.regex.Pattern;
 
-import org.apache.hadoop.hbase.util.ByteStringer;
-
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -666,8 +664,15 @@ public final class RequestConverter {
         cells.add(i);
         builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(
           MutationType.INCREMENT, i, mutationBuilder, action.getNonce())));
+      } else if (row instanceof RegionCoprocessorServiceExec) {
+        RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
+        builder.addAction(actionBuilder.setServiceCall(ClientProtos.CoprocessorServiceCall
+            .newBuilder().setRow(ByteStringer.wrap(exec.getRow()))
+            .setServiceName(exec.getMethod().getService().getFullName())
+            .setMethodName(exec.getMethod().getName())
+            .setRequest(exec.getRequest().toByteString())));
       } else if (row instanceof RowMutations) {
-        continue; // ignore RowMutations
+        throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
       } else {
         throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
       }
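
[Editor's sketch] Two behavior changes in one hunk: multi requests can now carry coprocessor service calls, and a RowMutations in a batch is a hard error instead of being silently skipped. Callers that relied on the old silent skip should switch to Table#mutateRow; a minimal migration sketch (the method and its parameters are hypothetical):

    void moveOffBatch(Table table, byte[] row, byte[] family,
        byte[] qualifier, byte[] value) throws IOException {
      RowMutations rm = new RowMutations(row);
      rm.add(new Put(row).add(family, qualifier, value));
      rm.add(new Delete(row).deleteColumns(family, qualifier));
      // Atomic on a single row; handing rm to batch() would now hit the
      // UnsupportedOperationException thrown by the converter above.
      table.mutateRow(rm);
    }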

http://git-wip-us.apache.org/repos/asf/hbase/blob/62206fd9/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 2d78b74..e7a5aaf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Connection;
@@ -2301,30 +2300,6 @@ public class TestAccessController extends SecureTestUtil {
   }
 
   @Test
-  public void testReservedCellTags() throws Exception {
-    AccessTestAction putWithReservedTag = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        try(Connection conn = ConnectionFactory.createConnection(conf);
-            Table t = conn.getTable(TEST_TABLE);) {
-          KeyValue kv = new KeyValue(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER,
-            HConstants.LATEST_TIMESTAMP, HConstants.EMPTY_BYTE_ARRAY,
-            new Tag[] { new Tag(AccessControlLists.ACL_TAG_TYPE,
-              ProtobufUtil.toUsersAndPermissions(USER_OWNER.getShortName(),
-                new Permission(Permission.Action.READ)).toByteArray()) });
-          t.put(new Put(TEST_ROW).add(kv));
-        }
-        return null;
-      }
-    };
-
-    // Current user is superuser
-    verifyAllowed(putWithReservedTag, User.getCurrent());
-    // No other user should be allowed
-    verifyDenied(putWithReservedTag, USER_OWNER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO);
-  }
-
-  @Test
   public void testGetNamespacePermission() throws Exception {
     String namespace = "testGetNamespacePermission";
     NamespaceDescriptor desc = NamespaceDescriptor.create(namespace).build();