Posted to commits@hbase.apache.org by sy...@apache.org on 2016/01/07 17:58:33 UTC

[01/17] hbase git commit: HBASE-14902 Revert some of the stringency recently introduced by checkstyle tightening

Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 00656688f -> 5266b0770


HBASE-14902 Revert some of the stringency recently introduced by checkstyle tightening


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/998b9371
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/998b9371
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/998b9371

Branch: refs/heads/hbase-12439
Commit: 998b9371c9d9a6fe4a93e734ba0055c3e20e0b3e
Parents: 0065668
Author: stack <st...@apache.org>
Authored: Mon Jan 4 14:21:59 2016 -0800
Committer: stack <st...@apache.org>
Committed: Mon Jan 4 14:22:12 2016 -0800

----------------------------------------------------------------------
 hbase-checkstyle/src/main/resources/hbase/checkstyle.xml | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/998b9371/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
index e7272c5..6670182 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
@@ -41,7 +41,9 @@
     http://checkstyle.sourceforge.net/config_blocks.html -->
     <module name="EmptyBlock"/>
     <module name="LeftCurly"/>
-    <module name="NeedBraces"/>
+    <module name="NeedBraces">
+      <property name="allowSingleLineStatement" value="true"/>
+    </module>
 
     <!-- Class Design Checks
     http://checkstyle.sourceforge.net/config_design.html -->
@@ -77,7 +79,9 @@
 
     <!-- Javadoc Checks
     http://checkstyle.sourceforge.net/config_javadoc.html -->
-    <module name="JavadocTagContinuationIndentation"/>
+    <module name="JavadocTagContinuationIndentation">
+      <property name="offset" value="2"/>
+    </module>
     <module name="NonEmptyAtclauseDescription"/>
 
     <!-- Miscellaneous Checks

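For illustration, a minimal snippet (hypothetical, not from the HBase tree) of code the relaxed configuration now accepts: NeedBraces with allowSingleLineStatement=true stops flagging a braceless statement that sits on the same line as its condition, and JavadocTagContinuationIndentation with offset=2 accepts two-space continuation indents instead of checkstyle's four-space default.

    public class BraceExample {
      /**
       * Returns the larger of two values.
       * @param a first value; with offset=2 this continuation line may be
       *   indented two spaces rather than the default four
       * @param b second value
       */
      public static int max(int a, int b) {
        // Permitted by NeedBraces once allowSingleLineStatement=true.
        if (a > b) return a;
        return b;
      }
    }
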

[06/17] hbase git commit: HBASE-14888 ClusterSchema: Add Namespace Operations

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 6400887..043d549 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -17439,6 +17439,16 @@ public final class MasterProtos {
 
   public interface CreateNamespaceResponseOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
+
+    // optional uint64 proc_id = 1;
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    boolean hasProcId();
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    long getProcId();
   }
   /**
    * Protobuf type {@code hbase.pb.CreateNamespaceResponse}
@@ -17473,6 +17483,7 @@ public final class MasterProtos {
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       initFields();
+      int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
       try {
@@ -17490,6 +17501,11 @@ public final class MasterProtos {
               }
               break;
             }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              procId_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -17529,7 +17545,25 @@ public final class MasterProtos {
       return PARSER;
     }
 
+    private int bitField0_;
+    // optional uint64 proc_id = 1;
+    public static final int PROC_ID_FIELD_NUMBER = 1;
+    private long procId_;
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    public boolean hasProcId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    public long getProcId() {
+      return procId_;
+    }
+
     private void initFields() {
+      procId_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -17543,6 +17577,9 @@ public final class MasterProtos {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, procId_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -17552,6 +17589,10 @@ public final class MasterProtos {
       if (size != -1) return size;
 
       size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, procId_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -17575,6 +17616,11 @@ public final class MasterProtos {
       org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) obj;
 
       boolean result = true;
+      result = result && (hasProcId() == other.hasProcId());
+      if (hasProcId()) {
+        result = result && (getProcId()
+            == other.getProcId());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -17588,6 +17634,10 @@ public final class MasterProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasProcId()) {
+        hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getProcId());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -17697,6 +17747,8 @@ public final class MasterProtos {
 
       public Builder clear() {
         super.clear();
+        procId_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
 
@@ -17723,6 +17775,13 @@ public final class MasterProtos {
 
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse buildPartial() {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.procId_ = procId_;
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
@@ -17738,6 +17797,9 @@ public final class MasterProtos {
 
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()) return this;
+        if (other.hasProcId()) {
+          setProcId(other.getProcId());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -17763,6 +17825,40 @@ public final class MasterProtos {
         }
         return this;
       }
+      private int bitField0_;
+
+      // optional uint64 proc_id = 1;
+      private long procId_ ;
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public boolean hasProcId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public long getProcId() {
+        return procId_;
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public Builder setProcId(long value) {
+        bitField0_ |= 0x00000001;
+        procId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public Builder clearProcId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        procId_ = 0L;
+        onChanged();
+        return this;
+      }
 
       // @@protoc_insertion_point(builder_scope:hbase.pb.CreateNamespaceResponse)
     }
@@ -18474,6 +18570,16 @@ public final class MasterProtos {
 
   public interface DeleteNamespaceResponseOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
+
+    // optional uint64 proc_id = 1;
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    boolean hasProcId();
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    long getProcId();
   }
   /**
    * Protobuf type {@code hbase.pb.DeleteNamespaceResponse}
@@ -18508,6 +18614,7 @@ public final class MasterProtos {
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       initFields();
+      int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
       try {
@@ -18525,6 +18632,11 @@ public final class MasterProtos {
               }
               break;
             }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              procId_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -18564,7 +18676,25 @@ public final class MasterProtos {
       return PARSER;
     }
 
+    private int bitField0_;
+    // optional uint64 proc_id = 1;
+    public static final int PROC_ID_FIELD_NUMBER = 1;
+    private long procId_;
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    public boolean hasProcId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    public long getProcId() {
+      return procId_;
+    }
+
     private void initFields() {
+      procId_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -18578,6 +18708,9 @@ public final class MasterProtos {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, procId_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -18587,6 +18720,10 @@ public final class MasterProtos {
       if (size != -1) return size;
 
       size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, procId_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -18610,6 +18747,11 @@ public final class MasterProtos {
       org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) obj;
 
       boolean result = true;
+      result = result && (hasProcId() == other.hasProcId());
+      if (hasProcId()) {
+        result = result && (getProcId()
+            == other.getProcId());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -18623,6 +18765,10 @@ public final class MasterProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasProcId()) {
+        hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getProcId());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -18732,6 +18878,8 @@ public final class MasterProtos {
 
       public Builder clear() {
         super.clear();
+        procId_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
 
@@ -18758,6 +18906,13 @@ public final class MasterProtos {
 
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse buildPartial() {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.procId_ = procId_;
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
@@ -18773,6 +18928,9 @@ public final class MasterProtos {
 
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()) return this;
+        if (other.hasProcId()) {
+          setProcId(other.getProcId());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -18798,6 +18956,40 @@ public final class MasterProtos {
         }
         return this;
       }
+      private int bitField0_;
+
+      // optional uint64 proc_id = 1;
+      private long procId_ ;
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public boolean hasProcId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public long getProcId() {
+        return procId_;
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public Builder setProcId(long value) {
+        bitField0_ |= 0x00000001;
+        procId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public Builder clearProcId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        procId_ = 0L;
+        onChanged();
+        return this;
+      }
 
       // @@protoc_insertion_point(builder_scope:hbase.pb.DeleteNamespaceResponse)
     }
@@ -19553,6 +19745,16 @@ public final class MasterProtos {
 
   public interface ModifyNamespaceResponseOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
+
+    // optional uint64 proc_id = 1;
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    boolean hasProcId();
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    long getProcId();
   }
   /**
    * Protobuf type {@code hbase.pb.ModifyNamespaceResponse}
@@ -19587,6 +19789,7 @@ public final class MasterProtos {
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       initFields();
+      int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
       try {
@@ -19604,6 +19807,11 @@ public final class MasterProtos {
               }
               break;
             }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              procId_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -19643,7 +19851,25 @@ public final class MasterProtos {
       return PARSER;
     }
 
+    private int bitField0_;
+    // optional uint64 proc_id = 1;
+    public static final int PROC_ID_FIELD_NUMBER = 1;
+    private long procId_;
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    public boolean hasProcId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional uint64 proc_id = 1;</code>
+     */
+    public long getProcId() {
+      return procId_;
+    }
+
     private void initFields() {
+      procId_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -19657,6 +19883,9 @@ public final class MasterProtos {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, procId_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -19666,6 +19895,10 @@ public final class MasterProtos {
       if (size != -1) return size;
 
       size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, procId_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -19689,6 +19922,11 @@ public final class MasterProtos {
       org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) obj;
 
       boolean result = true;
+      result = result && (hasProcId() == other.hasProcId());
+      if (hasProcId()) {
+        result = result && (getProcId()
+            == other.getProcId());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -19702,6 +19940,10 @@ public final class MasterProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasProcId()) {
+        hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getProcId());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -19811,6 +20053,8 @@ public final class MasterProtos {
 
       public Builder clear() {
         super.clear();
+        procId_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
 
@@ -19837,6 +20081,13 @@ public final class MasterProtos {
 
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse buildPartial() {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.procId_ = procId_;
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
@@ -19852,6 +20103,9 @@ public final class MasterProtos {
 
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()) return this;
+        if (other.hasProcId()) {
+          setProcId(other.getProcId());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -19877,6 +20131,40 @@ public final class MasterProtos {
         }
         return this;
       }
+      private int bitField0_;
+
+      // optional uint64 proc_id = 1;
+      private long procId_ ;
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public boolean hasProcId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public long getProcId() {
+        return procId_;
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public Builder setProcId(long value) {
+        bitField0_ |= 0x00000001;
+        procId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 proc_id = 1;</code>
+       */
+      public Builder clearProcId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        procId_ = 0L;
+        onChanged();
+        return this;
+      }
 
       // @@protoc_insertion_point(builder_scope:hbase.pb.ModifyNamespaceResponse)
     }
@@ -61891,255 +62179,257 @@ public final class MasterProtos {
       "\001(\004\"~\n\026CreateNamespaceRequest\022:\n\023namespa" +
       "ceDescriptor\030\001 \002(\0132\035.hbase.pb.NamespaceD" +
       "escriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non",
-      "ce\030\003 \001(\004:\0010\"\031\n\027CreateNamespaceResponse\"Y" +
-      "\n\026DeleteNamespaceRequest\022\025\n\rnamespaceNam" +
-      "e\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce" +
-      "\030\003 \001(\004:\0010\"\031\n\027DeleteNamespaceResponse\"~\n\026" +
-      "ModifyNamespaceRequest\022:\n\023namespaceDescr" +
-      "iptor\030\001 \002(\0132\035.hbase.pb.NamespaceDescript" +
-      "or\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" +
-      "\004:\0010\"\031\n\027ModifyNamespaceResponse\"6\n\035GetNa" +
-      "mespaceDescriptorRequest\022\025\n\rnamespaceNam" +
-      "e\030\001 \002(\t\"\\\n\036GetNamespaceDescriptorRespons",
-      "e\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase.p" +
-      "b.NamespaceDescriptor\"!\n\037ListNamespaceDe" +
-      "scriptorsRequest\"^\n ListNamespaceDescrip" +
-      "torsResponse\022:\n\023namespaceDescriptor\030\001 \003(" +
-      "\0132\035.hbase.pb.NamespaceDescriptor\"?\n&List" +
-      "TableDescriptorsByNamespaceRequest\022\025\n\rna" +
-      "mespaceName\030\001 \002(\t\"U\n\'ListTableDescriptor" +
-      "sByNamespaceResponse\022*\n\013tableSchema\030\001 \003(" +
-      "\0132\025.hbase.pb.TableSchema\"9\n ListTableNam" +
-      "esByNamespaceRequest\022\025\n\rnamespaceName\030\001 ",
-      "\002(\t\"K\n!ListTableNamesByNamespaceResponse" +
-      "\022&\n\ttableName\030\001 \003(\0132\023.hbase.pb.TableName" +
-      "\"\021\n\017ShutdownRequest\"\022\n\020ShutdownResponse\"" +
-      "\023\n\021StopMasterRequest\"\024\n\022StopMasterRespon" +
-      "se\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017B" +
-      "alanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031" +
-      "SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n" +
-      "\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunning" +
-      "Response\022\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030" +
-      "IsBalancerEnabledRequest\",\n\031IsBalancerEn",
-      "abledResponse\022\017\n\007enabled\030\001 \002(\010\"\022\n\020Normal" +
-      "izeRequest\"+\n\021NormalizeResponse\022\026\n\016norma" +
-      "lizer_ran\030\001 \002(\010\")\n\033SetNormalizerRunningR" +
-      "equest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunni" +
-      "ngResponse\022\035\n\025prev_normalizer_value\030\001 \001(" +
-      "\010\"\034\n\032IsNormalizerEnabledRequest\".\n\033IsNor" +
-      "malizerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"" +
-      "\027\n\025RunCatalogScanRequest\"-\n\026RunCatalogSc" +
-      "anResponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033Enabl" +
-      "eCatalogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2",
-      "\n\034EnableCatalogJanitorResponse\022\022\n\nprev_v" +
-      "alue\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledReq" +
-      "uest\"0\n\037IsCatalogJanitorEnabledResponse\022" +
-      "\r\n\005value\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010sna" +
-      "pshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescripti" +
-      "on\",\n\020SnapshotResponse\022\030\n\020expected_timeo" +
-      "ut\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest" +
-      "\"Q\n\035GetCompletedSnapshotsResponse\0220\n\tsna" +
-      "pshots\030\001 \003(\0132\035.hbase.pb.SnapshotDescript" +
-      "ion\"H\n\025DeleteSnapshotRequest\022/\n\010snapshot",
-      "\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"\030\n" +
-      "\026DeleteSnapshotResponse\"I\n\026RestoreSnapsh" +
-      "otRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.S" +
-      "napshotDescription\"\031\n\027RestoreSnapshotRes" +
-      "ponse\"H\n\025IsSnapshotDoneRequest\022/\n\010snapsh" +
-      "ot\030\001 \001(\0132\035.hbase.pb.SnapshotDescription\"" +
-      "^\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:" +
-      "\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.Snap" +
-      "shotDescription\"O\n\034IsRestoreSnapshotDone" +
-      "Request\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Sna",
-      "pshotDescription\"4\n\035IsRestoreSnapshotDon" +
-      "eResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSch" +
-      "emaAlterStatusRequest\022\'\n\ntable_name\030\001 \002(" +
-      "\0132\023.hbase.pb.TableName\"T\n\034GetSchemaAlter" +
-      "StatusResponse\022\035\n\025yet_to_update_regions\030" +
-      "\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTabl" +
-      "eDescriptorsRequest\022(\n\013table_names\030\001 \003(\013" +
-      "2\023.hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022" +
-      "include_sys_tables\030\003 \001(\010:\005false\022\021\n\tnames" +
-      "pace\030\004 \001(\t\"J\n\033GetTableDescriptorsRespons",
-      "e\022+\n\014table_schema\030\001 \003(\0132\025.hbase.pb.Table" +
-      "Schema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030" +
-      "\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false" +
-      "\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesResp" +
-      "onse\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.Tab" +
-      "leName\"?\n\024GetTableStateRequest\022\'\n\ntable_" +
-      "name\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTa" +
-      "bleStateResponse\022)\n\013table_state\030\001 \002(\0132\024." +
-      "hbase.pb.TableState\"\031\n\027GetClusterStatusR" +
-      "equest\"K\n\030GetClusterStatusResponse\022/\n\016cl",
-      "uster_status\030\001 \002(\0132\027.hbase.pb.ClusterSta" +
-      "tus\"\030\n\026IsMasterRunningRequest\"4\n\027IsMaste" +
-      "rRunningResponse\022\031\n\021is_master_running\030\001 " +
-      "\002(\010\"I\n\024ExecProcedureRequest\0221\n\tprocedure" +
-      "\030\001 \002(\0132\036.hbase.pb.ProcedureDescription\"F" +
-      "\n\025ExecProcedureResponse\022\030\n\020expected_time" +
-      "out\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProc" +
-      "edureDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hb" +
-      "ase.pb.ProcedureDescription\"`\n\027IsProcedu" +
-      "reDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010s",
-      "napshot\030\002 \001(\0132\036.hbase.pb.ProcedureDescri" +
-      "ption\",\n\031GetProcedureResultRequest\022\017\n\007pr" +
-      "oc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultRespon" +
-      "se\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedur" +
-      "eResultResponse.State\022\022\n\nstart_time\030\002 \001(" +
-      "\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224" +
-      "\n\texception\030\005 \001(\0132!.hbase.pb.ForeignExce" +
-      "ptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007" +
-      "RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedur" +
-      "eRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterrup",
-      "tIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProcedure" +
-      "Response\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027" +
-      "\n\025ListProceduresRequest\"@\n\026ListProcedure" +
-      "sResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb." +
-      "Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_na" +
-      "me\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespac" +
-      "e\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.T" +
-      "ableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_g" +
-      "lobals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.p" +
-      "b.ThrottleRequest\"\022\n\020SetQuotaResponse\"J\n",
-      "\037MajorCompactionTimestampRequest\022\'\n\ntabl" +
-      "e_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(Maj" +
-      "orCompactionTimestampForRegionRequest\022)\n" +
-      "\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier" +
-      "\"@\n MajorCompactionTimestampResponse\022\034\n\024" +
-      "compaction_timestamp\030\001 \002(\003\"\035\n\033SecurityCa" +
-      "pabilitiesRequest\"\354\001\n\034SecurityCapabiliti" +
-      "esResponse\022G\n\014capabilities\030\001 \003(\01621.hbase" +
-      ".pb.SecurityCapabilitiesResponse.Capabil" +
-      "ity\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICAT",
-      "ION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTH" +
-      "ORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017C" +
-      "ELL_VISIBILITY\020\0042\374&\n\rMasterService\022e\n\024Ge" +
-      "tSchemaAlterStatus\022%.hbase.pb.GetSchemaA" +
-      "lterStatusRequest\032&.hbase.pb.GetSchemaAl" +
-      "terStatusResponse\022b\n\023GetTableDescriptors" +
-      "\022$.hbase.pb.GetTableDescriptorsRequest\032%" +
-      ".hbase.pb.GetTableDescriptorsResponse\022P\n" +
-      "\rGetTableNames\022\036.hbase.pb.GetTableNamesR" +
-      "equest\032\037.hbase.pb.GetTableNamesResponse\022",
-      "Y\n\020GetClusterStatus\022!.hbase.pb.GetCluste" +
-      "rStatusRequest\032\".hbase.pb.GetClusterStat" +
-      "usResponse\022V\n\017IsMasterRunning\022 .hbase.pb" +
-      ".IsMasterRunningRequest\032!.hbase.pb.IsMas" +
-      "terRunningResponse\022D\n\tAddColumn\022\032.hbase." +
-      "pb.AddColumnRequest\032\033.hbase.pb.AddColumn" +
-      "Response\022M\n\014DeleteColumn\022\035.hbase.pb.Dele" +
-      "teColumnRequest\032\036.hbase.pb.DeleteColumnR" +
-      "esponse\022M\n\014ModifyColumn\022\035.hbase.pb.Modif" +
-      "yColumnRequest\032\036.hbase.pb.ModifyColumnRe",
-      "sponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegi" +
-      "onRequest\032\034.hbase.pb.MoveRegionResponse\022" +
-      "k\n\026DispatchMergingRegions\022\'.hbase.pb.Dis" +
-      "patchMergingRegionsRequest\032(.hbase.pb.Di" +
-      "spatchMergingRegionsResponse\022M\n\014AssignRe" +
-      "gion\022\035.hbase.pb.AssignRegionRequest\032\036.hb" +
-      "ase.pb.AssignRegionResponse\022S\n\016UnassignR" +
-      "egion\022\037.hbase.pb.UnassignRegionRequest\032 " +
-      ".hbase.pb.UnassignRegionResponse\022P\n\rOffl" +
-      "ineRegion\022\036.hbase.pb.OfflineRegionReques",
-      "t\032\037.hbase.pb.OfflineRegionResponse\022J\n\013De" +
-      "leteTable\022\034.hbase.pb.DeleteTableRequest\032" +
-      "\035.hbase.pb.DeleteTableResponse\022P\n\rtrunca" +
-      "teTable\022\036.hbase.pb.TruncateTableRequest\032" +
-      "\037.hbase.pb.TruncateTableResponse\022J\n\013Enab" +
-      "leTable\022\034.hbase.pb.EnableTableRequest\032\035." +
-      "hbase.pb.EnableTableResponse\022M\n\014DisableT" +
-      "able\022\035.hbase.pb.DisableTableRequest\032\036.hb" +
-      "ase.pb.DisableTableResponse\022J\n\013ModifyTab" +
-      "le\022\034.hbase.pb.ModifyTableRequest\032\035.hbase",
-      ".pb.ModifyTableResponse\022J\n\013CreateTable\022\034" +
-      ".hbase.pb.CreateTableRequest\032\035.hbase.pb." +
-      "CreateTableResponse\022A\n\010Shutdown\022\031.hbase." +
-      "pb.ShutdownRequest\032\032.hbase.pb.ShutdownRe" +
-      "sponse\022G\n\nStopMaster\022\033.hbase.pb.StopMast" +
-      "erRequest\032\034.hbase.pb.StopMasterResponse\022" +
-      ">\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031.h" +
-      "base.pb.BalanceResponse\022_\n\022SetBalancerRu" +
-      "nning\022#.hbase.pb.SetBalancerRunningReque" +
-      "st\032$.hbase.pb.SetBalancerRunningResponse",
-      "\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalan" +
-      "cerEnabledRequest\032#.hbase.pb.IsBalancerE" +
-      "nabledResponse\022D\n\tNormalize\022\032.hbase.pb.N" +
-      "ormalizeRequest\032\033.hbase.pb.NormalizeResp" +
-      "onse\022e\n\024SetNormalizerRunning\022%.hbase.pb." +
-      "SetNormalizerRunningRequest\032&.hbase.pb.S" +
-      "etNormalizerRunningResponse\022b\n\023IsNormali" +
-      "zerEnabled\022$.hbase.pb.IsNormalizerEnable" +
-      "dRequest\032%.hbase.pb.IsNormalizerEnabledR" +
-      "esponse\022S\n\016RunCatalogScan\022\037.hbase.pb.Run",
-      "CatalogScanRequest\032 .hbase.pb.RunCatalog" +
-      "ScanResponse\022e\n\024EnableCatalogJanitor\022%.h" +
-      "base.pb.EnableCatalogJanitorRequest\032&.hb" +
-      "ase.pb.EnableCatalogJanitorResponse\022n\n\027I" +
-      "sCatalogJanitorEnabled\022(.hbase.pb.IsCata" +
-      "logJanitorEnabledRequest\032).hbase.pb.IsCa" +
-      "talogJanitorEnabledResponse\022^\n\021ExecMaste" +
-      "rService\022#.hbase.pb.CoprocessorServiceRe" +
-      "quest\032$.hbase.pb.CoprocessorServiceRespo" +
-      "nse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotReque",
-      "st\032\032.hbase.pb.SnapshotResponse\022h\n\025GetCom" +
-      "pletedSnapshots\022&.hbase.pb.GetCompletedS" +
-      "napshotsRequest\032\'.hbase.pb.GetCompletedS" +
-      "napshotsResponse\022S\n\016DeleteSnapshot\022\037.hba" +
-      "se.pb.DeleteSnapshotRequest\032 .hbase.pb.D" +
-      "eleteSnapshotResponse\022S\n\016IsSnapshotDone\022" +
-      "\037.hbase.pb.IsSnapshotDoneRequest\032 .hbase" +
-      ".pb.IsSnapshotDoneResponse\022V\n\017RestoreSna" +
-      "pshot\022 .hbase.pb.RestoreSnapshotRequest\032" +
-      "!.hbase.pb.RestoreSnapshotResponse\022h\n\025Is",
-      "RestoreSnapshotDone\022&.hbase.pb.IsRestore" +
-      "SnapshotDoneRequest\032\'.hbase.pb.IsRestore" +
-      "SnapshotDoneResponse\022P\n\rExecProcedure\022\036." +
-      "hbase.pb.ExecProcedureRequest\032\037.hbase.pb" +
-      ".ExecProcedureResponse\022W\n\024ExecProcedureW" +
-      "ithRet\022\036.hbase.pb.ExecProcedureRequest\032\037" +
-      ".hbase.pb.ExecProcedureResponse\022V\n\017IsPro" +
-      "cedureDone\022 .hbase.pb.IsProcedureDoneReq" +
-      "uest\032!.hbase.pb.IsProcedureDoneResponse\022" +
-      "V\n\017ModifyNamespace\022 .hbase.pb.ModifyName",
-      "spaceRequest\032!.hbase.pb.ModifyNamespaceR" +
-      "esponse\022V\n\017CreateNamespace\022 .hbase.pb.Cr" +
-      "eateNamespaceRequest\032!.hbase.pb.CreateNa" +
-      "mespaceResponse\022V\n\017DeleteNamespace\022 .hba" +
-      "se.pb.DeleteNamespaceRequest\032!.hbase.pb." +
-      "DeleteNamespaceResponse\022k\n\026GetNamespaceD" +
-      "escriptor\022\'.hbase.pb.GetNamespaceDescrip" +
-      "torRequest\032(.hbase.pb.GetNamespaceDescri" +
-      "ptorResponse\022q\n\030ListNamespaceDescriptors" +
-      "\022).hbase.pb.ListNamespaceDescriptorsRequ",
-      "est\032*.hbase.pb.ListNamespaceDescriptorsR" +
-      "esponse\022\206\001\n\037ListTableDescriptorsByNamesp" +
-      "ace\0220.hbase.pb.ListTableDescriptorsByNam" +
-      "espaceRequest\0321.hbase.pb.ListTableDescri" +
-      "ptorsByNamespaceResponse\022t\n\031ListTableNam" +
-      "esByNamespace\022*.hbase.pb.ListTableNamesB" +
-      "yNamespaceRequest\032+.hbase.pb.ListTableNa" +
-      "mesByNamespaceResponse\022P\n\rGetTableState\022" +
-      "\036.hbase.pb.GetTableStateRequest\032\037.hbase." +
-      "pb.GetTableStateResponse\022A\n\010SetQuota\022\031.h",
-      "base.pb.SetQuotaRequest\032\032.hbase.pb.SetQu" +
-      "otaResponse\022x\n\037getLastMajorCompactionTim" +
-      "estamp\022).hbase.pb.MajorCompactionTimesta" +
-      "mpRequest\032*.hbase.pb.MajorCompactionTime" +
-      "stampResponse\022\212\001\n(getLastMajorCompaction" +
-      "TimestampForRegion\0222.hbase.pb.MajorCompa" +
-      "ctionTimestampForRegionRequest\032*.hbase.p" +
-      "b.MajorCompactionTimestampResponse\022_\n\022ge" +
-      "tProcedureResult\022#.hbase.pb.GetProcedure" +
-      "ResultRequest\032$.hbase.pb.GetProcedureRes",
-      "ultResponse\022h\n\027getSecurityCapabilities\022%" +
-      ".hbase.pb.SecurityCapabilitiesRequest\032&." +
-      "hbase.pb.SecurityCapabilitiesResponse\022S\n" +
-      "\016AbortProcedure\022\037.hbase.pb.AbortProcedur" +
-      "eRequest\032 .hbase.pb.AbortProcedureRespon" +
-      "se\022S\n\016ListProcedures\022\037.hbase.pb.ListProc" +
-      "eduresRequest\032 .hbase.pb.ListProceduresR" +
-      "esponseBB\n*org.apache.hadoop.hbase.proto" +
-      "buf.generatedB\014MasterProtosH\001\210\001\001\240\001\001"
+      "ce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceResponse\022\017" +
+      "\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceReques" +
+      "t\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_group\030" +
+      "\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027DeleteName" +
+      "spaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Modify" +
+      "NamespaceRequest\022:\n\023namespaceDescriptor\030" +
+      "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013" +
+      "nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*" +
+      "\n\027ModifyNamespaceResponse\022\017\n\007proc_id\030\001 \001" +
+      "(\004\"6\n\035GetNamespaceDescriptorRequest\022\025\n\rn",
+      "amespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDescri" +
+      "ptorResponse\022:\n\023namespaceDescriptor\030\001 \002(" +
+      "\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037List" +
+      "NamespaceDescriptorsRequest\"^\n ListNames" +
+      "paceDescriptorsResponse\022:\n\023namespaceDesc" +
+      "riptor\030\001 \003(\0132\035.hbase.pb.NamespaceDescrip" +
+      "tor\"?\n&ListTableDescriptorsByNamespaceRe" +
+      "quest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'ListTabl" +
+      "eDescriptorsByNamespaceResponse\022*\n\013table" +
+      "Schema\030\001 \003(\0132\025.hbase.pb.TableSchema\"9\n L",
+      "istTableNamesByNamespaceRequest\022\025\n\rnames" +
+      "paceName\030\001 \002(\t\"K\n!ListTableNamesByNamesp" +
+      "aceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.p" +
+      "b.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" +
+      "wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" +
+      "asterResponse\"\037\n\016BalanceRequest\022\r\n\005force" +
+      "\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" +
+      "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" +
+      "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" +
+      "ncerRunningResponse\022\032\n\022prev_balance_valu",
+      "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" +
+      "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" +
+      "\010\"\022\n\020NormalizeRequest\"+\n\021NormalizeRespon" +
+      "se\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormali" +
+      "zerRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNorm" +
+      "alizerRunningResponse\022\035\n\025prev_normalizer" +
+      "_value\030\001 \001(\010\"\034\n\032IsNormalizerEnabledReque" +
+      "st\".\n\033IsNormalizerEnabledResponse\022\017\n\007ena" +
+      "bled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026R" +
+      "unCatalogScanResponse\022\023\n\013scan_result\030\001 \001",
+      "(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006ena" +
+      "ble\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespons" +
+      "e\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanito" +
+      "rEnabledRequest\"0\n\037IsCatalogJanitorEnabl" +
+      "edResponse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReq" +
+      "uest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapsh" +
+      "otDescription\",\n\020SnapshotResponse\022\030\n\020exp" +
+      "ected_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnaps" +
+      "hotsRequest\"Q\n\035GetCompletedSnapshotsResp" +
+      "onse\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snaps",
+      "hotDescription\"H\n\025DeleteSnapshotRequest\022" +
+      "/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDes" +
+      "cription\"\030\n\026DeleteSnapshotResponse\"I\n\026Re" +
+      "storeSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035" +
+      ".hbase.pb.SnapshotDescription\"\031\n\027Restore" +
+      "SnapshotResponse\"H\n\025IsSnapshotDoneReques" +
+      "t\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotD" +
+      "escription\"^\n\026IsSnapshotDoneResponse\022\023\n\004" +
+      "done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hb" +
+      "ase.pb.SnapshotDescription\"O\n\034IsRestoreS",
+      "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" +
+      "base.pb.SnapshotDescription\"4\n\035IsRestore" +
+      "SnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fals" +
+      "e\"F\n\033GetSchemaAlterStatusRequest\022\'\n\ntabl" +
+      "e_name\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034Get" +
+      "SchemaAlterStatusResponse\022\035\n\025yet_to_upda" +
+      "te_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"" +
+      "\213\001\n\032GetTableDescriptorsRequest\022(\n\013table_" +
+      "names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005rege" +
+      "x\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005fal",
+      "se\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescrip" +
+      "torsResponse\022+\n\014table_schema\030\001 \003(\0132\025.hba" +
+      "se.pb.TableSchema\"[\n\024GetTableNamesReques" +
+      "t\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002" +
+      " \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTab" +
+      "leNamesResponse\022(\n\013table_names\030\001 \003(\0132\023.h" +
+      "base.pb.TableName\"?\n\024GetTableStateReques" +
+      "t\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNa" +
+      "me\"B\n\025GetTableStateResponse\022)\n\013table_sta" +
+      "te\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027GetClu",
+      "sterStatusRequest\"K\n\030GetClusterStatusRes" +
+      "ponse\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb" +
+      ".ClusterStatus\"\030\n\026IsMasterRunningRequest" +
+      "\"4\n\027IsMasterRunningResponse\022\031\n\021is_master" +
+      "_running\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221" +
+      "\n\tprocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDe" +
+      "scription\"F\n\025ExecProcedureResponse\022\030\n\020ex" +
+      "pected_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(" +
+      "\014\"K\n\026IsProcedureDoneRequest\0221\n\tprocedure" +
+      "\030\001 \001(\0132\036.hbase.pb.ProcedureDescription\"`",
+      "\n\027IsProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:" +
+      "\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Proc" +
+      "edureDescription\",\n\031GetProcedureResultRe" +
+      "quest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureR" +
+      "esultResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb." +
+      "GetProcedureResultResponse.State\022\022\n\nstar" +
+      "t_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006res" +
+      "ult\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb." +
+      "ForeignExceptionMessage\"1\n\005State\022\r\n\tNOT_" +
+      "FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025Ab",
+      "ortProcedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025" +
+      "mayInterruptIfRunning\030\002 \001(\010:\004true\"6\n\026Abo" +
+      "rtProcedureResponse\022\034\n\024is_procedure_abor" +
+      "ted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026Li" +
+      "stProceduresResponse\022&\n\tprocedure\030\001 \003(\0132" +
+      "\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaRequest" +
+      "\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022" +
+      "\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023" +
+      ".hbase.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022" +
+      "\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(",
+      "\0132\031.hbase.pb.ThrottleRequest\"\022\n\020SetQuota" +
+      "Response\"J\n\037MajorCompactionTimestampRequ" +
+      "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
+      "Name\"U\n(MajorCompactionTimestampForRegio" +
+      "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" +
+      "onSpecifier\"@\n MajorCompactionTimestampR" +
+      "esponse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n" +
+      "\033SecurityCapabilitiesRequest\"\354\001\n\034Securit" +
+      "yCapabilitiesResponse\022G\n\014capabilities\030\001 " +
+      "\003(\01621.hbase.pb.SecurityCapabilitiesRespo",
+      "nse.Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_" +
+      "AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATIO" +
+      "N\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZA" +
+      "TION\020\003\022\023\n\017CELL_VISIBILITY\020\0042\374&\n\rMasterSe" +
+      "rvice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb" +
+      ".GetSchemaAlterStatusRequest\032&.hbase.pb." +
+      "GetSchemaAlterStatusResponse\022b\n\023GetTable" +
+      "Descriptors\022$.hbase.pb.GetTableDescripto" +
+      "rsRequest\032%.hbase.pb.GetTableDescriptors" +
+      "Response\022P\n\rGetTableNames\022\036.hbase.pb.Get",
+      "TableNamesRequest\032\037.hbase.pb.GetTableNam" +
+      "esResponse\022Y\n\020GetClusterStatus\022!.hbase.p" +
+      "b.GetClusterStatusRequest\032\".hbase.pb.Get" +
+      "ClusterStatusResponse\022V\n\017IsMasterRunning" +
+      "\022 .hbase.pb.IsMasterRunningRequest\032!.hba" +
+      "se.pb.IsMasterRunningResponse\022D\n\tAddColu" +
+      "mn\022\032.hbase.pb.AddColumnRequest\032\033.hbase.p" +
+      "b.AddColumnResponse\022M\n\014DeleteColumn\022\035.hb" +
+      "ase.pb.DeleteColumnRequest\032\036.hbase.pb.De" +
+      "leteColumnResponse\022M\n\014ModifyColumn\022\035.hba",
+      "se.pb.ModifyColumnRequest\032\036.hbase.pb.Mod" +
+      "ifyColumnResponse\022G\n\nMoveRegion\022\033.hbase." +
+      "pb.MoveRegionRequest\032\034.hbase.pb.MoveRegi" +
+      "onResponse\022k\n\026DispatchMergingRegions\022\'.h" +
+      "base.pb.DispatchMergingRegionsRequest\032(." +
+      "hbase.pb.DispatchMergingRegionsResponse\022" +
+      "M\n\014AssignRegion\022\035.hbase.pb.AssignRegionR" +
+      "equest\032\036.hbase.pb.AssignRegionResponse\022S" +
+      "\n\016UnassignRegion\022\037.hbase.pb.UnassignRegi" +
+      "onRequest\032 .hbase.pb.UnassignRegionRespo",
+      "nse\022P\n\rOfflineRegion\022\036.hbase.pb.OfflineR" +
+      "egionRequest\032\037.hbase.pb.OfflineRegionRes" +
+      "ponse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteTa" +
+      "bleRequest\032\035.hbase.pb.DeleteTableRespons" +
+      "e\022P\n\rtruncateTable\022\036.hbase.pb.TruncateTa" +
+      "bleRequest\032\037.hbase.pb.TruncateTableRespo" +
+      "nse\022J\n\013EnableTable\022\034.hbase.pb.EnableTabl" +
+      "eRequest\032\035.hbase.pb.EnableTableResponse\022" +
+      "M\n\014DisableTable\022\035.hbase.pb.DisableTableR" +
+      "equest\032\036.hbase.pb.DisableTableResponse\022J",
+      "\n\013ModifyTable\022\034.hbase.pb.ModifyTableRequ" +
+      "est\032\035.hbase.pb.ModifyTableResponse\022J\n\013Cr" +
+      "eateTable\022\034.hbase.pb.CreateTableRequest\032" +
+      "\035.hbase.pb.CreateTableResponse\022A\n\010Shutdo" +
+      "wn\022\031.hbase.pb.ShutdownRequest\032\032.hbase.pb" +
+      ".ShutdownResponse\022G\n\nStopMaster\022\033.hbase." +
+      "pb.StopMasterRequest\032\034.hbase.pb.StopMast" +
+      "erResponse\022>\n\007Balance\022\030.hbase.pb.Balance" +
+      "Request\032\031.hbase.pb.BalanceResponse\022_\n\022Se" +
+      "tBalancerRunning\022#.hbase.pb.SetBalancerR",
+      "unningRequest\032$.hbase.pb.SetBalancerRunn" +
+      "ingResponse\022\\\n\021IsBalancerEnabled\022\".hbase" +
+      ".pb.IsBalancerEnabledRequest\032#.hbase.pb." +
+      "IsBalancerEnabledResponse\022D\n\tNormalize\022\032" +
+      ".hbase.pb.NormalizeRequest\032\033.hbase.pb.No" +
+      "rmalizeResponse\022e\n\024SetNormalizerRunning\022" +
+      "%.hbase.pb.SetNormalizerRunningRequest\032&" +
+      ".hbase.pb.SetNormalizerRunningResponse\022b" +
+      "\n\023IsNormalizerEnabled\022$.hbase.pb.IsNorma" +
+      "lizerEnabledRequest\032%.hbase.pb.IsNormali",
+      "zerEnabledResponse\022S\n\016RunCatalogScan\022\037.h" +
+      "base.pb.RunCatalogScanRequest\032 .hbase.pb" +
+      ".RunCatalogScanResponse\022e\n\024EnableCatalog" +
+      "Janitor\022%.hbase.pb.EnableCatalogJanitorR" +
+      "equest\032&.hbase.pb.EnableCatalogJanitorRe" +
+      "sponse\022n\n\027IsCatalogJanitorEnabled\022(.hbas" +
+      "e.pb.IsCatalogJanitorEnabledRequest\032).hb" +
+      "ase.pb.IsCatalogJanitorEnabledResponse\022^" +
+      "\n\021ExecMasterService\022#.hbase.pb.Coprocess" +
+      "orServiceRequest\032$.hbase.pb.CoprocessorS",
+      "erviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sn" +
+      "apshotRequest\032\032.hbase.pb.SnapshotRespons" +
+      "e\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Ge" +
+      "tCompletedSnapshotsRequest\032\'.hbase.pb.Ge" +
+      "tCompletedSnapshotsResponse\022S\n\016DeleteSna" +
+      "pshot\022\037.hbase.pb.DeleteSnapshotRequest\032 " +
+      ".hbase.pb.DeleteSnapshotResponse\022S\n\016IsSn" +
+      "apshotDone\022\037.hbase.pb.IsSnapshotDoneRequ" +
+      "est\032 .hbase.pb.IsSnapshotDoneResponse\022V\n" +
+      "\017RestoreSnapshot\022 .hbase.pb.RestoreSnaps",
+      "hotRequest\032!.hbase.pb.RestoreSnapshotRes" +
+      "ponse\022h\n\025IsRestoreSnapshotDone\022&.hbase.p" +
+      "b.IsRestoreSnapshotDoneRequest\032\'.hbase.p" +
+      "b.IsRestoreSnapshotDoneResponse\022P\n\rExecP" +
+      "rocedure\022\036.hbase.pb.ExecProcedureRequest" +
+      "\032\037.hbase.pb.ExecProcedureResponse\022W\n\024Exe" +
+      "cProcedureWithRet\022\036.hbase.pb.ExecProcedu" +
+      "reRequest\032\037.hbase.pb.ExecProcedureRespon" +
+      "se\022V\n\017IsProcedureDone\022 .hbase.pb.IsProce" +
+      "dureDoneRequest\032!.hbase.pb.IsProcedureDo",
+      "neResponse\022V\n\017ModifyNamespace\022 .hbase.pb" +
+      ".ModifyNamespaceRequest\032!.hbase.pb.Modif" +
+      "yNamespaceResponse\022V\n\017CreateNamespace\022 ." +
+      "hbase.pb.CreateNamespaceRequest\032!.hbase." +
+      "pb.CreateNamespaceResponse\022V\n\017DeleteName" +
+      "space\022 .hbase.pb.DeleteNamespaceRequest\032" +
+      "!.hbase.pb.DeleteNamespaceResponse\022k\n\026Ge" +
+      "tNamespaceDescriptor\022\'.hbase.pb.GetNames" +
+      "paceDescriptorRequest\032(.hbase.pb.GetName" +
+      "spaceDescriptorResponse\022q\n\030ListNamespace",
+      "Descriptors\022).hbase.pb.ListNamespaceDesc" +
+      "riptorsRequest\032*.hbase.pb.ListNamespaceD" +
+      "escriptorsResponse\022\206\001\n\037ListTableDescript" +
+      "orsByNamespace\0220.hbase.pb.ListTableDescr" +
+      "iptorsByNamespaceRequest\0321.hbase.pb.List" +
+      "TableDescriptorsByNamespaceResponse\022t\n\031L" +
+      "istTableNamesByNamespace\022*.hbase.pb.List" +
+      "TableNamesByNamespaceRequest\032+.hbase.pb." +
+      "ListTableNamesByNamespaceResponse\022P\n\rGet" +
+      "TableState\022\036.hbase.pb.GetTableStateReque",
+      "st\032\037.hbase.pb.GetTableStateResponse\022A\n\010S" +
+      "etQuota\022\031.hbase.pb.SetQuotaRequest\032\032.hba" +
+      "se.pb.SetQuotaResponse\022x\n\037getLastMajorCo" +
+      "mpactionTimestamp\022).hbase.pb.MajorCompac" +
+      "tionTimestampRequest\032*.hbase.pb.MajorCom" +
+      "pactionTimestampResponse\022\212\001\n(getLastMajo" +
+      "rCompactionTimestampForRegion\0222.hbase.pb" +
+      ".MajorCompactionTimestampForRegionReques" +
+      "t\032*.hbase.pb.MajorCompactionTimestampRes" +
+      "ponse\022_\n\022getProcedureResult\022#.hbase.pb.G",
+      "etProcedureResultRequest\032$.hbase.pb.GetP" +
+      "rocedureResultResponse\022h\n\027getSecurityCap" +
+      "abilities\022%.hbase.pb.SecurityCapabilitie" +
+      "sRequest\032&.hbase.pb.SecurityCapabilities" +
+      "Response\022S\n\016AbortProcedure\022\037.hbase.pb.Ab" +
+      "ortProcedureRequest\032 .hbase.pb.AbortProc" +
+      "edureResponse\022S\n\016ListProcedures\022\037.hbase." +
+      "pb.ListProceduresRequest\032 .hbase.pb.List" +
+      "ProceduresResponseBB\n*org.apache.hadoop." +
+      "hbase.protobuf.generatedB\014MasterProtosH\001",
+      "\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -62325,7 +62615,7 @@ public final class MasterProtos {
           internal_static_hbase_pb_CreateNamespaceResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_CreateNamespaceResponse_descriptor,
-              new java.lang.String[] { });
+              new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_DeleteNamespaceRequest_descriptor =
             getDescriptor().getMessageTypes().get(30);
           internal_static_hbase_pb_DeleteNamespaceRequest_fieldAccessorTable = new
@@ -62337,7 +62627,7 @@ public final class MasterProtos {
           internal_static_hbase_pb_DeleteNamespaceResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_DeleteNamespaceResponse_descriptor,
-              new java.lang.String[] { });
+              new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_ModifyNamespaceRequest_descriptor =
             getDescriptor().getMessageTypes().get(32);
           internal_static_hbase_pb_ModifyNamespaceRequest_fieldAccessorTable = new
@@ -62349,7 +62639,7 @@ public final class MasterProtos {
           internal_static_hbase_pb_ModifyNamespaceResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ModifyNamespaceResponse_descriptor,
-              new java.lang.String[] { });
+              new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_GetNamespaceDescriptorRequest_descriptor =
             getDescriptor().getMessageTypes().get(34);
           internal_static_hbase_pb_GetNamespaceDescriptorRequest_fieldAccessorTable = new

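The net effect of the generated code above: each namespace response now carries an optional proc_id whose presence is tracked in bitField0_. A minimal sketch of consuming it follows; how a real response is obtained from the master RPC stub is elided, and only the accessors generated in this diff are assumed. The -1 sentinel is hypothetical, chosen because getProcId() alone returns the 0L default when the field is unset.

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;

    public class ProcIdExample {
      // Returns the procedure id, or a hypothetical -1 sentinel when the
      // master did not report one.
      static long procIdOrUnset(CreateNamespaceResponse response) {
        return response.hasProcId() ? response.getProcId() : -1L;
      }

      public static void main(String[] args) {
        // Build a response locally just to exercise the generated builder.
        CreateNamespaceResponse response =
            CreateNamespaceResponse.newBuilder().setProcId(42L).build();
        System.out.println(procIdOrUnset(response)); // prints 42
      }
    }
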
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 4d3a2e1..aa31a5e 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -186,6 +186,7 @@ message CreateNamespaceRequest {
 }
 
 message CreateNamespaceResponse {
+  optional uint64 proc_id = 1;
 }
 
 message DeleteNamespaceRequest {
@@ -195,6 +196,7 @@ message DeleteNamespaceRequest {
 }
 
 message DeleteNamespaceResponse {
+  optional uint64 proc_id = 1;
 }
 
 message ModifyNamespaceRequest {
@@ -204,6 +206,7 @@ message ModifyNamespaceRequest {
 }
 
 message ModifyNamespaceResponse {
+  optional uint64 proc_id = 1;
 }
 
 message GetNamespaceDescriptorRequest {
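
The proc_id field is optional in all three namespace responses, so callers should test
for its presence before reading it. A minimal sketch of the generated protobuf API at
work (the 42L value is illustrative; the Master fills in the real procedure id):

  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;

  // Server side: attach the procedure id to the response.
  CreateNamespaceResponse resp = CreateNamespaceResponse.newBuilder()
      .setProcId(42L)
      .build();

  // Client side: the field is optional, so check presence before reading.
  if (resp.hasProcId()) {
    long procId = resp.getProcId();
    // e.g. poll getProcedureResult with procId until the namespace op completes
  }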

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index 365c0b8..26454f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
@@ -43,12 +44,20 @@ public interface Server extends Abortable, Stoppable {
   ZooKeeperWatcher getZooKeeper();
 
   /**
-   * Returns a reference to the servers' cluster connection.
+   * Returns a reference to the server's connection.
    *
    * Important note: this method returns a reference to Connection which is managed
    * by Server itself, so callers must NOT attempt to close connection obtained.
    */
-  ClusterConnection getConnection();
+  Connection getConnection();
+
+  /**
+   * Returns a reference to the server's cluster connection. Prefer {@link #getConnection()}.
+   *
+   * Important note: this method returns a reference to Connection which is managed
+   * by Server itself, so callers must NOT attempt to close connection obtained.
+   */
+  ClusterConnection getClusterConnection();
 
   /**
    * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
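
The intent of the split is that most callers only need the high-level client API and
should use getConnection(), while getClusterConnection() stays available for internals.
A sketch of the expected call pattern, assuming a caller that reads hbase:meta (the
updateMeta name is hypothetical):

  import java.io.IOException;
  import org.apache.hadoop.hbase.Server;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Table;

  void updateMeta(Server server) throws IOException {
    // Plain Connection suffices for table access.
    Connection connection = server.getConnection();
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      // read/write hbase:meta here
    } // closes the Table only -- the Connection is managed by the Server
  }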

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java
new file mode 100644
index 0000000..cb3b684
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * View and edit the current cluster schema. Use this API to make any modification to
+ * namespaces, tables, etc.
+ *
+ * <h2>Implementation Notes</h2>
+ * Nonces are used when an operation is non-idempotent, to ensure once-only semantics
+ * even across process failures.
+ */
+// ClusterSchema is introduced to encapsulate schema modification. Currently the different aspects
+// are spread about the code base. This effort is about cleanup, shutting down access, and
+// coalescing common code. In particular, we'd contain filesystem modification. Other
+// benefits are to make all schema modification work the same way (one way to do an operation only
+// rather than the current approach where how an operation is done varies with context) and to make
+// it so cluster schema modification can stand apart from Master to facilitate standalone
+// testing. It is part of the filesystem refactor project that undoes the dependency on a
+// layout in HDFS that mimics our model of tables having regions, regions having column
+// families, and column families having files.
+// With this Interface in place, with all modifications going via this route where no filesystem
+// particulars are exposed, redoing our internals will take less effort.
+//
+// Currently the ClusterSchema Interface will include namespace and table manipulation. Ideally a
+// form of this Interface will go all the way down to the file manipulation level, but that is
+// currently TBD.
+//
+// ClusterSchema is private to the Master; only the Master knows current cluster state and has
+// means of editing/altering it.
+//
+// TODO: Remove Server argument when MasterServices are passed.
+// TODO: Most methods below currently return a procedure id. This may change to returning
+// a ProcedureFuture subsequently.
+@InterfaceAudience.Private
+public interface ClusterSchema {
+  /**
+   * Timeout for cluster operations in milliseconds.
+   */
+  public static final String HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY =
+      "hbase.master.cluster.schema.operation.timeout";
+  /**
+   * Default operation timeout in milliseconds.
+   */
+  public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT =
+      5 * 60 * 1000;
+
+  /**
+   * For internal use only. Do not use! Provisionally part of this Interface.
+   * Prefer the high-level APIs available elsewhere in this Interface.
+   * @return Instance of {@link TableNamespaceManager}
+   */
+  // TODO: Remove from here. Keep internal. This Interface is too high-level to host this accessor.
+  TableNamespaceManager getTableNamespaceManager();
+
+  /**
+   * Create a new Namespace.
+   * @param namespaceDescriptor descriptor for new Namespace
+   * @param nonceGroup Identifier for the source of the request, a client or process.
+   * @param nonce A unique identifier for this operation from the client or process identified by
+   *    <code>nonceGroup</code> (the source must ensure each operation gets a unique id).
+   * @return procedure id
+   * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException}
+   *    as well as {@link IOException}
+   */
+  long createNamespace(NamespaceDescriptor namespaceDescriptor, long nonceGroup, long nonce)
+  throws IOException;
+
+  /**
+   * Modify an existing Namespace.
+   * @param nonceGroup Identifier for the source of the request, a client or process.
+   * @param nonce A unique identifier for this operation from the client or process identified by
+   *    <code>nonceGroup</code> (the source must ensure each operation gets a unique id).
+   * @return procedure id
+   * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException}
+   *    as well as {@link IOException}
+   */
+  long modifyNamespace(NamespaceDescriptor descriptor, long nonceGroup, long nonce)
+  throws IOException;
+
+  /**
+   * Delete an existing Namespace.
+   * Only empty Namespaces (no tables) can be removed.
+   * @param nonceGroup Identifier for the source of the request, a client or process.
+   * @param nonce A unique identifier for this operation from the client or process identified by
+   *    <code>nonceGroup</code> (the source must ensure each operation gets a unique id).
+   * @return procedure id
+   * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException}
+   *    as well as {@link IOException}
+   */
+  long deleteNamespace(String name, long nonceGroup, long nonce)
+  throws IOException;
+
+  /**
+   * Get a Namespace.
+   * @param name Name of the Namespace
+   * @return Namespace descriptor for <code>name</code>
+   * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException}
+   *    as well as {@link IOException}
+   */
+  // No Future here because the presumption is that the request goes against cached metadata and
+  // returns immediately -- no need to run a Procedure.
+  NamespaceDescriptor getNamespace(String name) throws IOException;
+
+  /**
+   * Get all Namespaces.
+   * @return All Namespace descriptors
+   */
+  List<NamespaceDescriptor> getNamespaces() throws IOException;
+}
\ No newline at end of file
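
A minimal usage sketch of the new interface as a Master-internal caller might drive it;
the namespace name and nonce values are illustrative, and in real code the nonces come
from the connection's NonceGenerator:

  import java.io.IOException;
  import org.apache.hadoop.hbase.NamespaceDescriptor;
  import org.apache.hadoop.hbase.master.ClusterSchema;

  long createExampleNamespace(ClusterSchema schema) throws IOException {
    NamespaceDescriptor nsd = NamespaceDescriptor.create("example_ns").build();
    // nonceGroup/nonce must be unique per logical operation so that a retry
    // after a process failure does not execute the operation twice.
    long nonceGroup = 0x1234L; // illustrative only
    long nonce = 0x5678L;      // illustrative only
    return schema.createNamespace(nsd, nonceGroup, nonce); // returns the procedure id
  }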

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java
new file mode 100644
index 0000000..62892b6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+public class ClusterSchemaException extends HBaseIOException {
+  public ClusterSchemaException(String message) {
+    super(message);
+  }
+
+  public ClusterSchemaException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  public ClusterSchemaException(Throwable cause) {
+    super(cause);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java
new file mode 100644
index 0000000..43353ba
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.Service;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Mixes in ClusterSchema and Service
+ */
+@InterfaceAudience.Private
+public interface ClusterSchemaService extends ClusterSchema, Service {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
new file mode 100644
index 0000000..0250f36
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.ServiceNotRunningException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
+import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+
+@InterfaceAudience.Private
+class ClusterSchemaServiceImpl implements ClusterSchemaService {
+  private boolean running = false;
+  private final TableNamespaceManager tableNamespaceManager;
+  private final MasterServices masterServices;
+  private final static List<NamespaceDescriptor> EMPTY_NAMESPACE_LIST =
+    Collections.unmodifiableList(new ArrayList<NamespaceDescriptor>(0));
+
+  ClusterSchemaServiceImpl(final MasterServices masterServices) {
+    this.masterServices = masterServices;
+    this.tableNamespaceManager = new TableNamespaceManager(masterServices);
+  }
+
+  // All methods below are synchronized so callers get a consistent view of whether we are running.
+
+  @Override
+  public synchronized boolean isRunning() {
+    return this.running;
+  }
+
+  private synchronized void checkIsRunning() throws ServiceNotRunningException {
+    if (!isRunning()) throw new ServiceNotRunningException();
+  }
+
+  @Override
+  public synchronized void startAndWait() throws IOException {
+    if (isRunning()) throw new IllegalStateException("Already running; cannot double-start.");
+    // Set to running FIRST because tableNamespaceManager start uses this class to do namespace ops
+    this.running = true;
+    this.tableNamespaceManager.start();
+  }
+
+  @Override
+  public synchronized void stopAndWait() throws IOException {
+    checkIsRunning();
+    // You can't stop tableNamespaceManager.
+    this.running = false;
+  }
+
+  @Override
+  public TableNamespaceManager getTableNamespaceManager() {
+    return this.tableNamespaceManager;
+  }
+
+  private long submitProcedure(final Procedure<?> procedure, long nonceGroup,
+      long nonce)
+  throws ServiceNotRunningException {
+    checkIsRunning();
+    ProcedureExecutor<MasterProcedureEnv> pe = this.masterServices.getMasterProcedureExecutor();
+    return pe.submitProcedure(procedure, nonceGroup, nonce);
+  }
+
+  @Override
+  public long createNamespace(NamespaceDescriptor namespaceDescriptor,
+      long nonceGroup, long nonce)
+  throws IOException {
+    return submitProcedure(new CreateNamespaceProcedure(
+      this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor),
+        nonceGroup, nonce);
+  }
+
+  @Override
+  public long modifyNamespace(NamespaceDescriptor namespaceDescriptor,
+      long nonceGroup, long nonce)
+  throws IOException {
+    return submitProcedure(new ModifyNamespaceProcedure(
+      this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor),
+        nonceGroup, nonce);
+  }
+
+  @Override
+  public long deleteNamespace(String name, long nonceGroup, long nonce)
+  throws IOException {
+    return submitProcedure(new DeleteNamespaceProcedure(
+      this.masterServices.getMasterProcedureExecutor().getEnvironment(), name),
+        nonceGroup, nonce);
+  }
+
+  @Override
+  public NamespaceDescriptor getNamespace(String name) throws IOException {
+    NamespaceDescriptor nsd = getTableNamespaceManager().get(name);
+    if (nsd == null) throw new NamespaceNotFoundException(name);
+    return nsd;
+  }
+
+  @Override
+  public List<NamespaceDescriptor> getNamespaces() throws IOException {
+    checkIsRunning();
+    Set<NamespaceDescriptor> set = getTableNamespaceManager().list();
+    if (set == null || set.isEmpty()) return EMPTY_NAMESPACE_LIST;
+    List<NamespaceDescriptor> list = new ArrayList<NamespaceDescriptor>(set.size());
+    list.addAll(set);
+    return Collections.unmodifiableList(list);
+  }
+}
\ No newline at end of file


[03/17] hbase git commit: Revert "HBASE-14902 Revert some of the stringency recently introduced by checkstyle tightening"

Posted by sy...@apache.org.
Revert "HBASE-14902 Revert some of the stringency recently introduced by checkstyle tightening"

This reverts commit 998b9371c9d9a6fe4a93e734ba0055c3e20e0b3e.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c4edd2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c4edd2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c4edd2a

Branch: refs/heads/hbase-12439
Commit: 1c4edd2ab702488e21c4929a998c49a4208633fc
Parents: 9997e4e
Author: stack <st...@apache.org>
Authored: Mon Jan 4 20:03:56 2016 -0800
Committer: stack <st...@apache.org>
Committed: Mon Jan 4 20:03:56 2016 -0800

----------------------------------------------------------------------
 hbase-checkstyle/src/main/resources/hbase/checkstyle.xml | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c4edd2a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
index 6670182..e7272c5 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
@@ -41,9 +41,7 @@
     http://checkstyle.sourceforge.net/config_blocks.html -->
     <module name="EmptyBlock"/>
     <module name="LeftCurly"/>
-    <module name="NeedBraces">
-      <property name="allowSingleLineStatement" value="true"/>
-    </module>
+    <module name="NeedBraces"/>
 
     <!-- Class Design Checks
     http://checkstyle.sourceforge.net/config_design.html -->
@@ -79,9 +77,7 @@
 
     <!-- Javadoc Checks
     http://checkstyle.sourceforge.net/config_javadoc.html -->
-    <module name="JavadocTagContinuationIndentation">
-      <property name="offset" value="2"/>
-    </module>
+    <module name="JavadocTagContinuationIndentation"/>
     <module name="NonEmptyAtclauseDescription"/>
 
     <!-- Miscellaneous Checks
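
For context, the rule being reverted governs guard clauses like the ones elsewhere in
this series. A hedged illustration (not taken from the patch) of what plain NeedBraces
rejects once allowSingleLineStatement is gone:

  // Flagged by NeedBraces without allowSingleLineStatement:
  if (nsd == null) throw new NamespaceNotFoundException(name);

  // Accepted form under the stricter default configuration:
  if (nsd == null) {
    throw new NamespaceNotFoundException(name);
  }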


[11/17] hbase git commit: HBASE-14221 Reduce the number of times row comparison is done in a Scan (Ram)

Posted by sy...@apache.org.
HBASE-14221 Reduce the number of times row comparison is done in a Scan
(Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/073e00c0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/073e00c0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/073e00c0

Branch: refs/heads/hbase-12439
Commit: 073e00c0053cb351dbba265112a6e756c932f875
Parents: 7cd09bf
Author: ramkrishna <ra...@gmail.com>
Authored: Wed Jan 6 10:38:27 2016 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Wed Jan 6 10:38:27 2016 +0530

----------------------------------------------------------------------
 .../hbase/regionserver/ScanQueryMatcher.java    | 36 ++++++++++++--------
 .../hadoop/hbase/regionserver/StoreScanner.java | 11 +++---
 2 files changed, 27 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/073e00c0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
index 47d8c8f..c220b5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
@@ -283,27 +283,33 @@ public class ScanQueryMatcher {
       if (filter != null && filter.filterAllRemaining()) {
       return MatchCode.DONE_SCAN;
     }
-    int ret = this.rowComparator.compareRows(curCell, cell);
-    if (!this.isReversed) {
-      if (ret <= -1) {
-        return MatchCode.DONE;
-      } else if (ret >= 1) {
-        // could optimize this, if necessary?
-        // Could also be called SEEK_TO_CURRENT_ROW, but this
-        // should be rare/never happens.
-        return MatchCode.SEEK_NEXT_ROW;
+    if (curCell != null) {
+      int ret = this.rowComparator.compareRows(curCell, cell);
+      if (!this.isReversed) {
+        if (ret <= -1) {
+          return MatchCode.DONE;
+        } else if (ret >= 1) {
+          // could optimize this, if necessary?
+          // Could also be called SEEK_TO_CURRENT_ROW, but this
+          // should be rare/never happens.
+          return MatchCode.SEEK_NEXT_ROW;
+        }
+      } else {
+        if (ret <= -1) {
+          return MatchCode.SEEK_NEXT_ROW;
+        } else if (ret >= 1) {
+          return MatchCode.DONE;
+        }
       }
     } else {
-      if (ret <= -1) {
-        return MatchCode.SEEK_NEXT_ROW;
-      } else if (ret >= 1) {
-        return MatchCode.DONE;
-      }
+      // curCell being null means we are already sure we have moved on to the next row
+      return MatchCode.DONE;
     }
 
     // optimize case.
-    if (this.stickyNextRow)
+    if (this.stickyNextRow) {
       return MatchCode.SEEK_NEXT_ROW;
+    }
 
     if (this.columns.done()) {
       stickyNextRow = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/073e00c0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 5fdfa79..3049608 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -507,8 +507,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     // If no limits exists in the scope LimitScope.Between_Cells then we are sure we are changing
     // rows. Else it is possible we are still traversing the same row so we must perform the row
     // comparison.
-    if (!scannerContext.hasAnyLimit(LimitScope.BETWEEN_CELLS) || matcher.curCell == null
-        || !CellUtil.matchingRow(cell, matcher.curCell)) {
+    if (!scannerContext.hasAnyLimit(LimitScope.BETWEEN_CELLS) || matcher.curCell == null) {
       this.countPerRow = 0;
       matcher.setToNewRow(cell);
     }
@@ -534,7 +533,6 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       if (prevCell != cell) ++kvsScanned; // Do object compare - we set prevKV from the same heap.
       checkScanOrder(prevCell, cell, comparator);
       prevCell = cell;
-
       ScanQueryMatcher.MatchCode qcode = matcher.match(cell);
       qcode = optimize(qcode, cell);
       switch (qcode) {
@@ -553,6 +551,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
           if (!matcher.moreRowsMayExistAfter(cell)) {
             return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
           }
+          matcher.curCell = null;
           seekToNextRow(cell);
           break LOOP;
         }
@@ -580,6 +579,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
           if (!matcher.moreRowsMayExistAfter(cell)) {
             return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
           }
+          matcher.curCell = null;
           seekToNextRow(cell);
         } else if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL) {
           seekAsDirection(matcher.getKeyForNextColumn(cell));
@@ -596,6 +596,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
         continue;
 
       case DONE:
+        matcher.curCell = null;
         return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues();
 
       case DONE_SCAN:
@@ -608,7 +609,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
         if (!matcher.moreRowsMayExistAfter(cell)) {
           return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
         }
-
+        matcher.curCell = null;
         seekToNextRow(cell);
         break;
 
@@ -751,7 +752,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     }
     if ((matcher.curCell == null) || !CellUtil.matchingRows(cell, matcher.curCell)) {
       this.countPerRow = 0;
-      matcher.reset();
+      // The setToNewRow will call reset internally
       matcher.setToNewRow(cell);
     }
   }
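
The net effect is a small contract between StoreScanner and ScanQueryMatcher: whenever
the scanner knows it has crossed a row boundary it nulls matcher.curCell, letting the
matcher skip the row comparison entirely. A condensed sketch of the check the patch
introduces (not the full match logic):

  // Condensed from ScanQueryMatcher.match(): a null curCell means the scanner
  // has already signalled a row change, so no row compare is needed.
  if (curCell == null) {
    return MatchCode.DONE;
  }
  int ret = rowComparator.compareRows(curCell, cell);
  // ... forward/reversed handling of ret continues as before ...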


[04/17] hbase git commit: HBASE-14888 ClusterSchema: Add Namespace Operations

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
index 177adfd..0cccce1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
@@ -27,16 +27,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 078aaa6..6049701 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -253,5 +253,11 @@ public class TestHFileCleaner {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index d6f1606..0401ae8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -191,5 +191,11 @@ public class TestHFileLinkCleaner {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index f874523..ebf3699 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -223,5 +223,11 @@ public class TestLogsCleaner {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 87db386..b13f337 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -260,5 +260,11 @@ public class TestReplicationHFileCleaner {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 77a603d..9731aa4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -438,11 +438,11 @@ public class MasterProcedureTestingUtility {
   }
 
   public static long generateNonceGroup(final HMaster master) {
-    return master.getConnection().getNonceGenerator().getNonceGroup();
+    return master.getClusterConnection().getNonceGenerator().getNonceGroup();
   }
 
   public static long generateNonce(final HMaster master) {
-    return master.getConnection().getNonceGenerator().newNonce();
+    return master.getClusterConnection().getNonceGenerator().newNonce();
   }
 
   public static class InjectAbortOnLoadListener

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index e550c3a..0e38afc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -718,6 +718,12 @@ public class TestHeapMemoryManager {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 
   static class CustomHeapMemoryTuner implements HeapMemoryTuner {

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index d62ccde..96ec698 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -143,6 +143,12 @@ public class TestSplitLogWorker {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 
   private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems)

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index 3b7402a..94dbb25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -191,5 +191,11 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index a082b19..9a878fd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -1,4 +1,5 @@
 /**
+
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -292,5 +293,11 @@ public class TestReplicationTrackerZKImpl {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index a208120..f042a8d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -595,5 +595,11 @@ public class TestReplicationSourceManager {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 69c6e63..faac8eb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -334,6 +334,12 @@ public class TestTokenAuthentication {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 
   private static HBaseTestingUtility TEST_UTIL;

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 1fcfcbb..53e2467 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -127,4 +127,10 @@ public class MockServer implements Server {
   public ChoreService getChoreService() {
     return null;
   }
+
+  @Override
+  public ClusterConnection getClusterConnection() {
+    // TODO Auto-generated method stub
+    return null;
+  }
 }
\ No newline at end of file


[17/17] hbase git commit: HBASE-15068 Add metrics for region normalization plans

Posted by sy...@apache.org.
HBASE-15068 Add metrics for region normalization plans


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5266b077
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5266b077
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5266b077

Branch: refs/heads/hbase-12439
Commit: 5266b0770843c57e977385d7ea1d0ea40273668a
Parents: d65978f
Author: tedyu <yu...@gmail.com>
Authored: Thu Jan 7 03:13:16 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Thu Jan 7 03:13:16 2016 -0800

----------------------------------------------------------------------
 .../hbase/master/MetricsMasterSource.java       |  5 ++++-
 .../hbase/master/MetricsMasterWrapper.java      |  9 ++++++++
 .../hbase/master/MetricsMasterSourceImpl.java   |  4 ++++
 .../org/apache/hadoop/hbase/master/HMaster.java | 22 ++++++++++++++++++++
 .../hbase/master/MetricsMasterWrapperImpl.java  | 10 +++++++++
 .../hbase/master/TestMasterMetricsWrapper.java  |  2 ++
 6 files changed, 51 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5266b077/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index ab621cc..290b8f5 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -57,6 +57,8 @@ public interface MetricsMasterSource extends BaseSource {
   String SERVER_NAME_NAME = "serverName";
   String CLUSTER_ID_NAME = "clusterId";
   String IS_ACTIVE_MASTER_NAME = "isActiveMaster";
+  String SPLIT_PLAN_COUNT_NAME = "splitPlanCount";
+  String MERGE_PLAN_COUNT_NAME = "mergePlanCount";
 
   String CLUSTER_REQUESTS_NAME = "clusterRequests";
   String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
@@ -70,7 +72,8 @@ public interface MetricsMasterSource extends BaseSource {
   String SERVER_NAME_DESC = "Server Name";
   String CLUSTER_ID_DESC = "Cluster Id";
   String IS_ACTIVE_MASTER_DESC = "Is Active Master";
-
+  String SPLIT_PLAN_COUNT_DESC = "Number of Region Split Plans executed";
+  String MERGE_PLAN_COUNT_DESC = "Number of Region Merge Plans executed";
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/5266b077/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
index 678db69..5e67f83 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
@@ -112,4 +112,13 @@ public interface MetricsMasterWrapper {
    */
   long getNumWALFiles();
 
+  /**
+   * Get the number of region split plans executed.
+   */
+  long getSplitPlanCount();
+
+  /**
+   * Get the number of region merge plans executed.
+   */
+  long getMergePlanCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5266b077/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
index c5ce5e4..b0ba66e 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -74,6 +74,10 @@ public class MetricsMasterSourceImpl
     // masterWrapper can be null because this function is called inside of init.
     if (masterWrapper != null) {
       metricsRecordBuilder
+          .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC),
+              masterWrapper.getMergePlanCount())
+          .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC),
+              masterWrapper.getSplitPlanCount())
           .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
               MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime())
           .addGauge(Interns.info(MASTER_START_TIME_NAME,

http://git-wip-us.apache.org/repos/asf/hbase/blob/5266b077/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4a9b792..8ff7ab1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -333,6 +333,9 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   // handle table states
   private TableStateManager tableStateManager;
+  
+  private long splitPlanCount;
+  private long mergePlanCount;
 
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
@@ -1340,6 +1343,11 @@ public class HMaster extends HRegionServer implements MasterServices {
         }
         NormalizationPlan plan = this.normalizer.computePlanForTable(table, types);
         plan.execute(clusterConnection.getAdmin());
+        if (plan.getType() == PlanType.SPLIT) {
+          splitPlanCount++;
+        } else if (plan.getType() == PlanType.MERGE) {
+          mergePlanCount++;
+        }
       }
     }
     // If Region did not generate any plans, it means the cluster is already balanced.
@@ -2335,6 +2343,20 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
     return regionStates.getAverageLoad();
   }
+  
+  /**
+   * @return the count of region split plans executed
+   */
+  public long getSplitPlanCount() {
+    return splitPlanCount;
+  }
+
+  /**
+   * @return the count of region merge plans executed
+   */
+  public long getMergePlanCount() {
+    return mergePlanCount;
+  }
 
   @Override
   public boolean registerService(Service instance) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5266b077/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
index a935a37..4cff28b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
@@ -40,6 +40,16 @@ public class MetricsMasterWrapperImpl implements MetricsMasterWrapper {
   }
 
   @Override
+  public long getSplitPlanCount() {
+    return master.getSplitPlanCount();
+  }
+
+  @Override
+  public long getMergePlanCount() {
+    return master.getMergePlanCount();
+  }
+
+  @Override
   public String getClusterId() {
     return master.getClusterId();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5266b077/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
index 2df4ac9..02f3721 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
@@ -50,6 +50,8 @@ public class TestMasterMetricsWrapper {
   public void testInfo() {
     HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
     MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);
+    assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0);
+    assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0);
     assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0);
     assertEquals(master.getClusterId(), info.getClusterId());
     assertEquals(master.getMasterActiveTime(), info.getActiveTime());
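
The wrapper methods delegate straight to HMaster, so anything holding a master reference
can read the counters the same way the test does. A small sketch (logPlanCounts is a
hypothetical helper):

  import org.apache.hadoop.hbase.master.HMaster;
  import org.apache.hadoop.hbase.master.MetricsMasterWrapperImpl;

  void logPlanCounts(HMaster master) {
    MetricsMasterWrapperImpl wrapper = new MetricsMasterWrapperImpl(master);
    long splits = wrapper.getSplitPlanCount(); // split plans run by the normalizer
    long merges = wrapper.getMergePlanCount(); // merge plans run by the normalizer
    System.out.println("split plans=" + splits + ", merge plans=" + merges);
  }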


[08/17] hbase git commit: HBASE-14902 Revert some of the stringency recently introduced by checkstyle tightening; RETRY changing javadoc indent from 4 to 2

Posted by sy...@apache.org.
HBASE-14902 Revert some of the stringency recently introduced by checkstyle tightening; RETRY changing javadoc indent from 4 to 2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/72d32cc9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/72d32cc9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/72d32cc9

Branch: refs/heads/hbase-12439
Commit: 72d32cc96b6cda7d7dc17cf3dc88235b01534d43
Parents: 46303df
Author: stack <st...@apache.org>
Authored: Tue Jan 5 14:45:05 2016 -0800
Committer: stack <st...@apache.org>
Committed: Tue Jan 5 14:45:05 2016 -0800

----------------------------------------------------------------------
 hbase-checkstyle/src/main/resources/hbase/checkstyle.xml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/72d32cc9/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
index e7272c5..6095d99 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
@@ -77,7 +77,9 @@
 
     <!-- Javadoc Checks
     http://checkstyle.sourceforge.net/config_javadoc.html -->
-    <module name="JavadocTagContinuationIndentation"/>
+    <module name="JavadocTagContinuationIndentation">
+      <property name="offset" value="2"/>
+    </module>
     <module name="NonEmptyAtclauseDescription"/>
 
     <!-- Miscellaneous Checks
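
To illustrate what the offset of 2 permits: continuation lines of Javadoc block tags may
now be indented by two spaces, matching the prevailing HBase style, where checkstyle's
default offset of 4 would flag them. A hedged example, not taken from the patch:

  /**
   * @param nonce A unique identifier for this operation from the client or
   *   process; this continuation line is indented by two spaces, which an
   *   offset of 2 accepts but the default of 4 would reject.
   */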


[14/17] hbase git commit: HBASE-12593 Tags to work with ByteBuffer.

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index faf6d81..496c7e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -338,8 +339,7 @@ public class HMobStore extends HStore {
       String fileName = MobUtils.getMobFileName(reference);
       Tag tableNameTag = MobUtils.getTableNameTag(reference);
       if (tableNameTag != null) {
-        byte[] tableName = tableNameTag.getValue();
-        String tableNameString = Bytes.toString(tableName);
+        String tableNameString = TagUtil.getValueAsString(tableNameTag);
         List<Path> locations = map.get(tableNameString);
         if (locations == null) {
           IdLock.Entry lockEntry = keyLock.getLockEntry(tableNameString.hashCode());
@@ -347,7 +347,7 @@ public class HMobStore extends HStore {
             locations = map.get(tableNameString);
             if (locations == null) {
               locations = new ArrayList<Path>(2);
-              TableName tn = TableName.valueOf(tableName);
+              TableName tn = TableName.valueOf(tableNameString);
               locations.add(MobUtils.getMobFamilyPath(conf, tn, family.getNameAsString()));
               locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils
                   .getMobRegionInfo(tn).getEncodedName(), family.getNameAsString()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ccf2eb0..e553fcc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellScanner;
@@ -94,6 +95,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagRewriteCell;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -3667,8 +3669,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       for (int i = 0; i < listSize; i++) {
         Cell cell = cells.get(i);
         List<Tag> newTags = new ArrayList<Tag>();
-        Iterator<Tag> tagIterator = CellUtil.tagsIterator(cell.getTagsArray(),
-          cell.getTagsOffset(), cell.getTagsLength());
+        Iterator<Tag> tagIterator = CellUtil.tagsIterator(cell);
 
         // Carry forward existing tags
 
@@ -3685,11 +3686,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // above may change when there are more tag based features in core.
         if (m.getTTL() != Long.MAX_VALUE) {
           // Add a cell TTL tag
-          newTags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(m.getTTL())));
+          newTags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(m.getTTL())));
         }
 
         // Rewrite the cell with the updated set of tags
-        cells.set(i, new TagRewriteCell(cell, Tag.fromList(newTags)));
+        cells.set(i, new TagRewriteCell(cell, TagUtil.fromList(newTags)));
       }
     }
   }
@@ -7073,8 +7074,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private static List<Tag> carryForwardTags(final Cell cell, final List<Tag> tags) {
     if (cell.getTagsLength() <= 0) return tags;
     List<Tag> newTags = tags == null? new ArrayList<Tag>(): /*Append Tags*/tags; 
-    Iterator<Tag> i =
-        CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
+    Iterator<Tag> i = CellUtil.tagsIterator(cell);
     while (i.hasNext()) newTags.add(i.next());
     return newTags;
   }
@@ -7178,11 +7178,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
                 if (mutate.getTTL() != Long.MAX_VALUE) {
                   // Add the new TTL tag
-                  newTags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL())));
+                  newTags.add(
+                      new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL())));
                 }
 
                 // Rebuild tags
-                byte[] tagBytes = Tag.fromList(newTags);
+                byte[] tagBytes = TagUtil.fromList(newTags);
 
                 // allocate an empty cell once
                 newCell = new KeyValue(row.length, cell.getFamilyLength(),
@@ -7216,9 +7217,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
                 if (mutate.getTTL() != Long.MAX_VALUE) {
                   List<Tag> newTags = new ArrayList<Tag>(1);
-                  newTags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL())));
+                  newTags.add(
+                      new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL())));
                   // Add the new TTL tag
-                  newCell = new TagRewriteCell(cell, Tag.fromList(newTags));
+                  newCell = new TagRewriteCell(cell, TagUtil.fromList(newTags));
                 } else {
                   newCell = cell;
                 }
@@ -7439,7 +7441,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
               // Add the TTL tag if the mutation carried one
               if (mutation.getTTL() != Long.MAX_VALUE) {
-                newTags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutation.getTTL())));
+                newTags.add(
+                    new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutation.getTTL())));
               }
 
               Cell newKV = new KeyValue(row, 0, row.length,

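The HRegion hunks show the shape of the whole patch: Tag is now an interface, ArrayBackedTag is its on-heap implementation, the static helpers moved to TagUtil, and CellUtil.tagsIterator(cell) needs no tags-length guard. A minimal sketch of the carry-forward-and-append pattern, assuming only the classes visible above (class name and sample values are mine):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagRewriteCell;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class TtlTagSketch {
  // Carry forward the cell's existing tags, append a per-cell TTL tag, and
  // rewrite the cell with the re-serialized tag list.
  static Cell withTtl(Cell cell, long ttlMillis) {
    List<Tag> newTags = new ArrayList<Tag>();
    Iterator<Tag> it = CellUtil.tagsIterator(cell); // empty iterator when no tags
    while (it.hasNext()) {
      newTags.add(it.next());
    }
    newTags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttlMillis)));
    return new TagRewriteCell(cell, TagUtil.fromList(newTags));
  }

  public static void main(String[] args) {
    Cell c = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        Bytes.toBytes("v"));
    System.out.println(withTtl(c, 5000L).getTagsLength() > 0); // true
  }
}
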
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index badbd65..8d66696 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -1779,28 +1780,24 @@ public class HStore implements Store {
    * @return true if the cell is expired
    */
   static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, final long now) {
-    // Do not create an Iterator or Tag objects unless the cell actually has tags.
-    if (cell.getTagsLength() > 0) {
-      // Look for a TTL tag first. Use it instead of the family setting if
-      // found. If a cell has multiple TTLs, resolve the conflict by using the
-      // first tag encountered.
-      Iterator<Tag> i = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-        cell.getTagsLength());
-      while (i.hasNext()) {
-        Tag t = i.next();
-        if (TagType.TTL_TAG_TYPE == t.getType()) {
-          // Unlike in schema, cell TTLs are stored in milliseconds, no need
-          // to convert
-          long ts = cell.getTimestamp();
-          assert t.getTagLength() == Bytes.SIZEOF_LONG;
-          long ttl = Bytes.toLong(t.getBuffer(), t.getTagOffset(), t.getTagLength());
-          if (ts + ttl < now) {
-            return true;
-          }
-          // Per cell TTLs cannot extend lifetime beyond family settings, so
-          // fall through to check that
-          break;
+    // Look for a TTL tag first. Use it instead of the family setting if
+    // found. If a cell has multiple TTLs, resolve the conflict by using the
+    // first tag encountered.
+    Iterator<Tag> i = CellUtil.tagsIterator(cell);
+    while (i.hasNext()) {
+      Tag t = i.next();
+      if (TagType.TTL_TAG_TYPE == t.getType()) {
+        // Unlike in schema, cell TTLs are stored in milliseconds, no need
+        // to convert
+        long ts = cell.getTimestamp();
+        assert t.getValueLength() == Bytes.SIZEOF_LONG;
+        long ttl = TagUtil.getValueAsLong(t);
+        if (ts + ttl < now) {
+          return true;
         }
+        // Per cell TTLs cannot extend lifetime beyond family settings, so
+        // fall through to check that
+        break;
       }
     }
     return false;

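Two details make this simplification safe: CellUtil.tagsIterator(cell) returns an empty iterator for a tag-less cell, so the removed getTagsLength() > 0 guard was only an optimization, and TagUtil.getValueAsLong() works whether the tag is array- or buffer-backed. Restated as a standalone sketch (class name and sample values are illustrative):

import java.util.Iterator;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CellTtlCheckSketch {
  // The first TTL tag wins; without one, the family TTL setting applies.
  static boolean isExpired(Cell cell, long now) {
    Iterator<Tag> i = CellUtil.tagsIterator(cell);
    while (i.hasNext()) {
      Tag t = i.next();
      if (TagType.TTL_TAG_TYPE == t.getType()) {
        long ttl = TagUtil.getValueAsLong(t); // cell TTLs are in milliseconds
        return cell.getTimestamp() + ttl < now;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Cell c = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        1000L, Bytes.toBytes("v"),
        new Tag[] { new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(500L)) });
    System.out.println(isExpired(c, 2000L)); // 1000 + 500 < 2000 -> true
  }
}
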
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 887af0a..f0723c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -658,8 +659,7 @@ public class AccessControlLists {
        return null;
      }
      List<Permission> results = Lists.newArrayList();
-     Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-        cell.getTagsLength());
+     Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
      while (tagsIterator.hasNext()) {
        Tag tag = tagsIterator.next();
        if (tag.getType() == ACL_TAG_TYPE) {
@@ -668,7 +668,12 @@ public class AccessControlLists {
          // use the builder
          AccessControlProtos.UsersAndPermissions.Builder builder = 
            AccessControlProtos.UsersAndPermissions.newBuilder();
-         ProtobufUtil.mergeFrom(builder, tag.getBuffer(), tag.getTagOffset(), tag.getTagLength());
+         if (tag.hasArray()) {
+           ProtobufUtil.mergeFrom(builder, tag.getValueArray(), tag.getValueOffset(),
+               tag.getValueLength());
+         } else {
+           ProtobufUtil.mergeFrom(builder, TagUtil.cloneValue(tag));
+         }
          ListMultimap<String,Permission> kvPerms =
            ProtobufUtil.toUsersAndPermissions(builder.build());
          // Are there permissions for this user?

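The hasArray() branch above is the new idiom for readers that want zero-copy access when a tag happens to be array-backed but must still handle buffer-backed tags. Extracted into a compilable sketch (hypothetical class name; the protobuf calls are the ones in the hunk):

import java.io.IOException;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;

public class TagValueParseSketch {
  static AccessControlProtos.UsersAndPermissions parse(Tag tag) throws IOException {
    AccessControlProtos.UsersAndPermissions.Builder builder =
        AccessControlProtos.UsersAndPermissions.newBuilder();
    if (tag.hasArray()) {
      // Zero-copy: parse straight out of the tag's backing array.
      ProtobufUtil.mergeFrom(builder, tag.getValueArray(), tag.getValueOffset(),
          tag.getValueLength());
    } else {
      // Buffer-backed tag: copy the value out once, then parse the copy.
      ProtobufUtil.mergeFrom(builder, TagUtil.cloneValue(tag));
    }
    return builder.build();
  }

  public static void main(String[] args) throws IOException {
    Tag t = new ArrayBackedTag((byte) 1,
        AccessControlProtos.UsersAndPermissions.newBuilder().build().toByteArray());
    System.out.println(parse(t));
  }
}
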
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 0d8b261..bb348a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.security.access;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -34,6 +35,7 @@ import java.util.TreeSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -54,6 +56,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagRewriteCell;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -882,15 +885,13 @@ public class AccessController extends BaseMasterAndRegionObserver
       List<Cell> newCells = Lists.newArrayList();
       for (Cell cell: e.getValue()) {
        // Prepend the supplied perms in a new ACL tag to an updated list of tags for the cell
-        List<Tag> tags = Lists.newArrayList(new Tag(AccessControlLists.ACL_TAG_TYPE, perms));
-        if (cell.getTagsLength() > 0) {
-          Iterator<Tag> tagIterator = CellUtil.tagsIterator(cell.getTagsArray(),
-            cell.getTagsOffset(), cell.getTagsLength());
-          while (tagIterator.hasNext()) {
-            tags.add(tagIterator.next());
-          }
+        List<Tag> tags = new ArrayList<Tag>();
+        tags.add(new ArrayBackedTag(AccessControlLists.ACL_TAG_TYPE, perms));
+        Iterator<Tag> tagIterator = CellUtil.tagsIterator(cell);
+        while (tagIterator.hasNext()) {
+          tags.add(tagIterator.next());
         }
-        newCells.add(new TagRewriteCell(cell, Tag.fromList(tags)));
+        newCells.add(new TagRewriteCell(cell, TagUtil.fromList(tags)));
       }
       // This is supposed to be safe, won't CME
       e.setValue(newCells);
@@ -915,14 +916,10 @@ public class AccessController extends BaseMasterAndRegionObserver
       return;
     }
     for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
-      Cell cell = cellScanner.current();
-      if (cell.getTagsLength() > 0) {
-        Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-        while (tagsItr.hasNext()) {
-          if (tagsItr.next().getType() == AccessControlLists.ACL_TAG_TYPE) {
-            throw new AccessDeniedException("Mutation contains cell with reserved type tag");
-          }
+      Iterator<Tag> tagsItr = CellUtil.tagsIterator(cellScanner.current());
+      while (tagsItr.hasNext()) {
+        if (tagsItr.next().getType() == AccessControlLists.ACL_TAG_TYPE) {
+          throw new AccessDeniedException("Mutation contains cell with reserved type tag");
         }
       }
     }
@@ -1997,32 +1994,21 @@ public class AccessController extends BaseMasterAndRegionObserver
 
     // Collect any ACLs from the old cell
     List<Tag> tags = Lists.newArrayList();
+    List<Tag> aclTags = Lists.newArrayList();
     ListMultimap<String,Permission> perms = ArrayListMultimap.create();
     if (oldCell != null) {
-      // Save an object allocation where we can
-      if (oldCell.getTagsLength() > 0) {
-        Iterator<Tag> tagIterator = CellUtil.tagsIterator(oldCell.getTagsArray(),
-          oldCell.getTagsOffset(), oldCell.getTagsLength());
-        while (tagIterator.hasNext()) {
-          Tag tag = tagIterator.next();
-          if (tag.getType() != AccessControlLists.ACL_TAG_TYPE) {
-            // Not an ACL tag, just carry it through
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("Carrying forward tag from " + oldCell + ": type " + tag.getType() +
-                " length " + tag.getTagLength());
-            }
-            tags.add(tag);
-          } else {
-            // Merge the perms from the older ACL into the current permission set
-            // TODO: The efficiency of this can be improved. Don't build just to unpack
-            // again, use the builder
-            AccessControlProtos.UsersAndPermissions.Builder builder =
-              AccessControlProtos.UsersAndPermissions.newBuilder();
-            ProtobufUtil.mergeFrom(builder, tag.getBuffer(), tag.getTagOffset(), tag.getTagLength());
-            ListMultimap<String,Permission> kvPerms =
-              ProtobufUtil.toUsersAndPermissions(builder.build());
-            perms.putAll(kvPerms);
+      Iterator<Tag> tagIterator = CellUtil.tagsIterator(oldCell);
+      while (tagIterator.hasNext()) {
+        Tag tag = tagIterator.next();
+        if (tag.getType() != AccessControlLists.ACL_TAG_TYPE) {
+          // Not an ACL tag, just carry it through
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Carrying forward tag from " + oldCell + ": type " + tag.getType()
+                + " length " + tag.getValueLength());
           }
+          tags.add(tag);
+        } else {
+          aclTags.add(tag);
         }
       }
     }
@@ -2031,7 +2017,7 @@ public class AccessController extends BaseMasterAndRegionObserver
     byte[] aclBytes = mutation.getACL();
     if (aclBytes != null) {
       // Yes, use it
-      tags.add(new Tag(AccessControlLists.ACL_TAG_TYPE, aclBytes));
+      tags.add(new ArrayBackedTag(AccessControlLists.ACL_TAG_TYPE, aclBytes));
     } else {
       // No, use what we carried forward
       if (perms != null) {
@@ -2041,8 +2027,7 @@ public class AccessController extends BaseMasterAndRegionObserver
         if (LOG.isTraceEnabled()) {
           LOG.trace("Carrying forward ACLs from " + oldCell + ": " + perms);
         }
-        tags.add(new Tag(AccessControlLists.ACL_TAG_TYPE,
-            ProtobufUtil.toUsersAndPermissions(perms).toByteArray()));
+        tags.addAll(aclTags);
       }
     }
 
@@ -2051,7 +2036,7 @@ public class AccessController extends BaseMasterAndRegionObserver
       return newCell;
     }
 
-    Cell rewriteCell = new TagRewriteCell(newCell, Tag.fromList(tags));
+    Cell rewriteCell = new TagRewriteCell(newCell, TagUtil.fromList(tags));
     return rewriteCell;
   }
 

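Of the AccessController changes, the reserved-tag check reads most cleanly on its own. A sketch of the pattern, using TagType.ACL_TAG_TYPE to stand in for AccessControlLists.ACL_TAG_TYPE (class name and the sample Put are mine):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.util.Bytes;

public class ReservedTagCheckSketch {
  // Reject any client-supplied cell that already carries an ACL-typed tag.
  static void check(Mutation m) throws IOException {
    for (CellScanner scanner = m.cellScanner(); scanner.advance();) {
      Iterator<Tag> tagsItr = CellUtil.tagsIterator(scanner.current());
      while (tagsItr.hasNext()) {
        if (tagsItr.next().getType() == TagType.ACL_TAG_TYPE) {
          throw new AccessDeniedException("Mutation contains cell with reserved type tag");
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    check(new Put(Bytes.toBytes("r"))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    System.out.println("no reserved tags");
  }
}
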
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index 42d6a03..f1aec09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -42,6 +42,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -90,7 +92,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
     } catch (IOException e) {
       // We write to a byte array. No Exception can happen.
     }
-    LABELS_TABLE_TAGS[0] = new Tag(VISIBILITY_TAG_TYPE, baos.toByteArray());
+    LABELS_TABLE_TAGS[0] = new ArrayBackedTag(VISIBILITY_TAG_TYPE, baos.toByteArray());
   }
 
   public DefaultVisibilityLabelServiceImpl() {
@@ -481,42 +483,37 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
       @Override
       public boolean evaluate(Cell cell) throws IOException {
         boolean visibilityTagPresent = false;
-        // Save an object allocation where we can
-        if (cell.getTagsLength() > 0) {
-          Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-              cell.getTagsLength());
-          while (tagsItr.hasNext()) {
-            boolean includeKV = true;
-            Tag tag = tagsItr.next();
-            if (tag.getType() == VISIBILITY_TAG_TYPE) {
-              visibilityTagPresent = true;
-              int offset = tag.getTagOffset();
-              int endOffset = offset + tag.getTagLength();
-              while (offset < endOffset) {
-                Pair<Integer, Integer> result = StreamUtils
-                    .readRawVarint32(tag.getBuffer(), offset);
-                int currLabelOrdinal = result.getFirst();
-                if (currLabelOrdinal < 0) {
-                  // check for the absence of this label in the Scan Auth labels
-                  // i.e. to check that the corresponding BitSet bit is 0
-                  int temp = -currLabelOrdinal;
-                  if (bs.get(temp)) {
-                    includeKV = false;
-                    break;
-                  }
-                } else {
-                  if (!bs.get(currLabelOrdinal)) {
-                    includeKV = false;
-                    break;
-                  }
+        Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell);
+        while (tagsItr.hasNext()) {
+          boolean includeKV = true;
+          Tag tag = tagsItr.next();
+          if (tag.getType() == VISIBILITY_TAG_TYPE) {
+            visibilityTagPresent = true;
+            int offset = tag.getValueOffset();
+            int endOffset = offset + tag.getValueLength();
+            while (offset < endOffset) {
+              Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
+              int currLabelOrdinal = result.getFirst();
+              if (currLabelOrdinal < 0) {
+                // check for the absence of this label in the Scan Auth labels
+                // i.e. to check that the corresponding BitSet bit is 0
+                int temp = -currLabelOrdinal;
+                if (bs.get(temp)) {
+                  includeKV = false;
+                  break;
+                }
+              } else {
+                if (!bs.get(currLabelOrdinal)) {
+                  includeKV = false;
+                  break;
                 }
-                offset += result.getSecond();
-              }
-              if (includeKV) {
-              // One visibility expression evaluated to true, so this
-              // KV belongs in the result.
-                return true;
               }
+              offset += result.getSecond();
+            }
+            if (includeKV) {
+              // One visibility expression evaluated to true, so this
+              // KV belongs in the result.
+              return true;
             }
           }
         }
@@ -596,8 +593,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
       for (Tag tag : deleteVisTags) {
         matchFound = false;
         for (Tag givenTag : putVisTags) {
-          if (Bytes.equals(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength(),
-              givenTag.getBuffer(), givenTag.getTagOffset(), givenTag.getTagLength())) {
+          if (TagUtil.matchingValue(tag, givenTag)) {
             matchFound = true;
             break;
           }
@@ -621,10 +617,10 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
   private static void getSortedTagOrdinals(List<List<Integer>> fullTagsList, Tag tag)
       throws IOException {
     List<Integer> tagsOrdinalInSortedOrder = new ArrayList<Integer>();
-    int offset = tag.getTagOffset();
-    int endOffset = offset + tag.getTagLength();
+    int offset = tag.getValueOffset();
+    int endOffset = offset + tag.getValueLength();
     while (offset < endOffset) {
-      Pair<Integer, Integer> result = StreamUtils.readRawVarint32(tag.getBuffer(), offset);
+      Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
       tagsOrdinalInSortedOrder.add(result.getFirst());
       offset += result.getSecond();
     }
@@ -678,11 +674,11 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
           visibilityString.append(VisibilityConstants.CLOSED_PARAN).append(
               VisibilityConstants.OR_OPERATOR);
         }
-        int offset = tag.getTagOffset();
-        int endOffset = offset + tag.getTagLength();
+        int offset = tag.getValueOffset();
+        int endOffset = offset + tag.getValueLength();
         boolean expressionStart = true;
         while (offset < endOffset) {
-          Pair<Integer, Integer> result = StreamUtils.readRawVarint32(tag.getBuffer(), offset);
+          Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
           int currLabelOrdinal = result.getFirst();
           if (currLabelOrdinal < 0) {
             int temp = -currLabelOrdinal;

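TagUtil.readVIntValuePart() replaces StreamUtils.readRawVarint32() over tag.getBuffer(): it decodes one varint from the tag's value region and reports how many bytes were consumed, so callers can walk the region without touching the backing storage directly. The ordinal-walking loop, extracted (illustrative class name; the sample tag encodes two one-byte varints):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.util.Pair;

public class LabelOrdinalSketch {
  static List<Integer> readOrdinals(Tag tag) throws IOException {
    List<Integer> ordinals = new ArrayList<Integer>();
    int offset = tag.getValueOffset();
    int endOffset = offset + tag.getValueLength();
    while (offset < endOffset) {
      // Pair = (decoded varint value, number of bytes it occupied)
      Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
      ordinals.add(result.getFirst());
      offset += result.getSecond();
    }
    return ordinals;
  }

  public static void main(String[] args) throws IOException {
    Tag t = new ArrayBackedTag(TagType.VISIBILITY_TAG_TYPE, new byte[] { 0x01, 0x02 });
    System.out.println(readOrdinals(t)); // [1, 2]
  }
}
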
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index 5b8bdb3..b025758 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagRewriteCell;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -340,8 +341,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
           Tag tag = pair.getSecond();
           if (cellVisibility == null && tag != null) {
             // May need to store only the first one
-            cellVisibility = new CellVisibility(Bytes.toString(tag.getBuffer(), tag.getTagOffset(),
-                tag.getTagLength()));
+            cellVisibility = new CellVisibility(TagUtil.getValueAsString(tag));
             modifiedTagFound = true;
           }
         }
@@ -368,14 +368,13 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
             List<Cell> updatedCells = new ArrayList<Cell>();
             for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
               Cell cell = cellScanner.current();
-              List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
-                  cell.getTagsLength());
+              List<Tag> tags = CellUtil.getTags(cell);
               if (modifiedTagFound) {
                 // Rewrite the tags by removing the modified tags.
                 removeReplicationVisibilityTag(tags);
               }
               tags.addAll(visibilityTags);
-              Cell updatedCell = new TagRewriteCell(cell, Tag.fromList(tags));
+              Cell updatedCell = new TagRewriteCell(cell, TagUtil.fromList(tags));
               updatedCells.add(updatedCell);
             }
             m.getFamilyCellMap().clear();
@@ -472,28 +471,22 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
      // cell visibility tags
       // have been modified
       Tag modifiedTag = null;
-      if (cell.getTagsLength() > 0) {
-        Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(),
-            cell.getTagsOffset(), cell.getTagsLength());
-        while (tagsIterator.hasNext()) {
-          Tag tag = tagsIterator.next();
-          if (tag.getType() == TagType.STRING_VIS_TAG_TYPE) {
-            modifiedTag = tag;
-            break;
-          }
+      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
+      while (tagsIterator.hasNext()) {
+        Tag tag = tagsIterator.next();
+        if (tag.getType() == TagType.STRING_VIS_TAG_TYPE) {
+          modifiedTag = tag;
+          break;
         }
       }
       pair.setFirst(true);
       pair.setSecond(modifiedTag);
       return pair;
     }
-    if (cell.getTagsLength() > 0) {
-      Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-      while (tagsItr.hasNext()) {
-        if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
-          return pair;
-        }
+    Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell);
+    while (tagsItr.hasNext()) {
+      if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
+        return pair;
       }
     }
     pair.setFirst(true);
@@ -520,13 +513,10 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
     if (isSystemOrSuperUser()) {
       return true;
     }
-    if (cell.getTagsLength() > 0) {
-      Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-      while (tagsItr.hasNext()) {
-        if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
-          return false;
-        }
+    Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell);
+    while (tagsItr.hasNext()) {
+      if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
+        return false;
       }
     }
     return true;
@@ -739,21 +729,17 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
     boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser());
     tags.addAll(this.visibilityLabelService.createVisibilityExpTags(cellVisibility.getExpression(),
         true, authCheck));
-    // Save an object allocation where we can
-    if (newCell.getTagsLength() > 0) {
-      // Carry forward all other tags
-      Iterator<Tag> tagsItr = CellUtil.tagsIterator(newCell.getTagsArray(),
-          newCell.getTagsOffset(), newCell.getTagsLength());
-      while (tagsItr.hasNext()) {
-        Tag tag = tagsItr.next();
-        if (tag.getType() != TagType.VISIBILITY_TAG_TYPE
-            && tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
-          tags.add(tag);
-        }
+    // Carry forward all other tags
+    Iterator<Tag> tagsItr = CellUtil.tagsIterator(newCell);
+    while (tagsItr.hasNext()) {
+      Tag tag = tagsItr.next();
+      if (tag.getType() != TagType.VISIBILITY_TAG_TYPE
+          && tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
+        tags.add(tag);
       }
     }
 
-    Cell rewriteCell = new TagRewriteCell(newCell, Tag.fromList(tags));
+    Cell rewriteCell = new TagRewriteCell(newCell, TagUtil.fromList(tags));
     return rewriteCell;
   }
 

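TagUtil.getValueAsString() replaces the Bytes.toString(buffer, offset, length) pattern for string-typed tags such as the replicated visibility expression. In isolation (the expression value is made up):

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class StringVisTagSketch {
  public static void main(String[] args) {
    Tag tag = new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, Bytes.toBytes("secret|topsecret"));
    // Works no matter how the tag value is backed; no offsets to juggle.
    CellVisibility cellVisibility = new CellVisibility(TagUtil.getValueAsString(tag));
    System.out.println(cellVisibility.getExpression());
  }
}
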
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
index aca4994..3db54c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
@@ -24,10 +24,12 @@ import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagRewriteCell;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
@@ -79,7 +81,8 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint {
                 byte[] modifiedVisExpression = visibilityLabelsService
                     .encodeVisibilityForReplication(visTags, serializationFormat);
                 if (modifiedVisExpression != null) {
-                  nonVisTags.add(new Tag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression));
+                  nonVisTags
+                      .add(new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression));
                 }
               } catch (Exception ioe) {
                 LOG.error(
@@ -92,7 +95,7 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint {
                 continue;
               }
               // Recreate the cell with the new tags and the existing tags
-              Cell newCell = new TagRewriteCell(cell, Tag.fromList(nonVisTags));
+              Cell newCell = new TagRewriteCell(cell, TagUtil.fromList(nonVisTags));
               newEdit.add(newCell);
             } else {
               newEdit.add(cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
index c725b11..1db506d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
@@ -35,11 +35,13 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -74,7 +76,7 @@ public class VisibilityUtils {
   public static final String VISIBILITY_LABEL_GENERATOR_CLASS =
       "hbase.regionserver.scan.visibility.label.generator.class";
   public static final String SYSTEM_LABEL = "system";
-  public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = new Tag(
+  public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = new ArrayBackedTag(
       TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE,
       VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL);
   private static final String COMMA = ",";
@@ -209,16 +211,13 @@ public class VisibilityUtils {
    */
   public static Byte extractVisibilityTags(Cell cell, List<Tag> tags) {
     Byte serializationFormat = null;
-    if (cell.getTagsLength() > 0) {
-      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-      while (tagsIterator.hasNext()) {
-        Tag tag = tagsIterator.next();
-        if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
-          serializationFormat = tag.getBuffer()[tag.getTagOffset()];
-        } else if (tag.getType() == VISIBILITY_TAG_TYPE) {
-          tags.add(tag);
-        }
+    Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
+    while (tagsIterator.hasNext()) {
+      Tag tag = tagsIterator.next();
+      if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
+        serializationFormat = TagUtil.getValueAsByte(tag);
+      } else if (tag.getType() == VISIBILITY_TAG_TYPE) {
+        tags.add(tag);
       }
     }
     return serializationFormat;
@@ -239,30 +238,23 @@ public class VisibilityUtils {
   public static Byte extractAndPartitionTags(Cell cell, List<Tag> visTags,
       List<Tag> nonVisTags) {
     Byte serializationFormat = null;
-    if (cell.getTagsLength() > 0) {
-      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-      while (tagsIterator.hasNext()) {
-        Tag tag = tagsIterator.next();
-        if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
-          serializationFormat = tag.getBuffer()[tag.getTagOffset()];
-        } else if (tag.getType() == VISIBILITY_TAG_TYPE) {
-          visTags.add(tag);
-        } else {
-        // ignore string-encoded visibility expressions; they are added back in replication handling
-          nonVisTags.add(tag);
-        }
+    Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
+    while (tagsIterator.hasNext()) {
+      Tag tag = tagsIterator.next();
+      if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
+        serializationFormat = TagUtil.getValueAsByte(tag);
+      } else if (tag.getType() == VISIBILITY_TAG_TYPE) {
+        visTags.add(tag);
+      } else {
+        // ignore string-encoded visibility expressions; they are added back in replication handling
+        nonVisTags.add(tag);
       }
     }
     return serializationFormat;
   }
 
   public static boolean isVisibilityTagsPresent(Cell cell) {
-    if (cell.getTagsLength() == 0) {
-      return false;
-    }
-    Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-        cell.getTagsLength());
+    Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
     while (tagsIterator.hasNext()) {
       Tag tag = tagsIterator.next();
       if (tag.getType() == VISIBILITY_TAG_TYPE) {
@@ -322,7 +314,7 @@ public class VisibilityUtils {
     if (node.isSingleNode()) {
       getLabelOrdinals(node, labelOrdinals, auths, checkAuths, ordinalProvider);
       writeLabelOrdinalsToStream(labelOrdinals, dos);
-      tags.add(new Tag(VISIBILITY_TAG_TYPE, baos.toByteArray()));
+      tags.add(new ArrayBackedTag(VISIBILITY_TAG_TYPE, baos.toByteArray()));
       baos.reset();
     } else {
       NonLeafExpressionNode nlNode = (NonLeafExpressionNode) node;
@@ -330,14 +322,14 @@ public class VisibilityUtils {
         for (ExpressionNode child : nlNode.getChildExps()) {
           getLabelOrdinals(child, labelOrdinals, auths, checkAuths, ordinalProvider);
           writeLabelOrdinalsToStream(labelOrdinals, dos);
-          tags.add(new Tag(VISIBILITY_TAG_TYPE, baos.toByteArray()));
+          tags.add(new ArrayBackedTag(VISIBILITY_TAG_TYPE, baos.toByteArray()));
           baos.reset();
           labelOrdinals.clear();
         }
       } else {
         getLabelOrdinals(nlNode, labelOrdinals, auths, checkAuths, ordinalProvider);
         writeLabelOrdinalsToStream(labelOrdinals, dos);
-        tags.add(new Tag(VISIBILITY_TAG_TYPE, baos.toByteArray()));
+        tags.add(new ArrayBackedTag(VISIBILITY_TAG_TYPE, baos.toByteArray()));
         baos.reset();
       }
     }

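After this patch the extract/partition helpers all share one shape: a single pass over CellUtil.tagsIterator(cell), with TagUtil.getValueAsByte() pulling the serialization-format byte instead of indexing tag.getBuffer(). Restated standalone (illustrative class name and sample cell):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class PartitionTagsSketch {
  static Byte partition(Cell cell, List<Tag> visTags, List<Tag> nonVisTags) {
    Byte serializationFormat = null;
    Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
    while (tagsIterator.hasNext()) {
      Tag tag = tagsIterator.next();
      if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
        serializationFormat = TagUtil.getValueAsByte(tag);
      } else if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) {
        visTags.add(tag);
      } else {
        nonVisTags.add(tag);
      }
    }
    return serializationFormat;
  }

  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), 1L,
        Bytes.toBytes("v"),
        new Tag[] { new ArrayBackedTag(TagType.VISIBILITY_TAG_TYPE, new byte[] { 0x01 }) });
    List<Tag> vis = new ArrayList<Tag>();
    List<Tag> nonVis = new ArrayList<Tag>();
    System.out.println(partition(cell, vis, nonVis) + " / " + vis.size() + " / " + nonVis.size());
  }
}
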
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index 5df7394..b212fe6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
@@ -339,12 +340,10 @@ public class WALPrettyPrinter {
     stringMap.put("vlen", cell.getValueLength());
     if (cell.getTagsLength() > 0) {
       List<String> tagsString = new ArrayList<String>();
-      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
+      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
       while (tagsIterator.hasNext()) {
         Tag tag = tagsIterator.next();
-        tagsString.add((tag.getType()) + ":"
-            + Bytes.toStringBinary(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength()));
+        tagsString.add((tag.getType()) + ":" + Bytes.toStringBinary(TagUtil.cloneValue(tag)));
       }
       stringMap.put("tag", tagsString);
     }

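TagUtil.cloneValue() copies out only the tag's value bytes, so the printer no longer needs the (buffer, offset, length) triple. The printing loop end to end (the sample cell is made up):

import java.util.Iterator;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class PrintTagsSketch {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), 1L,
        Bytes.toBytes("v"), new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("myTagVal")) });
    Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
    while (tagsIterator.hasNext()) {
      Tag tag = tagsIterator.next();
      // cloneValue() also handles buffer-backed tags; prints "1:myTagVal"
      System.out.println(tag.getType() + ":" + Bytes.toStringBinary(TagUtil.cloneValue(tag)));
    }
  }
}
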
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 30629a3..821b995 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1405,7 +1405,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
           byte[] tag = generateData(this.rand, TAG_LENGTH);
           Tag[] tags = new Tag[opts.noOfTags];
           for (int n = 0; n < opts.noOfTags; n++) {
-            Tag t = new Tag((byte) n, tag);
+            Tag t = new ArrayBackedTag((byte) n, tag);
             tags[n] = t;
           }
           KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
@@ -1493,7 +1493,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
           byte[] tag = generateData(this.rand, TAG_LENGTH);
           Tag[] tags = new Tag[opts.noOfTags];
           for (int n = 0; n < opts.noOfTags; n++) {
-            Tag t = new Tag((byte) n, tag);
+            Tag t = new ArrayBackedTag((byte) n, tag);
             tags[n] = t;
           }
           KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,

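The test-side churn in the rest of this commit is mechanical: every new Tag(type, value) becomes new ArrayBackedTag(type, value), with no behavioral change, since ArrayBackedTag is the array-backed implementation of the now-interface Tag. The construction pattern the tests use, in miniature (values stand in for the generated data):

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;

public class BuildTaggedKvSketch {
  public static void main(String[] args) {
    byte[] tagValue = Bytes.toBytes("t"); // stand-in for generateData(rand, TAG_LENGTH)
    int noOfTags = 4;                     // stand-in for opts.noOfTags
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      tags[n] = new ArrayBackedTag((byte) n, tagValue);
    }
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("info"), Bytes.toBytes("q"),
        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("v"), tags);
    System.out.println(kv.getTagsLength() + " tag bytes");
  }
}
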
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java
index f83590a..1647e97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -103,11 +104,11 @@ public class TestResultSizeEstimation {
     Table table = TEST_UTIL.createTable(TABLE, FAMILIES);
     Put p = new Put(ROW1);
     p.add(new KeyValue(ROW1, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE,
-      new Tag[] { new Tag((byte)1, new byte[TAG_DATA_SIZE]) } ));
+      new Tag[] { new ArrayBackedTag((byte)1, new byte[TAG_DATA_SIZE]) } ));
     table.put(p);
     p = new Put(ROW2);
     p.add(new KeyValue(ROW2, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE,
-      new Tag[] { new Tag((byte)1, new byte[TAG_DATA_SIZE]) } ));
+      new Tag[] { new ArrayBackedTag((byte)1, new byte[TAG_DATA_SIZE]) } ));
     table.put(p);
 
     Scan s = new Scan();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index ce48ca1..00969b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeSeeker;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream;
@@ -136,10 +137,10 @@ public class TestDataBlockEncoders {
     } else {
       byte[] metaValue1 = Bytes.toBytes("metaValue1");
       byte[] metaValue2 = Bytes.toBytes("metaValue2");
-      kvList.add(new KeyValue(row, family, qualifier, 0l, value, new Tag[] { new Tag((byte) 1,
-          metaValue1) }));
-      kvList.add(new KeyValue(row, family, qualifier, 0l, value, new Tag[] { new Tag((byte) 1,
-          metaValue2) }));
+      kvList.add(new KeyValue(row, family, qualifier, 0l, value,
+          new Tag[] { new ArrayBackedTag((byte) 1, metaValue1) }));
+      kvList.add(new KeyValue(row, family, qualifier, 0l, value,
+          new Tag[] { new ArrayBackedTag((byte) 1, metaValue2) }));
     }
     testEncodersOnDataset(kvList, includesMemstoreTS, includesTags);
   }
@@ -160,10 +161,10 @@ public class TestDataBlockEncoders {
     if (includesTags) {
       byte[] metaValue1 = Bytes.toBytes("metaValue1");
       byte[] metaValue2 = Bytes.toBytes("metaValue2");
-      kvList.add(new KeyValue(row, family, qualifier, 0l, value, new Tag[] { new Tag((byte) 1,
-          metaValue1) }));
-      kvList.add(new KeyValue(row, family, qualifier, 0l, value, new Tag[] { new Tag((byte) 1,
-          metaValue2) }));
+      kvList.add(new KeyValue(row, family, qualifier, 0l, value,
+          new Tag[] { new ArrayBackedTag((byte) 1, metaValue1) }));
+      kvList.add(new KeyValue(row, family, qualifier, 0l, value,
+          new Tag[] { new ArrayBackedTag((byte) 1, metaValue2) }));
     } else {
       kvList.add(new KeyValue(row, family, qualifier, -1l, Type.Put, value));
       kvList.add(new KeyValue(row, family, qualifier, -2l, Type.Put, value));
@@ -416,10 +417,10 @@ public class TestDataBlockEncoders {
     byte[] value0 = new byte[] { 'd' };
     byte[] value1 = new byte[] { 0x00 };
     if (includesTags) {
-      kvList.add(new KeyValue(row, family, qualifier0, 0, value0, new Tag[] { new Tag((byte) 1,
-          "value1") }));
-      kvList.add(new KeyValue(row, family, qualifier1, 0, value1, new Tag[] { new Tag((byte) 1,
-          "value1") }));
+      kvList.add(new KeyValue(row, family, qualifier0, 0, value0,
+          new Tag[] { new ArrayBackedTag((byte) 1, "value1") }));
+      kvList.add(new KeyValue(row, family, qualifier1, 0, value1,
+          new Tag[] { new ArrayBackedTag((byte) 1, "value1") }));
     } else {
       kvList.add(new KeyValue(row, family, qualifier0, 0, Type.Put, value0));
       kvList.add(new KeyValue(row, family, qualifier1, 0, Type.Put, value1));

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
index ce66e82..0869df6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -148,7 +149,7 @@ public class TestEncodedSeekers {
         byte[] value = dataGenerator.generateRandomSizeValue(key, col);
         if (includeTags) {
           Tag[] tag = new Tag[1];
-          tag[0] = new Tag((byte) 1, "Visibility");
+          tag[0] = new ArrayBackedTag((byte) 1, "Visibility");
           KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
           put.add(kv);
         } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java
index 031bf25..fd9b90b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker;
@@ -280,7 +281,7 @@ public class TestPrefixTreeEncoding {
           kvset.add(kv);
         } else {
           KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES, getQualifier(j), 0l,
-              getValue(batchId, i, j), new Tag[] { new Tag((byte) 1, "metaValue1") });
+              getValue(batchId, i, j), new Tag[] { new ArrayBackedTag((byte) 1, "metaValue1") });
           kvset.add(kv);
         }
       }
@@ -308,7 +309,7 @@ public class TestPrefixTreeEncoding {
           kvset.add(kv);
         } else {
           KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES, getQualifier(j), 0l,
-              getValue(batchId, i, j), new Tag[] { new Tag((byte) 1, "metaValue1") });
+              getValue(batchId, i, j), new Tag[] { new ArrayBackedTag((byte) 1, "metaValue1") });
           kvset.add(kv);
         }
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index ccf59a4..5158e35 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -39,6 +39,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -385,7 +386,7 @@ public class TestCacheOnWrite {
       byte[] value = RandomKeyValueUtil.randomValue(rand);
       KeyValue kv;
       if(useTags) {
-        Tag t = new Tag((byte) 1, "visibility");
+        Tag t = new ArrayBackedTag((byte) 1, "visibility");
         List<Tag> tagList = new ArrayList<Tag>();
         tagList.add(t);
         Tag[] tags = new Tag[1];
@@ -434,7 +435,7 @@ public class TestCacheOnWrite {
           String valueStr = "value_" + rowStr + "_" + qualStr;
           for (int iTS = 0; iTS < 5; ++iTS) {
             if (useTags) {
-              Tag t = new Tag((byte) 1, "visibility");
+              Tag t = new ArrayBackedTag((byte) 1, "visibility");
               Tag[] tags = new Tag[1];
               tags[0] = t;
               KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 929ad8a..66fb49c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
@@ -169,7 +170,7 @@ public class TestHFile  {
     for (int i = start; i < (start + n); i++) {
       String key = String.format(localFormatter, Integer.valueOf(i));
       if (useTags) {
-        Tag t = new Tag((byte) 1, "myTag1");
+        Tag t = new ArrayBackedTag((byte) 1, "myTag1");
         Tag[] tags = new Tag[1];
         tags[0] = t;
         kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index 12fb584..4ee7f5b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
@@ -167,8 +168,8 @@ public class TestHFileBlock {
       if (!useTag) {
         keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
       } else {
-        keyValues.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
-            (byte) 1, Bytes.toBytes("myTagVal")) }));
+        keyValues.add(new KeyValue(row, family, qualifier, timestamp, value,
+            new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("myTagVal")) }));
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
index 15aa912..c7eb11b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
@@ -140,7 +141,7 @@ public class TestHFileWriterV3 {
         for (int j = 0; j < 1 + rand.nextInt(4); j++) {
           byte[] tagBytes = new byte[16];
           rand.nextBytes(tagBytes);
-          tags.add(new Tag((byte) 1, tagBytes));
+          tags.add(new ArrayBackedTag((byte) 1, tagBytes));
         }
         keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP,
             valueBytes, tags);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
index a17368c..90e398d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -84,7 +85,7 @@ public class TestReseekTo {
             Bytes.toBytes(value));
         writer.append(kv);
       } else if (tagUsage == TagUsage.ONLY_TAG) {
-        Tag t = new Tag((byte) 1, "myTag1");
+        Tag t = new ArrayBackedTag((byte) 1, "myTag1");
         Tag[] tags = new Tag[1];
         tags[0] = t;
         kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
@@ -92,7 +93,7 @@ public class TestReseekTo {
         writer.append(kv);
       } else {
         if (key % 4 == 0) {
-          Tag t = new Tag((byte) 1, "myTag1");
+          Tag t = new ArrayBackedTag((byte) 1, "myTag1");
           Tag[] tags = new Tag[1];
           tags[0] = t;
           kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
index c1d91ec..6eead71 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.OffheapKeyValue;
 import org.apache.hadoop.hbase.ShareableMemory;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -88,7 +90,7 @@ public class TestSeekTo {
       return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("qualifier"),
           Bytes.toBytes("value"));
     } else if (tagUsage == TagUsage.ONLY_TAG) {
-      Tag t = new Tag((byte) 1, "myTag1");
+      Tag t = new ArrayBackedTag((byte) 1, "myTag1");
       Tag[] tags = new Tag[1];
       tags[0] = t;
       return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("qualifier"),
@@ -100,7 +102,7 @@ public class TestSeekTo {
             Bytes.toBytes("qualifier"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
       } else {
         switchKVs = false;
-        Tag t = new Tag((byte) 1, "myTag1");
+        Tag t = new ArrayBackedTag((byte) 1, "myTag1");
         Tag[] tags = new Tag[1];
         tags[0] = t;
         return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"),
@@ -174,11 +176,10 @@ public class TestSeekTo {
     assertEquals("i", toRowStr(scanner.getCell()));
     Cell cell = scanner.getCell();
     if (tagUsage != TagUsage.NO_TAG && cell.getTagsLength() > 0) {
-      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
+      Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell);
       while (tagsIterator.hasNext()) {
         Tag next = tagsIterator.next();
-        assertEquals("myTag1", Bytes.toString(next.getValue()));
+        assertEquals("myTag1", Bytes.toString(TagUtil.cloneValue(next)));
       }
     }
     assertTrue(scanner.seekBefore(toKV("k", tagUsage)));

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 47b6b5c..ef02431 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
@@ -57,7 +58,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
@@ -190,7 +190,8 @@ public class TestHMobStore {
 
     String targetPathName = MobUtils.formatDate(currentDate);
     byte[] referenceValue = Bytes.toBytes(targetPathName + Path.SEPARATOR + mobFilePath.getName());
-    Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName().getName());
+    Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE,
+        store.getTableName().getName());
     KeyValue kv1 = new KeyValue(row, family, qf1, Long.MAX_VALUE, referenceValue);
     KeyValue kv2 = new KeyValue(row, family, qf2, Long.MAX_VALUE, referenceValue);
     KeyValue kv3 = new KeyValue(row2, family, qf3, Long.MAX_VALUE, referenceValue);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4582e31..cef92a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -96,7 +96,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Append;
@@ -6335,16 +6335,16 @@ public class TestHRegion {
       long now = EnvironmentEdgeManager.currentTime();
       // Add a cell that will expire in 5 seconds via cell TTL
       region.put(new Put(row).add(new KeyValue(row, fam1, q1, now,
-        HConstants.EMPTY_BYTE_ARRAY, new Tag[] {
+        HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
           // TTL tags specify ts in milliseconds
-          new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
+          new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
       // Add a cell that will expire after 10 seconds via family setting
       region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
       // Add a cell that will expire in 15 seconds via cell TTL
       region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1,
-        HConstants.EMPTY_BYTE_ARRAY, new Tag[] {
+        HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
           // TTL tags specify ts in milliseconds
-          new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
+          new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
       // Add a cell that will expire in 20 seconds via family setting
       region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
index 1bcb7c9..3c062f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
@@ -31,9 +31,11 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -86,7 +88,7 @@ public class TestStoreFileScannerWithTagCompression {
           kv.getRowLength()));
       List<Tag> tags = KeyValueUtil.ensureKeyValue(kv).getTags();
       assertEquals(1, tags.size());
-      assertEquals("tag3", Bytes.toString(tags.get(0).getValue()));
+      assertEquals("tag3", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
     } finally {
       s.close();
     }
@@ -97,9 +99,9 @@ public class TestStoreFileScannerWithTagCompression {
     byte[] qualifier = Bytes.toBytes("q");
     long now = System.currentTimeMillis();
     byte[] b = Bytes.toBytes("k1");
-    Tag t1 = new Tag((byte) 1, "tag1");
-    Tag t2 = new Tag((byte) 2, "tag2");
-    Tag t3 = new Tag((byte) 3, "tag3");
+    Tag t1 = new ArrayBackedTag((byte) 1, "tag1");
+    Tag t2 = new ArrayBackedTag((byte) 2, "tag2");
+    Tag t3 = new ArrayBackedTag((byte) 3, "tag3");
     try {
       writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t1 }));
       b = Bytes.toBytes("k3");

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
index a85e479..0f7f23a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Durability;
@@ -325,7 +327,7 @@ public class TestTags {
             if (CellUtil.matchingRow(current, row)) {
               assertEquals(1, TestCoprocessorForTags.tags.size());
               Tag tag = TestCoprocessorForTags.tags.get(0);
-              assertEquals(bigTagLen, tag.getTagLength());
+              assertEquals(bigTagLen, tag.getValueLength());
             } else {
               assertEquals(0, TestCoprocessorForTags.tags.size());
             }
@@ -350,7 +352,7 @@ public class TestTags {
             if (CellUtil.matchingRow(current, row)) {
               assertEquals(1, TestCoprocessorForTags.tags.size());
               Tag tag = TestCoprocessorForTags.tags.get(0);
-              assertEquals(bigTagLen, tag.getTagLength());
+              assertEquals(bigTagLen, tag.getValueLength());
             } else {
               assertEquals(0, TestCoprocessorForTags.tags.size());
             }
@@ -403,7 +405,7 @@ public class TestTags {
       List<Tag> tags = TestCoprocessorForTags.tags;
       assertEquals(3L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
       assertEquals(1, tags.size());
-      assertEquals("tag1", Bytes.toString(tags.get(0).getValue()));
+      assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
       TestCoprocessorForTags.checkTagPresence = false;
       TestCoprocessorForTags.tags = null;
 
@@ -421,7 +423,7 @@ public class TestTags {
       // We cannot assume the ordering of tags
       List<String> tagValues = new ArrayList<String>();
       for (Tag tag: tags) {
-        tagValues.add(Bytes.toString(tag.getValue()));
+        tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
       }
       assertTrue(tagValues.contains("tag1"));
       assertTrue(tagValues.contains("tag2"));
@@ -445,7 +447,7 @@ public class TestTags {
       tags = TestCoprocessorForTags.tags;
       assertEquals(4L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
       assertEquals(1, tags.size());
-      assertEquals("tag2", Bytes.toString(tags.get(0).getValue()));
+      assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
       TestCoprocessorForTags.checkTagPresence = false;
       TestCoprocessorForTags.tags = null;
 
@@ -466,7 +468,7 @@ public class TestTags {
       kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
       tags = TestCoprocessorForTags.tags;
       assertEquals(1, tags.size());
-      assertEquals("tag1", Bytes.toString(tags.get(0).getValue()));
+      assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
       TestCoprocessorForTags.checkTagPresence = false;
       TestCoprocessorForTags.tags = null;
 
@@ -483,7 +485,7 @@ public class TestTags {
       // We cannot assume the ordering of tags
       tagValues.clear();
       for (Tag tag: tags) {
-        tagValues.add(Bytes.toString(tag.getValue()));
+        tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
       }
       assertTrue(tagValues.contains("tag1"));
       assertTrue(tagValues.contains("tag2"));
@@ -506,7 +508,7 @@ public class TestTags {
       kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
       tags = TestCoprocessorForTags.tags;
       assertEquals(1, tags.size());
-      assertEquals("tag2", Bytes.toString(tags.get(0).getValue()));
+      assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
     } finally {
       TestCoprocessorForTags.checkTagPresence = false;
       TestCoprocessorForTags.tags = null;
@@ -569,7 +571,7 @@ public class TestTags {
             if (cf == null) {
               cf = CellUtil.cloneFamily(kv);
             }
-            Tag tag = new Tag((byte) 1, attribute);
+            Tag tag = new ArrayBackedTag((byte) 1, attribute);
             List<Tag> tagList = new ArrayList<Tag>();
             tagList.add(tag);
 
@@ -611,7 +613,7 @@ public class TestTags {
           CellScanner cellScanner = result.cellScanner();
           if (cellScanner.advance()) {
             Cell cell = cellScanner.current();
-            tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
+            tags = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(),
                 cell.getTagsLength());
           }
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java
index 0450904..104f897 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java
@@ -24,9 +24,10 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -108,7 +109,7 @@ public class TestKeyValueCompression {
     byte[] value = Bytes.toBytes("myValue");
     List<Tag> tags = new ArrayList<Tag>(noOfTags);
     for (int i = 1; i <= noOfTags; i++) {
-      tags.add(new Tag((byte) i, Bytes.toBytes("tagValue" + i)));
+      tags.add(new ArrayBackedTag((byte) i, Bytes.toBytes("tagValue" + i)));
     }
     return new KeyValue(row, cf, q, HConstants.LATEST_TIMESTAMP, value, tags);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java
index 501fdda..e834ac8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.codec.Codec.Decoder;
 import org.apache.hadoop.hbase.codec.Codec.Encoder;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
@@ -69,7 +71,7 @@ public class TestWALCellCodecWithCompression {
     KeyValue kv = (KeyValue) decoder.current();
     List<Tag> tags = kv.getTags();
     assertEquals(1, tags.size());
-    assertEquals("tagValue1", Bytes.toString(tags.get(0).getValue()));
+    assertEquals("tagValue1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
     decoder.advance();
     kv = (KeyValue) decoder.current();
     tags = kv.getTags();
@@ -78,8 +80,8 @@ public class TestWALCellCodecWithCompression {
     kv = (KeyValue) decoder.current();
     tags = kv.getTags();
     assertEquals(2, tags.size());
-    assertEquals("tagValue1", Bytes.toString(tags.get(0).getValue()));
-    assertEquals("tagValue2", Bytes.toString(tags.get(1).getValue()));
+    assertEquals("tagValue1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
+    assertEquals("tagValue2", Bytes.toString(TagUtil.cloneValue(tags.get(1))));
   }
 
   private KeyValue createKV(int noOfTags) {
@@ -89,7 +91,7 @@ public class TestWALCellCodecWithCompression {
     byte[] value = Bytes.toBytes("myValue");
     List<Tag> tags = new ArrayList<Tag>(noOfTags);
     for (int i = 1; i <= noOfTags; i++) {
-      tags.add(new Tag((byte) i, Bytes.toBytes("tagValue" + i)));
+      tags.add(new ArrayBackedTag((byte) i, Bytes.toBytes("tagValue" + i)));
     }
     return new KeyValue(row, cf, q, HConstants.LATEST_TIMESTAMP, value, tags);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index 988373f..8bfdc2a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -209,7 +211,7 @@ public class TestReplicationWithTags {
             if (cf == null) {
               cf = CellUtil.cloneFamily(kv);
             }
-            Tag tag = new Tag(TAG_TYPE, attribute);
+            Tag tag = new ArrayBackedTag(TAG_TYPE, attribute);
             List<Tag> tagList = new ArrayList<Tag>();
             tagList.add(tag);
 
@@ -238,7 +240,7 @@ public class TestReplicationWithTags {
         // Check tag presence in the 1st cell in 1st Result
         if (!results.isEmpty()) {
           Cell cell = results.get(0);
-          tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
+          tags = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 8ecc6e3..9f20c11 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Connection;
@@ -2516,7 +2517,7 @@ public class TestAccessController extends SecureTestUtil {
             Table t = conn.getTable(TEST_TABLE);) {
           KeyValue kv = new KeyValue(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER,
             HConstants.LATEST_TIMESTAMP, HConstants.EMPTY_BYTE_ARRAY,
-            new Tag[] { new Tag(AccessControlLists.ACL_TAG_TYPE,
+            new Tag[] { new ArrayBackedTag(AccessControlLists.ACL_TAG_TYPE,
               ProtobufUtil.toUsersAndPermissions(USER_OWNER.getShortName(),
                 new Permission(Permission.Action.READ)).toByteArray()) });
           t.put(new Put(TEST_ROW).add(kv));
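
The test diffs above all track the same refactor: tags are no longer instantiated via new Tag(...) but via ArrayBackedTag, values are read through TagUtil.cloneValue / TagUtil.asList, and getValueLength() replaces getTagLength(). A minimal before/after sketch of the pattern (the class name and main() are hypothetical scaffolding; the Tag-related names come straight from the diffs):

  import org.apache.hadoop.hbase.ArrayBackedTag;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.TagUtil;
  import org.apache.hadoop.hbase.util.Bytes;

  public class TagMigrationSketch {
    public static void main(String[] args) {
      // Old: Tag t = new Tag((byte) 1, "myTag1"); byte[] v = t.getValue();
      Tag t = new ArrayBackedTag((byte) 1, "myTag1"); // array-backed implementation
      byte[] v = TagUtil.cloneValue(t);   // copies the tag value out via the generic helper
      int len = t.getValueLength();       // replaces getTagLength()
      System.out.println(Bytes.toString(v) + " (" + len + " bytes)");
    }
  }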


[10/17] hbase git commit: HBASE-15064 BufferUnderflowException after last Cell fetched from an HFile Block served from L2 offheap cache.

Posted by sy...@apache.org.
HBASE-15064 BufferUnderflowException after last Cell fetched from an HFile Block served from L2 offheap cache.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7cd09bfb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7cd09bfb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7cd09bfb

Branch: refs/heads/hbase-12439
Commit: 7cd09bfb91e829a7595b97842e64cfd8edcd97f9
Parents: e8fbc9b
Author: anoopsjohn <an...@gmail.com>
Authored: Wed Jan 6 10:25:32 2016 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Wed Jan 6 10:25:32 2016 +0530

----------------------------------------------------------------------
 .../hadoop/hbase/util/ByteBufferArray.java      |  5 +++
 .../hadoop/hbase/util/TestByteBufferArray.java  | 43 ++++++++++++++++++++
 2 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7cd09bfb/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
index 986d6e0..2334cf7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
@@ -219,6 +219,11 @@ public final class ByteBufferArray {
     long end = offset + len;
     int startBuffer = (int) (offset / bufferSize), startBufferOffset = (int) (offset % bufferSize);
     int endBuffer = (int) (end / bufferSize), endBufferOffset = (int) (end % bufferSize);
+    // The last buffer in the array is a dummy one with 0 capacity; avoid handing it back.
+    if (endBuffer == this.bufferCount) {
+      endBuffer--;
+      endBufferOffset = bufferSize;
+    }
     assert startBuffer >= 0 && startBuffer < bufferCount;
     assert endBuffer >= 0 && endBuffer < bufferCount
         || (endBuffer == bufferCount && endBufferOffset == 0);
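
The failure mode the hunk above guards against is an off-by-one in the end-buffer arithmetic: when offset + len lands exactly on the array's capacity, endBuffer computes to bufferCount, which indexes the trailing zero-capacity dummy buffer, so reading the final byte of the resulting sub-ByteBuff underflows. A standalone sketch of just that arithmetic, using hypothetical sizes (one 4 MiB backing buffer) not taken from the patch:

  public class EndBufferMathSketch {
    public static void main(String[] args) {
      int bufferSize = 4 * 1024 * 1024; // assumed size of each backing buffer
      int bufferCount = 1;              // one real buffer; the array also keeps a 0-capacity dummy
      long offset = 0, len = bufferSize;

      long end = offset + len;
      int endBuffer = (int) (end / bufferSize);       // 1 == bufferCount -> the dummy buffer
      int endBufferOffset = (int) (end % bufferSize); // 0

      // The fix rolls the index back onto the real last buffer so the dummy
      // is never included in the sub-ByteBuff handed to the caller.
      if (endBuffer == bufferCount) {
        endBuffer--;
        endBufferOffset = bufferSize;
      }
      System.out.println("endBuffer=" + endBuffer + ", endBufferOffset=" + endBufferOffset);
    }
  }

The new TestByteBufferArray below pins down exactly this boundary: it positions a sub-ByteBuff on its last byte and verifies that byte can still be read.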

http://git-wip-us.apache.org/repos/asf/hbase/blob/7cd09bfb/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java
new file mode 100644
index 0000000..701601d
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MiscTests.class, SmallTests.class})
+public class TestByteBufferArray {
+
+  @Test
+  public void testAsSubBufferWhenEndOffsetLandInLastBuffer() throws Exception {
+    int capacity = 4 * 1024 * 1024;
+    ByteBufferArray array = new ByteBufferArray(capacity, false);
+    ByteBuff subBuf = array.asSubByteBuff(0, capacity);
+    subBuf.position(capacity - 1); // Position to the last byte
+    assertTrue(subBuf.hasRemaining());
+    // Read last byte
+    subBuf.get();
+    assertFalse(subBuf.hasRemaining());
+  }
+}


[05/17] hbase git commit: HBASE-14888 ClusterSchema: Add Namespace Operations

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8e51f25..4472b65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.Server;
@@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
@@ -97,17 +97,14 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
 import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
-import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
-import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
-import org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
@@ -185,7 +182,7 @@ import com.google.protobuf.Service;
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
 @SuppressWarnings("deprecation")
-public class HMaster extends HRegionServer implements MasterServices, Server {
+public class HMaster extends HRegionServer implements MasterServices {
   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
 
   /**
@@ -256,8 +253,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   // Tracker for region normalizer state
   private RegionNormalizerTracker regionNormalizerTracker;
 
-  /** Namespace stuff */
-  private TableNamespaceManager tableNamespaceManager;
+  private ClusterSchemaService clusterSchemaService;
 
   // Metrics for the HMaster
   final MetricsMaster metricsMaster;
@@ -368,9 +364,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * Remaining steps of initialization occur in
    * #finishActiveMasterInitialization(MonitoredTask) after
    * the master becomes the active one.
-   *
-   * @throws KeeperException
-   * @throws IOException
    */
   public HMaster(final Configuration conf, CoordinatedStateManager csm)
       throws IOException, KeeperException {
@@ -570,10 +563,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   /**
    * Initialize all ZK based system trackers.
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
-   * @throws CoordinatedStateException
    */
   void initializeZKBasedSystemTrackers() throws IOException,
       InterruptedException, KeeperException, CoordinatedStateException {
@@ -588,12 +577,10 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       this.balancer, this.service, this.metricsMaster,
       this.tableLockManager, tableStateManager);
 
-    this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
-        this.serverManager);
+    this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);
     this.regionServerTracker.start();
 
-    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
-      this.serverManager);
+    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
     this.drainingServerTracker.start();
 
     // Set the cluster as up.  If new RSs, they'll be waiting on this before
@@ -630,11 +617,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * <li>Ensure assignment of meta/namespace regions<li>
    * <li>Handle either fresh cluster start or master failover</li>
    * </ol>
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
-   * @throws CoordinatedStateException
    */
   private void finishActiveMasterInitialization(MonitoredTask status)
       throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
@@ -781,8 +763,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.catalogJanitorChore = new CatalogJanitor(this, this);
     getChoreService().scheduleChore(catalogJanitorChore);
 
-    status.setStatus("Starting namespace manager");
-    initNamespace();
+    status.setStatus("Starting cluster schema service");
+    initClusterSchemaService();
 
     if (this.cpHost != null) {
       try {
@@ -848,11 +830,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   /**
    * Create a {@link ServerManager} instance.
-   * @param master
-   * @param services
-   * @return An instance of {@link ServerManager}
-   * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
-   * @throws IOException
    */
   ServerManager createServerManager(final Server master,
       final MasterServices services)
@@ -874,7 +851,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
           RegionState r = MetaTableLocator.getMetaRegionState(zkw, replicaId);
           LOG.info("Closing excess replica of meta region " + r.getRegion());
           // send a close and wait for a max of 30 seconds
-          ServerManager.closeRegionSilentlyAndWait(getConnection(), r.getServerName(),
+          ServerManager.closeRegionSilentlyAndWait(getClusterConnection(), r.getServerName(),
               r.getRegion(), 30000);
           ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(replicaId));
         }
@@ -888,12 +865,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   /**
    * Check <code>hbase:meta</code> is assigned. If not, assign it.
-   * @param status MonitoredTask
-   * @param previouslyFailedMetaRSs
-   * @param replicaId
-   * @throws InterruptedException
-   * @throws IOException
-   * @throws KeeperException
    */
   void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs, int replicaId)
       throws InterruptedException, IOException, KeeperException {
@@ -915,7 +886,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
         metaState.getServerName(), null);
 
     if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
-        this.getConnection(), this.getZooKeeper(), timeout, replicaId)) {
+        this.getClusterConnection(), this.getZooKeeper(), timeout, replicaId)) {
       ServerName currentMetaServer = metaState.getServerName();
       if (serverManager.isServerOnline(currentMetaServer)) {
         if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
@@ -965,10 +936,10 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     status.setStatus("META assigned.");
   }
 
-  void initNamespace() throws IOException {
-    //create namespace manager
-    tableNamespaceManager = new TableNamespaceManager(this);
-    tableNamespaceManager.start();
+  void initClusterSchemaService() throws IOException, InterruptedException {
+    this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
+    this.clusterSchemaService.startAndWait();
+    if (!this.clusterSchemaService.isRunning()) throw new HBaseIOException("Failed start");
   }
 
   void initQuotaManager() throws IOException {
@@ -1014,7 +985,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   /**
    * This function returns a set of region server names under hbase:meta recovering region ZK node
    * @return Set of meta server names which were recorded in ZK
-   * @throws KeeperException
    */
   private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
     Set<ServerName> result = new HashSet<ServerName>();
@@ -1050,11 +1020,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return tableStateManager;
   }
 
-  @Override
-  public TableNamespaceManager getTableNamespaceManager() {
-    return tableNamespaceManager;
-  }
-
   /*
    * Start up all services. If any of these threads gets an unhandled exception
    * then they just die with a logged message.  This should be fine because
@@ -1201,7 +1166,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   /**
    * @return Get remote side's InetAddress
-   * @throws UnknownHostException
    */
   InetAddress getRemoteInetAddress(final int port,
       final long serverStartCode) throws UnknownHostException {
@@ -1336,9 +1300,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * Perform normalization of cluster (invoked by {@link RegionNormalizerChore}).
    *
    * @return true if normalization step was performed successfully, false otherwise
-   *   (specifically, if HMaster hasn't been initialized properly or normalization
-   *   is globally disabled)
-   * @throws IOException
+   *    (specifically, if HMaster hasn't been initialized properly or normalization
+   *    is globally disabled)
    */
   public boolean normalizeRegions() throws IOException {
     if (!this.initialized) {
@@ -1478,9 +1441,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     if (isStopped()) {
       throw new MasterNotRunningException();
     }
-
+    checkInitialized();
     String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
-    ensureNamespaceExists(namespace);
+    this.clusterSchemaService.getNamespace(namespace);
 
     HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
     checkInitialized();
@@ -2167,8 +2130,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * The set of loaded coprocessors is stored in a static set. Since it's
    * statically allocated, it does not require that HMaster's cpHost be
    * initialized prior to accessing it.
-   * @return a String representation of the set of names of the loaded
-   * coprocessors.
+   * @return a String representation of the set of names of the loaded coprocessors.
    */
   public static String getLoadedCoprocessors() {
     return CoprocessorHost.getLoadedCoprocessors().toString();
@@ -2305,18 +2267,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
     checkServiceStarted();
-    if (!this.initialized) {
-      throw new PleaseHoldException("Master is initializing");
-    }
+    if (!isInitialized()) throw new PleaseHoldException("Master is initializing");
   }
 
-  void checkNamespaceManagerReady() throws IOException {
-    checkInitialized();
-    if (tableNamespaceManager == null ||
-        !tableNamespaceManager.isTableAvailableAndInitialized(true)) {
-      throw new IOException("Table Namespace Manager not ready yet, try again later");
-    }
-  }
   /**
    * Report whether this master is currently the active master or not.
    * If not active master, we are parked on ZK waiting to become active.
@@ -2411,7 +2364,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   /**
    * Utility for constructing an instance of the passed HMaster class.
    * @param masterClass
-   * @param conf
    * @return HMaster instance.
    */
   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
@@ -2452,138 +2404,116 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   }
 
   @Override
-  public void createNamespace(
-      final NamespaceDescriptor descriptor,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
-    checkNamespaceManagerReady();
-    if (cpHost != null) {
-      if (cpHost.preCreateNamespace(descriptor)) {
-        return;
-      }
-    }
-    createNamespaceSync(descriptor, nonceGroup, nonce);
-    if (cpHost != null) {
-      cpHost.postCreateNamespace(descriptor);
-    }
+  public ClusterSchema getClusterSchema() {
+    return this.clusterSchemaService;
   }
 
-  @Override
-  public void createNamespaceSync(
-      final NamespaceDescriptor descriptor,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
+  /**
+   * Create a new Namespace.
+   * @param namespaceDescriptor descriptor for new Namespace
+   * @param nonceGroup Identifier for the source of the request, a client or process.
+   * @param nonce A unique identifier for this operation from the client or process identified by
+   * <code>nonceGroup</code> (the source must ensure each operation gets a unique id).
+   * @return procedure id
+   */
+  long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup,
+      final long nonce)
+  throws IOException {
+    checkInitialized();
+    TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
+    if (this.cpHost != null && this.cpHost.preCreateNamespace(namespaceDescriptor)) {
+      throw new BypassCoprocessorException();
+    }
+    LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor);
     // Execute the operation synchronously - wait for the operation to complete before continuing.
-    long procId = this.procedureExecutor.submitProcedure(
-      new CreateNamespaceProcedure(procedureExecutor.getEnvironment(), descriptor),
-      nonceGroup,
-      nonce);
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+    long procId = getClusterSchema().createNamespace(namespaceDescriptor, nonceGroup, nonce);
+    if (this.cpHost != null) this.cpHost.postCreateNamespace(namespaceDescriptor);
+    return procId;
   }
 
-  @Override
-  public void modifyNamespace(
-      final NamespaceDescriptor descriptor,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
-    checkNamespaceManagerReady();
-    if (cpHost != null) {
-      if (cpHost.preModifyNamespace(descriptor)) {
-        return;
-      }
+  /**
+   * Modify an existing Namespace.
+   * @param nonceGroup Identifier for the source of the request, a client or process.
+   * @param nonce A unique identifier for this operation from the client or process identified by
+   * <code>nonceGroup</code> (the source must ensure each operation gets a unique id).
+   * @return procedure id
+   */
+  long modifyNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup,
+      final long nonce)
+  throws IOException {
+    checkInitialized();
+    TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
+    if (this.cpHost != null && this.cpHost.preModifyNamespace(namespaceDescriptor)) {
+      throw new BypassCoprocessorException();
     }
-    LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
+    LOG.info(getClientIdAuditPrefix() + " modify " + namespaceDescriptor);
     // Execute the operation synchronously - wait for the operation to complete before continuing.
-    long procId = this.procedureExecutor.submitProcedure(
-      new ModifyNamespaceProcedure(procedureExecutor.getEnvironment(), descriptor),
-      nonceGroup,
-      nonce);
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
-    if (cpHost != null) {
-      cpHost.postModifyNamespace(descriptor);
-    }
+    long procId = getClusterSchema().modifyNamespace(namespaceDescriptor, nonceGroup, nonce);
+    if (this.cpHost != null) this.cpHost.postModifyNamespace(namespaceDescriptor);
+    return procId;
   }
 
-  @Override
-  public void deleteNamespace(
-      final String name,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    checkNamespaceManagerReady();
-    if (cpHost != null) {
-      if (cpHost.preDeleteNamespace(name)) {
-        return;
-      }
+  /**
+   * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed.
+   * @param nonceGroup Identifier for the source of the request, a client or process.
+   * @param nonce A unique identifier for this operation from the client or process identified by
+   * <code>nonceGroup</code> (the source must ensure each operation gets a unique id).
+   * @return procedure id
+   */
+  long deleteNamespace(final String name, final long nonceGroup, final long nonce)
+  throws IOException {
+    checkInitialized();
+    if (this.cpHost != null && this.cpHost.preDeleteNamespace(name)) {
+      throw new BypassCoprocessorException();
     }
     LOG.info(getClientIdAuditPrefix() + " delete " + name);
     // Execute the operation synchronously - wait for the operation to complete before continuing.
-    long procId = this.procedureExecutor.submitProcedure(
-      new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name),
-      nonceGroup,
-      nonce);
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
-    if (cpHost != null) {
-      cpHost.postDeleteNamespace(name);
-    }
+    long procId = getClusterSchema().deleteNamespace(name, nonceGroup, nonce);
+    if (this.cpHost != null) this.cpHost.postDeleteNamespace(name);
+    return procId;
   }
 
   /**
-   * Ensure that the specified namespace exists, otherwise throws a NamespaceNotFoundException
-   *
-   * @param name the namespace to check
-   * @throws IOException if the namespace manager is not ready yet.
-   * @throws NamespaceNotFoundException if the namespace does not exists
+   * Get a Namespace
+   * @param name Name of the Namespace
+   * @return Namespace descriptor for <code>name</code>
    */
-  private void ensureNamespaceExists(final String name)
-      throws IOException, NamespaceNotFoundException {
-    checkNamespaceManagerReady();
-    NamespaceDescriptor nsd = tableNamespaceManager.get(name);
-    if (nsd == null) {
-      throw new NamespaceNotFoundException(name);
-    }
+  NamespaceDescriptor getNamespace(String name) throws IOException {
+    checkInitialized();
+    if (this.cpHost != null) this.cpHost.preGetNamespaceDescriptor(name);
+    NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name);
+    if (this.cpHost != null) this.cpHost.postGetNamespaceDescriptor(nsd);
+    return nsd;
   }
 
-  @Override
-  public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
-    checkNamespaceManagerReady();
-
+  /**
+   * Get all Namespaces
+   * @return All Namespace descriptors
+   */
+  List<NamespaceDescriptor> getNamespaces() throws IOException {
+    checkInitialized();
+    final List<NamespaceDescriptor> nsds = new ArrayList<NamespaceDescriptor>();
+    boolean bypass = false;
     if (cpHost != null) {
-      cpHost.preGetNamespaceDescriptor(name);
+      bypass = cpHost.preListNamespaceDescriptors(nsds);
     }
-
-    NamespaceDescriptor nsd = tableNamespaceManager.get(name);
-    if (nsd == null) {
-      throw new NamespaceNotFoundException(name);
-    }
-
-    if (cpHost != null) {
-      cpHost.postGetNamespaceDescriptor(nsd);
+    if (!bypass) {
+      nsds.addAll(this.clusterSchemaService.getNamespaces());
+      if (this.cpHost != null) this.cpHost.postListNamespaceDescriptors(nsds);
     }
-
-    return nsd;
+    return nsds;
   }
 
   @Override
-  public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
-    checkNamespaceManagerReady();
-
-    final List<NamespaceDescriptor> descriptors = new ArrayList<NamespaceDescriptor>();
-    boolean bypass = false;
-    if (cpHost != null) {
-      bypass = cpHost.preListNamespaceDescriptors(descriptors);
-    }
-
-    if (!bypass) {
-      descriptors.addAll(tableNamespaceManager.list());
+  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
+    checkInitialized();
+    return listTableNames(name, null, true);
+  }
 
-      if (cpHost != null) {
-        cpHost.postListNamespaceDescriptors(descriptors);
-      }
-    }
-    return descriptors;
+  @Override
+  public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
+    checkInitialized();
+    return listTableDescriptors(name, null, null, true);
   }
 
   @Override
@@ -2617,21 +2547,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return procInfoList;
   }
 
-  @Override
-  public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
-    ensureNamespaceExists(name);
-    return listTableDescriptors(name, null, null, true);
-  }
-
-  @Override
-  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
-    ensureNamespaceExists(name);
-    return listTableNames(name, null, true);
-  }
-
   /**
    * Returns the list of table descriptors that match the specified request
-   *
    * @param namespace the namespace to query, or null if querying for all
    * @param regex The regular expression to match against, or null if querying for all
    * @param tableNameList the list of table names, or null if querying for all
@@ -2640,51 +2557,17 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    */
   public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
       final List<TableName> tableNameList, final boolean includeSysTables)
-      throws IOException {
-    final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
-
-    boolean bypass = false;
-    if (cpHost != null) {
-      bypass = cpHost.preGetTableDescriptors(tableNameList, descriptors, regex);
-    }
-
+  throws IOException {
+    List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
+    boolean bypass = cpHost != null?
+        cpHost.preGetTableDescriptors(tableNameList, htds, regex): false;
     if (!bypass) {
-      if (tableNameList == null || tableNameList.size() == 0) {
-        // request for all TableDescriptors
-        Collection<HTableDescriptor> htds;
-        if (namespace != null && namespace.length() > 0) {
-          htds = tableDescriptors.getByNamespace(namespace).values();
-        } else {
-          htds = tableDescriptors.getAll().values();
-        }
-
-        for (HTableDescriptor desc: htds) {
-          if (tableStateManager.isTablePresent(desc.getTableName())
-              && (includeSysTables || !desc.getTableName().isSystemTable())) {
-            descriptors.add(desc);
-          }
-        }
-      } else {
-        for (TableName s: tableNameList) {
-          if (tableStateManager.isTablePresent(s)) {
-            HTableDescriptor desc = tableDescriptors.get(s);
-            if (desc != null) {
-              descriptors.add(desc);
-            }
-          }
-        }
-      }
-
-      // Retains only those matched by regular expression.
-      if (regex != null) {
-        filterTablesByRegex(descriptors, Pattern.compile(regex));
-      }
-
+      htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
       if (cpHost != null) {
-        cpHost.postGetTableDescriptors(tableNameList, descriptors, regex);
+        cpHost.postGetTableDescriptors(tableNameList, htds, regex);
       }
     }
-    return descriptors;
+    return htds;
   }
 
   /**
@@ -2696,46 +2579,58 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    */
   public List<TableName> listTableNames(final String namespace, final String regex,
       final boolean includeSysTables) throws IOException {
-    final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
-
-    boolean bypass = false;
-    if (cpHost != null) {
-      bypass = cpHost.preGetTableNames(descriptors, regex);
+    List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
+    boolean bypass = cpHost != null? cpHost.preGetTableNames(htds, regex): false;
+    if (!bypass) {
+      htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
+      if (cpHost != null) cpHost.postGetTableNames(htds, regex);
     }
+    List<TableName> result = new ArrayList<TableName>(htds.size());
+    for (HTableDescriptor htd: htds) result.add(htd.getTableName());
+    return result;
+  }
 
-    if (!bypass) {
-      // get all descriptors
-      Collection<HTableDescriptor> htds;
+  /**
+   * @return list of table descriptors after filtering by regex and whether to include system
+   *    tables, etc.
+   * @throws IOException
+   */
+  private List<HTableDescriptor> getTableDescriptors(final List<HTableDescriptor> htds,
+      final String namespace, final String regex, final List<TableName> tableNameList,
+      final boolean includeSysTables)
+  throws IOException {
+    if (tableNameList == null || tableNameList.size() == 0) {
+      // request for all TableDescriptors
+      Collection<HTableDescriptor> allHtds;
       if (namespace != null && namespace.length() > 0) {
-        htds = tableDescriptors.getByNamespace(namespace).values();
+        // Do a check on the namespace existence. Will fail if it does not exist.
+        this.clusterSchemaService.getNamespace(namespace);
+        allHtds = tableDescriptors.getByNamespace(namespace).values();
       } else {
-        htds = tableDescriptors.getAll().values();
+        allHtds = tableDescriptors.getAll().values();
       }
-
-      for (HTableDescriptor htd: htds) {
-        if (includeSysTables || !htd.getTableName().isSystemTable()) {
-          descriptors.add(htd);
+      for (HTableDescriptor desc: allHtds) {
+        if (tableStateManager.isTablePresent(desc.getTableName())
+            && (includeSysTables || !desc.getTableName().isSystemTable())) {
+          htds.add(desc);
         }
       }
-
-      // Retains only those matched by regular expression.
-      if (regex != null) {
-        filterTablesByRegex(descriptors, Pattern.compile(regex));
-      }
-
-      if (cpHost != null) {
-        cpHost.postGetTableNames(descriptors, regex);
+    } else {
+      for (TableName s: tableNameList) {
+        if (tableStateManager.isTablePresent(s)) {
+          HTableDescriptor desc = tableDescriptors.get(s);
+          if (desc != null) {
+            htds.add(desc);
+          }
+        }
       }
     }
 
-    List<TableName> result = new ArrayList<TableName>(descriptors.size());
-    for (HTableDescriptor htd: descriptors) {
-      result.add(htd.getTableName());
-    }
-    return result;
+    // Retains only those matched by regular expression.
+    if (regex != null) filterTablesByRegex(htds, Pattern.compile(regex));
+    return htds;
   }
 
-
   /**
    * Removes the table descriptors that don't match the pattern.
    * @param descriptors list of table descriptors to filter
@@ -2848,11 +2743,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * Queries the state of the {@link RegionNormalizerTracker}. If it's not initialized,
    * false is returned.
    */
-   public boolean isNormalizerOn() {
-    if (null == regionNormalizerTracker) {
-      return false;
-    }
-    return regionNormalizerTracker.isNormalizerOn();
+  public boolean isNormalizerOn() {
+    return null == regionNormalizerTracker? false: regionNormalizerTracker.isNormalizerOn();
   }
 
   /**
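
The javadoc added above fixes the nonce contract for these namespace operations: nonceGroup identifies the submitting client or process, and each operation from that source must carry a unique nonce, so a retried RPC can be collapsed onto the procedure already running. A minimal sketch of one way a caller could mint such a pair (hypothetical helper; the real client ships its own NonceGenerator, and 0 is conventionally reserved to mean "no nonce"):

  import java.security.SecureRandom;

  public class NonceSketch {
    private static final SecureRandom RND = new SecureRandom();
    // One group id for the lifetime of this client process.
    static final long NONCE_GROUP = nonZeroRandom();

    // A fresh nonce per operation; retries of the same operation reuse it.
    static long newNonce() {
      return nonZeroRandom();
    }

    private static long nonZeroRandom() {
      long n;
      do {
        n = RND.nextLong();
      } while (n == 0); // skip the reserved "no nonce" value
      return n;
    }

    public static void main(String[] args) {
      System.out.println("nonceGroup=" + NONCE_GROUP + ", nonce=" + newNonce());
    }
  }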

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index b269c3d..141fa88 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -55,121 +55,21 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.protobuf.generated.*;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
@@ -188,8 +88,8 @@ import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.visibility.VisibilityController;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.zookeeper.KeeperException;
 
@@ -458,11 +358,11 @@ public class MasterRpcServices extends RSRpcServices
   public CreateNamespaceResponse createNamespace(RpcController controller,
      CreateNamespaceRequest request) throws ServiceException {
     try {
-      master.createNamespace(
+      long procId = master.createNamespace(
         ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
         request.getNonceGroup(),
         request.getNonce());
-      return CreateNamespaceResponse.getDefaultInstance();
+      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -506,11 +406,11 @@ public class MasterRpcServices extends RSRpcServices
   public DeleteNamespaceResponse deleteNamespace(RpcController controller,
       DeleteNamespaceRequest request) throws ServiceException {
     try {
-      master.deleteNamespace(
+      long procId = master.deleteNamespace(
         request.getNamespaceName(),
         request.getNonceGroup(),
         request.getNonce());
-      return DeleteNamespaceResponse.getDefaultInstance();
+      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -832,7 +732,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       return GetNamespaceDescriptorResponse.newBuilder()
         .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(
-            master.getNamespaceDescriptor(request.getNamespaceName())))
+            master.getNamespace(request.getNamespaceName())))
         .build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -977,10 +877,8 @@ public class MasterRpcServices extends RSRpcServices
 
   /**
    * Checks if the specified procedure is done.
-   * @return true if the procedure is done,
-   *   false if the procedure is in the process of completing
-   * @throws ServiceException if invalid procedure, or
-   *  a failed procedure with progress failure reason.
+   * @return true if the procedure is done, false if the procedure is in the process of completing
+   * @throws ServiceException if the procedure is invalid, or if it failed with a failure reason.
    */
   @Override
   public IsProcedureDoneResponse isProcedureDone(RpcController controller,
@@ -1120,7 +1018,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       ListNamespaceDescriptorsResponse.Builder response =
         ListNamespaceDescriptorsResponse.newBuilder();
-      for(NamespaceDescriptor ns: master.listNamespaceDescriptors()) {
+      for(NamespaceDescriptor ns: master.getNamespaces()) {
         response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
       }
       return response.build();
@@ -1200,11 +1098,11 @@ public class MasterRpcServices extends RSRpcServices
   public ModifyNamespaceResponse modifyNamespace(RpcController controller,
       ModifyNamespaceRequest request) throws ServiceException {
     try {
-      master.modifyNamespace(
+      long procId = master.modifyNamespace(
         ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
         request.getNonceGroup(),
         request.getNonce());
-      return ModifyNamespaceResponse.getDefaultInstance();
+      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1305,10 +1203,9 @@ public class MasterRpcServices extends RSRpcServices
       master.checkInitialized();
       master.snapshotManager.checkSnapshotSupport();
 
-    // ensure namespace exists
+      // Ensure the namespace exists; throws an exception if the namespace is unknown.
       TableName dstTable = TableName.valueOf(request.getSnapshot().getTable());
-      master.getNamespaceDescriptor(dstTable.getNamespaceAsString());
-
+      master.getNamespace(dstTable.getNamespaceAsString());
       SnapshotDescription reqSnapshot = request.getSnapshot();
       master.snapshotManager.restoreSnapshot(reqSnapshot);
       return RestoreSnapshotResponse.newBuilder().build();

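Note on the RPC changes above: createNamespace, modifyNamespace and
deleteNamespace now return the procedure id in their responses instead of a
default instance, so callers can wait on completion. A hypothetical
client-side wait loop might look like the following (isDone stands in for an
isProcedureDone-style check and is an assumption, not a verified HBase API):

  import java.util.concurrent.TimeoutException;
  import java.util.function.LongPredicate;

  public final class ProcIdWaitSketch {
    // Polls the supplied check until the procedure finishes or the deadline
    // passes; mirrors what a client can now do with the proc_id carried in
    // the namespace responses.
    static void waitOnProcId(long procId, LongPredicate isDone, long timeoutMs)
        throws InterruptedException, TimeoutException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (isDone.test(procId)) return;  // procedure completed
        Thread.sleep(100);                // coarse poll interval
      }
      throw new TimeoutException("Procedure " + procId + " still running");
    }
  }
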
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index af0e490..ec7db0c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -21,21 +21,20 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 
 import com.google.protobuf.Service;
@@ -46,6 +45,11 @@ import com.google.protobuf.Service;
 @InterfaceAudience.Private
 public interface MasterServices extends Server {
   /**
+   * @return Master's instance of {@link ClusterSchema}
+   */
+  ClusterSchema getClusterSchema();
+
+  /**
    * @return Master's instance of the {@link AssignmentManager}
    */
   AssignmentManager getAssignmentManager();
@@ -81,11 +85,6 @@ public interface MasterServices extends Server {
   MasterCoprocessorHost getMasterCoprocessorHost();
 
   /**
-   * @return Master's instance of {@link TableNamespaceManager}
-   */
-  TableNamespaceManager getTableNamespaceManager();
-
-  /**
    * @return Master's instance of {@link MasterQuotaManager}
    */
   MasterQuotaManager getMasterQuotaManager();
@@ -280,54 +279,6 @@ public interface MasterServices extends Server {
   boolean isInitialized();
 
   /**
-   * Create a new namespace
-   * @param descriptor descriptor which describes the new namespace
-   * @param nonceGroup
-   * @param nonce
-   * @throws IOException
-   */
-  public void createNamespace(
-      final NamespaceDescriptor descriptor,
-      final long nonceGroup,
-      final long nonce) throws IOException;
-
-  /**
-   * Create a new namespace synchronously.
-   * @param descriptor descriptor which describes the new namespace
-   * @param nonceGroup
-   * @param nonce
-   * @throws IOException
-   */
-  public void createNamespaceSync(
-      final NamespaceDescriptor descriptor,
-      final long nonceGroup,
-      final long nonce) throws IOException;
-
-  /**
-   * Modify an existing namespace
-   * @param descriptor descriptor which updates the existing namespace
-   * @param nonceGroup
-   * @param nonce
-   * @throws IOException
-   */
-  public void modifyNamespace(
-      final NamespaceDescriptor descriptor,
-      final long nonceGroup,
-      final long nonce) throws IOException;
-
-  /**
-   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
-   * @param name namespace name
-   * @param nonceGroup
-   * @param nonce
-   * @throws IOException
-   */
-  public void deleteNamespace(
-      final String name,
-      final long nonceGroup,
-      final long nonce) throws IOException;
-
-  /**
    * Abort a procedure.
    * @param procId ID of the procedure
    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
@@ -338,21 +289,6 @@ public interface MasterServices extends Server {
       throws IOException;
 
   /**
-   * Get a namespace descriptor by name
-   * @param name name of namespace descriptor
-   * @return A descriptor
-   * @throws IOException
-   */
-  public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException;
-
-  /**
-   * List available namespace descriptors
-   * @return A descriptor
-   * @throws IOException
-   */
-  public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException;
-
-  /**
    * List procedures
    * @return procedure list
    * @throws IOException

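For call sites, the interface change above amounts to routing all namespace
operations through the new ClusterSchema accessor. A compilable sketch of the
before/after shape (the nested interfaces are simplified stand-ins; the
ClusterSchema method names follow their usages elsewhere in this patch):

  import java.io.IOException;
  import java.util.List;

  final class ClusterSchemaMigrationSketch {
    interface Ns {}  // stand-in for NamespaceDescriptor
    interface ClusterSchema {
      Ns getNamespace(String name) throws IOException;
      List<Ns> getNamespaces() throws IOException;
    }
    interface MasterServices {
      ClusterSchema getClusterSchema();
    }

    static Ns lookup(MasterServices master, String name) throws IOException {
      // Previously: master.getNamespaceDescriptor(name)
      return master.getClusterSchema().getNamespace(name);
    }
  }
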
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 50f07c1..a95279c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -215,7 +215,7 @@ public class ServerManager {
     Configuration c = master.getConfiguration();
     maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
     warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
-    this.connection = connect ? master.getConnection() : null;
+    this.connection = connect ? master.getClusterConnection() : null;
     int pingMaxAttempts = Math.max(1, master.getConfiguration().getInt(
       "hbase.master.maximum.ping.server.attempts", 10));
     int pingSleepInterval = Math.max(1, master.getConfiguration().getInt(

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index bbeaf76..69d1280 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -27,17 +27,17 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZKNamespaceManager;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -46,20 +46,25 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
+import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
 
 import com.google.common.collect.Sets;
 
 /**
- * This is a helper class used to manage the namespace
- * metadata that is stored in TableName.NAMESPACE_TABLE_NAME
- * It also mirrors updates to the ZK store by forwarding updates to
- * {@link org.apache.hadoop.hbase.ZKNamespaceManager}
+ * This is a helper class used internally to manage the namespace metadata that is stored in
+ * TableName.NAMESPACE_TABLE_NAME. It also mirrors updates to the ZK store by forwarding updates to
+ * {@link org.apache.hadoop.hbase.ZKNamespaceManager}.
+ * 
+ * WARNING: Do not use. Go via the higher-level {@link ClusterSchema} API instead. This manager
+ * is likely to go away anyway.
  */
 @InterfaceAudience.Private
 public class TableNamespaceManager {
@@ -90,7 +95,7 @@ public class TableNamespaceManager {
   private long exclusiveLockTimeoutMs;
   private long sharedLockTimeoutMs;
 
-  public TableNamespaceManager(MasterServices masterServices) {
+  TableNamespaceManager(MasterServices masterServices) {
     this.masterServices = masterServices;
     this.conf = masterServices.getConfiguration();
 
@@ -104,7 +109,7 @@ public class TableNamespaceManager {
 
   public void start() throws IOException {
     if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
-      TableName.NAMESPACE_TABLE_NAME)) {
+        TableName.NAMESPACE_TABLE_NAME)) {
       LOG.info("Namespace table not found. Creating...");
       createNamespaceTable(masterServices);
     }
@@ -113,7 +118,7 @@ public class TableNamespaceManager {
       // Wait for the namespace table to be initialized.
       long startTime = EnvironmentEdgeManager.currentTime();
       int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT);
-      while (!isTableAvailableAndInitialized(false)) {
+      while (!isTableAvailableAndInitialized()) {
         if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) {
           // We can't do anything if ns is not online.
           throw new IOException("Timedout " + timeout + "ms waiting for namespace table to "
@@ -269,16 +274,48 @@ public class TableNamespaceManager {
   }
 
   /**
+   * Create a namespace in a blocking manner; keeps trying until the timeout configured by
+   * {@link ClusterSchema#HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires.
+   * Note: this bypasses coprocessor notification and name checks. Use for system namespaces only.
+   */
+  private void blockingCreateNamespace(final NamespaceDescriptor namespaceDescriptor)
+  throws IOException {
+    ClusterSchema clusterSchema = this.masterServices.getClusterSchema();
+    long procId =
+      clusterSchema.createNamespace(namespaceDescriptor, HConstants.NO_NONCE, HConstants.NO_NONCE);
+    block(this.masterServices, procId);
+  }
+
+
+  /**
+   * An ugly utility to be removed when TableNamespaceManager is refactored.
+   * @throws TimeoutIOException
+   */
+  private static void block(final MasterServices services, final long procId)
+  throws TimeoutIOException {
+    int timeoutInMillis = services.getConfiguration().
+        getInt(ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY,
+            ClusterSchema.DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT);
+    long deadlineTs = EnvironmentEdgeManager.currentTime() + timeoutInMillis;
+    ProcedureExecutor<MasterProcedureEnv> procedureExecutor =
+        services.getMasterProcedureExecutor();
+    while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
+      if (procedureExecutor.isFinished(procId)) return;
+      // Sleep some
+      Threads.sleep(10);
+    }
+    throw new TimeoutIOException("Procedure " + procId + " is still running");
+  }
+
+  /**
    * This method checks if the namespace table is assigned and then
-   * tries to create its HTable. If it was already created before, it also makes
+   * tries to create its Table reference. If it was already created before, it also makes
    * sure that the connection isn't closed.
-   * @return true if the namespace table manager is ready to serve, false
-   * otherwise
-   * @throws IOException
+   * @return true if the namespace table manager is ready to serve, false otherwise
    */
   @SuppressWarnings("deprecation")
-  public synchronized boolean isTableAvailableAndInitialized(
-      final boolean createNamespaceAync) throws IOException {
+  public synchronized boolean isTableAvailableAndInitialized()
+  throws IOException {
     // Did we already get a table? If so, still make sure it's available
     if (isTableNamespaceManagerInitialized()) {
       return true;
@@ -293,34 +330,10 @@ public class TableNamespaceManager {
         zkNamespaceManager.start();
 
         if (get(nsTable, NamespaceDescriptor.DEFAULT_NAMESPACE.getName()) == null) {
-          if (createNamespaceAync) {
-            masterServices.getMasterProcedureExecutor().submitProcedure(
-              new CreateNamespaceProcedure(
-                masterServices.getMasterProcedureExecutor().getEnvironment(),
-                NamespaceDescriptor.DEFAULT_NAMESPACE));
-            initGoodSofar = false;
-          }
-          else {
-            masterServices.createNamespaceSync(
-              NamespaceDescriptor.DEFAULT_NAMESPACE,
-              HConstants.NO_NONCE,
-              HConstants.NO_NONCE);
-          }
+          blockingCreateNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE);
         }
         if (get(nsTable, NamespaceDescriptor.SYSTEM_NAMESPACE.getName()) == null) {
-          if (createNamespaceAync) {
-            masterServices.getMasterProcedureExecutor().submitProcedure(
-              new CreateNamespaceProcedure(
-                masterServices.getMasterProcedureExecutor().getEnvironment(),
-                NamespaceDescriptor.SYSTEM_NAMESPACE));
-            initGoodSofar = false;
-          }
-          else {
-            masterServices.createNamespaceSync(
-              NamespaceDescriptor.SYSTEM_NAMESPACE,
-              HConstants.NO_NONCE,
-              HConstants.NO_NONCE);
-          }
+          blockingCreateNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE);
         }
 
         if (!initGoodSofar) {
@@ -410,4 +423,4 @@ public class TableNamespaceManager {
     }
     return maxRegions;
   }
-}
+}
\ No newline at end of file

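The block() helper above is an instance of a deadline poll: spin on a
condition with a short sleep until a wall-clock deadline expires. Its generic
form as a sketch with illustrative names (HBase's version goes through
EnvironmentEdgeManager and Threads.sleep so tests can inject a fake clock):

  import java.util.concurrent.TimeUnit;
  import java.util.function.BooleanSupplier;

  final class DeadlinePollSketch {
    static boolean pollUntil(BooleanSupplier condition, long timeoutMs) {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (condition.getAsBoolean()) return true;  // finished in time
        try {
          TimeUnit.MILLISECONDS.sleep(10);  // same 10ms step as block()
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return false;  // treat interruption as failure
        }
      }
      return false;  // deadline expired
    }
  }
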
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
index 657bbfb..f934737 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -348,7 +348,7 @@ public class CreateNamespaceProcedure
   }
 
   private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
-    return env.getMasterServices().getTableNamespaceManager();
+    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
index 5a42614..2f99167 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
@@ -383,7 +383,7 @@ public class DeleteNamespaceProcedure
   }
 
   private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
-    return env.getMasterServices().getTableNamespaceManager();
+    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
   }
   /**
    * The procedure could be restarted from a different machine. If the variable is null, we need to

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 1e86254..baef112 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -374,7 +374,7 @@ public class DeleteTableProcedure
    */
   private static void cleanAnyRemainingRows(final MasterProcedureEnv env,
       final TableName tableName) throws IOException {
-    ClusterConnection connection = env.getMasterServices().getConnection();
+    Connection connection = env.getMasterServices().getConnection();
     Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName);
     try (Table metaTable =
         connection.getTable(TableName.META_TABLE_NAME)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
index 30de252..0f8c172 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
@@ -266,8 +266,9 @@ public class ModifyNamespaceProcedure
   }
 
   private TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
-    return env.getMasterServices().getTableNamespaceManager();
+    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
   }
+
   /**
    * The procedure could be restarted from a different machine. If the variable is null, we need to
    * retrieve it.

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 5c9f6f4..bdcd89c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -730,7 +730,7 @@ implements ServerProcedureInterface {
     boolean metaAssigned = false;
     // Is hbase:meta location available yet?
     if (mtl.isLocationAvailable(zkw)) {
-      ClusterConnection connection = env.getMasterServices().getConnection();
+      ClusterConnection connection = env.getMasterServices().getClusterConnection();
       // Is hbase:meta location good yet?
       long timeout =
         env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META);

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
index f24f8c0..8035d32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
@@ -65,7 +65,7 @@ class NamespaceStateManager {
 
   /**
    * Gets an instance of NamespaceTableAndRegionInfo associated with namespace.
-   * @param The name of the namespace
+   * @param name The name of the namespace
    * @return An instance of NamespaceTableAndRegionInfo.
    */
   public NamespaceTableAndRegionInfo getState(String name) {
@@ -135,7 +135,7 @@ class NamespaceStateManager {
 
   private NamespaceDescriptor getNamespaceDescriptor(String namespaceAsString) {
     try {
-      return this.master.getNamespaceDescriptor(namespaceAsString);
+      return this.master.getClusterSchema().getNamespace(namespaceAsString);
     } catch (IOException e) {
       LOG.error("Error while fetching namespace descriptor for namespace : " + namespaceAsString);
       return null;
@@ -212,7 +212,7 @@ class NamespaceStateManager {
    * Initialize namespace state cache by scanning meta table.
    */
   private void initialize() throws IOException {
-    List<NamespaceDescriptor> namespaces = this.master.listNamespaceDescriptors();
+    List<NamespaceDescriptor> namespaces = this.master.getClusterSchema().getNamespaces();
     for (NamespaceDescriptor namespace : namespaces) {
       addNamespace(namespace.getName());
       List<TableName> tables = this.master.listTableNamesByNamespace(namespace.getName());

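initialize() above warms a per-namespace cache from the new ClusterSchema
listing. Its shape, reduced to a self-contained sketch (NsSource is an assumed
stand-in for the master's namespace and table listing, not HBase API):

  import java.util.List;
  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  final class NamespaceCacheSketch {
    interface NsSource {
      List<String> namespaces();
      List<String> tablesIn(String namespace);
    }

    // One cache entry per namespace, seeded with its table names.
    static Map<String, List<String>> warmUp(NsSource source) {
      Map<String, List<String>> cache = new ConcurrentHashMap<>();
      for (String ns : source.namespaces()) {
        cache.put(ns, source.tablesIn(ns));
      }
      return cache;
    }
  }
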
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 211fed5..00046ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -196,8 +197,7 @@ import sun.misc.SignalHandler;
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
 @SuppressWarnings("deprecation")
-public class HRegionServer extends HasThread implements
-    RegionServerServices, LastSequenceId {
+public class HRegionServer extends HasThread implements RegionServerServices, LastSequenceId {
 
   private static final Log LOG = LogFactory.getLog(HRegionServer.class);
 
@@ -1867,7 +1867,12 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public ClusterConnection getConnection() {
+  public Connection getConnection() {
+    return getClusterConnection();
+  }
+
+  @Override
+  public ClusterConnection getClusterConnection() {
     return this.clusterConnection;
   }
 

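The HRegionServer change above splits one connection accessor into two views
of the same object: getConnection() returns the general Connection type for
Server callers, while getClusterConnection() keeps the cluster-level type for
internal use. A simplified sketch (the nested interfaces are stand-ins):

  final class ConnectionSplitSketch {
    interface Connection {}
    interface ClusterConnection extends Connection {}

    static final class Rs {
      private final ClusterConnection clusterConnection;
      Rs(ClusterConnection c) { this.clusterConnection = c; }

      Connection getConnection() { return getClusterConnection(); }  // widened view
      ClusterConnection getClusterConnection() { return clusterConnection; }
    }
  }
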
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 8d38b09..b86de12 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -187,5 +187,11 @@ public class ReplicationSyncUp extends Configured implements Tool {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index f132b2b..878c5bf 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -124,7 +124,7 @@
 </div>
 <%
 if ( fqtn != null ) {
-  table = (HTable) master.getConnection().getTable(fqtn);
+  table = (HTable) master.getConnection().getTable(TableName.valueOf(fqtn));
   if (table.getTableDescriptor().getRegionReplication() > 1) {
     tableHeader = "<h2>Table Regions</h2><table class=\"table table-striped\" style=\"table-layout: fixed; word-wrap: break-word;\"><tr><th style=\"width:22%\">Name</th><th>Region Server</th><th style=\"width:22%\">Start Key</th><th style=\"width:22%\">End Key</th><th>Locality</th><th>Requests</th><th>ReplicaID</th></tr>";
     withReplica = true;
@@ -199,7 +199,7 @@ if ( fqtn != null ) {
 </table>
 <%} else {
   Admin admin = master.getConnection().getAdmin();
-  RegionLocator r = master.getConnection().getRegionLocator(table.getName());
+  RegionLocator r = master.getClusterConnection().getRegionLocator(table.getName());
   try { %>
 <h2>Table Attributes</h2>
 <table class="table table-striped">

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index c126b19..a7fc75b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -306,4 +306,10 @@ public class MockRegionServerServices implements RegionServerServices {
   public double getCompactionPressure() {
     return 0;
   }
+
+  @Override
+  public ClusterConnection getClusterConnection() {
+    // TODO Auto-generated method stub
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index c24d8a3..f9e2a16 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -358,7 +358,7 @@ public class TestNamespace {
     runWithExpectedException(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
-        admin.listTableDescriptorsByNamespace("non_existing_namespace");
+        admin.listTableDescriptorsByNamespace("non_existant_namespace");
         return null;
       }
     }, NamespaceNotFoundException.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
index e84d34c..618717b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
@@ -65,7 +65,7 @@ public class TestShortCircuitConnection {
     htd.addFamily(hcd);
     UTIL.createTable(htd, null);
     HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tn);
-    ClusterConnection connection = regionServer.getConnection();
+    ClusterConnection connection = regionServer.getClusterConnection();
     HTableInterface tableIf = connection.getTable(tn);
     assertTrue(tableIf instanceof HTable);
     HTable table = (HTable) tableIf;

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index ef4a579..638811a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -1586,7 +1586,13 @@ public class TestMasterObserver {
     cp.enableBypass(true);
     cp.resetStates();
 
-    admin.modifyNamespace(NamespaceDescriptor.create(testNamespace).build());
+    boolean expected = false;
+    try {
+      admin.modifyNamespace(NamespaceDescriptor.create(testNamespace).build());
+    } catch (BypassCoprocessorException ce) {
+      expected = true;
+    }
+    assertTrue(expected);
     assertTrue("Test namespace should not have been modified",
         cp.preModifyNamespaceCalledOnly());
 
@@ -1594,7 +1600,13 @@ public class TestMasterObserver {
     assertTrue("Test namespace descriptor should have been called",
         cp.wasGetNamespaceDescriptorCalled());
 
-    admin.deleteNamespace(testNamespace);
+    expected = false;
+    try {
+      admin.deleteNamespace(testNamespace);
+    } catch (BypassCoprocessorException ce) {
+      expected = true;
+    }
+    assertTrue(expected);
     assertTrue("Test namespace should not have been deleted", cp.preDeleteNamespaceCalledOnly());
 
     assertNotNull(admin.getNamespaceDescriptor(testNamespace));
@@ -1614,7 +1626,13 @@ public class TestMasterObserver {
     cp.enableBypass(true);
     cp.resetStates();
 
-    admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
+    expected = false;
+    try {
+      admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
+    } catch (BypassCoprocessorException ce) {
+      expected = true;
+    }
+    assertTrue(expected);
     assertTrue("Test namespace should not be created", cp.preCreateNamespaceCalledOnly());
 
     // turn on bypass, run the test

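The three try/catch blocks added above share one shape: make the call, expect
BypassCoprocessorException, assert it was seen. A small helper in that spirit,
purely illustrative and not part of the patch:

  final class ExpectBypassSketch {
    interface ThrowingCall { void run() throws Exception; }

    static void assertBypassed(ThrowingCall call) throws Exception {
      try {
        call.run();
      } catch (Exception e) {
        // Matching by name keeps the sketch free of HBase imports; the
        // tests above catch BypassCoprocessorException directly.
        if (e.getClass().getSimpleName().equals("BypassCoprocessorException")) {
          return;  // the expected bypass happened
        }
        throw e;  // some other failure; surface it
      }
      throw new AssertionError("call was not bypassed");
    }
  }
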
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index eb8f803..234ad20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -651,4 +651,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   public double getCompactionPressure() {
     return 0;
   }
+
+  @Override
+  public ClusterConnection getClusterConnection() {
+    // TODO Auto-generated method stub
+    return null;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index e3283e9..e10ab2a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -326,5 +326,11 @@ public class TestActiveMasterManager {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 8e35bbf..e26bd82 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaMockingUtil;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.Server;
@@ -143,11 +142,10 @@ public class TestCatalogJanitor {
             ServerName.valueOf("example.org,12345,6789"),
           HRegionInfo.FIRST_META_REGIONINFO);
       // Set hbase.rootdir into test dir.
-      FileSystem fs = FileSystem.get(this.c);
+      FileSystem.get(this.c);
       Path rootdir = FSUtils.getRootDir(this.c);
       FSUtils.setRootDir(this.c, rootdir);
-      AdminProtos.AdminService.BlockingInterface hri =
-        Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
+      Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
     }
 
     @Override
@@ -208,6 +206,12 @@ public class TestCatalogJanitor {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 
   /**
@@ -402,48 +406,6 @@ public class TestCatalogJanitor {
     }
 
     @Override
-    public void createNamespace(
-        final NamespaceDescriptor descriptor,
-        final long nonceGroup,
-        final long nonce) throws IOException {
-      //To change body of implemented methods use File | Settings | File Templates.
-    }
-
-    @Override
-    public void createNamespaceSync(
-        final NamespaceDescriptor descriptor,
-        final long nonceGroup,
-        final long nonce) throws IOException {
-      //To change body of implemented methods use File | Settings | File Templates.
-    }
-
-    @Override
-    public void modifyNamespace(
-        final NamespaceDescriptor descriptor,
-        final long nonceGroup,
-        final long nonce) throws IOException {
-      //To change body of implemented methods use File | Settings | File Templates.
-    }
-
-    @Override
-    public void deleteNamespace(
-        final String name,
-        final long nonceGroup,
-        final long nonce) throws IOException {
-      //To change body of implemented methods use File | Settings | File Templates.
-    }
-
-    @Override
-    public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
-      return null;  //To change body of implemented methods use File | Settings | File Templates.
-    }
-
-    @Override
-    public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
-      return null;  //To change body of implemented methods use File | Settings | File Templates.
-    }
-
-    @Override
     public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
         throws IOException {
       return false;  //To change body of implemented methods use File | Settings | File Templates.
@@ -536,32 +498,35 @@ public class TestCatalogJanitor {
     }
 
     @Override
-    public TableNamespaceManager getTableNamespaceManager() {
-      return null;
-    }
-
-    @Override
     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
         boolean forcible) throws IOException {
     }
 
     @Override
     public boolean isInitialized() {
-      // Auto-generated method stub
       return false;
     }
 
     @Override
     public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
-      // Auto-generated method stub
       return 0;
     }
 
     @Override
     public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
-      // Auto-generated method stub
       return 0;
     }
+
+    @Override
+    public ClusterSchema getClusterSchema() {
+      return null;
+    }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
index a19d5d8..142437c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
@@ -100,6 +100,12 @@ public class TestClockSkewDetection {
       public ChoreService getChoreService() {
         return null;
       }
+
+      @Override
+      public ClusterConnection getClusterConnection() {
+        // TODO Auto-generated method stub
+        return null;
+      }
     }, null, false);
 
     LOG.debug("regionServerStartup 1");

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 972834a..398a898 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
@@ -62,8 +63,10 @@ import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 import org.mockito.Mockito;
 
 import com.google.protobuf.ServiceException;
@@ -80,6 +83,8 @@ import com.google.protobuf.ServiceException;
 public class TestMasterNoCluster {
   private static final Log LOG = LogFactory.getLog(TestMasterNoCluster.class);
   private static final HBaseTestingUtility TESTUTIL = new HBaseTestingUtility();
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+      withTimeout(this.getClass()).withLookingForStuckThread(true).build();
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -122,7 +127,7 @@ public class TestMasterNoCluster {
    * @throws KeeperException
    * @throws InterruptedException
    */
-  @Test (timeout=30000)
+  @Test
   public void testStopDuringStart()
   throws IOException, KeeperException, InterruptedException {
     CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(
@@ -141,7 +146,7 @@ public class TestMasterNoCluster {
    * @throws KeeperException
    * @throws InterruptedException
    */
-  @Test (timeout=30000)
+  @Test
   public void testFailover()
   throws IOException, KeeperException, InterruptedException, ServiceException {
     final long now = System.currentTimeMillis();
@@ -193,6 +198,9 @@ public class TestMasterNoCluster {
       }
 
       @Override
+      void initClusterSchemaService() throws IOException, InterruptedException {}
+
+      @Override
       ServerManager createServerManager(Server master, MasterServices services)
       throws IOException {
         ServerManager sm = super.createServerManager(master, services);
@@ -218,10 +226,6 @@ public class TestMasterNoCluster {
           return null;
         }
       }
-
-      @Override
-      void initNamespace() {
-      }
     };
     master.start();
 
@@ -266,6 +270,9 @@ public class TestMasterNoCluster {
       { }
 
       @Override
+      void initClusterSchemaService() throws IOException, InterruptedException {}
+
+      @Override
       void initializeZKBasedSystemTrackers() throws IOException,
       InterruptedException, KeeperException, CoordinatedStateException {
         super.initializeZKBasedSystemTrackers();
@@ -294,10 +301,6 @@ public class TestMasterNoCluster {
           return null;
         }
       }
-
-      @Override
-      void initNamespace() {
-      }
     };
     master.start();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
index 65c8649..c7707b7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
@@ -168,6 +168,12 @@ public class TestSplitLogManager {
     public ChoreService getChoreService() {
       return null;
     }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 
   static Stoppable stopper = new Stoppable() {


[13/17] hbase git commit: HBASE-12593 Tags to work with ByteBuffer.

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
index 104cb5b..e601af7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
@@ -40,7 +40,9 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -58,6 +60,7 @@ import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode;
 import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode;
 import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode;
 import org.apache.hadoop.hbase.security.visibility.expression.Operator;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -73,7 +76,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
 
   private static final byte[] DUMMY_VALUE = new byte[0];
   private static final byte STRING_SERIALIZATION_FORMAT = 2;
-  private static final Tag STRING_SERIALIZATION_FORMAT_TAG = new Tag(
+  private static final Tag STRING_SERIALIZATION_FORMAT_TAG = new ArrayBackedTag(
       TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE,
       new byte[] { STRING_SERIALIZATION_FORMAT });
   private final ExpressionParser expressionParser = new ExpressionParser();
@@ -281,28 +284,27 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
         boolean visibilityTagPresent = false;
         // Save an object allocation where we can
         if (cell.getTagsLength() > 0) {
-          Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
-              cell.getTagsLength());
+          Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell);
           while (tagsItr.hasNext()) {
             boolean includeKV = true;
             Tag tag = tagsItr.next();
             if (tag.getType() == VISIBILITY_TAG_TYPE) {
               visibilityTagPresent = true;
-              int offset = tag.getTagOffset();
-              int endOffset = offset + tag.getTagLength();
+              int offset = tag.getValueOffset();
+              int endOffset = offset + tag.getValueLength();
               while (offset < endOffset) {
-                short len = Bytes.toShort(tag.getBuffer(), offset);
+                short len = getTagValuePartAsShort(tag, offset);
                 offset += 2;
                 if (len < 0) {
                   // This is a NOT label.
                   len = (short) (-1 * len);
-                  String label = Bytes.toString(tag.getBuffer(), offset, len);
+                  String label = Bytes.toString(tag.getValueArray(), offset, len);
                   if (authLabelsFinal.contains(label)) {
                     includeKV = false;
                     break;
                   }
                 } else {
-                  String label = Bytes.toString(tag.getBuffer(), offset, len);
+                  String label = Bytes.toString(tag.getValueArray(), offset, len);
                   if (!authLabelsFinal.contains(label)) {
                     includeKV = false;
                     break;
@@ -353,7 +355,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
       dos.writeShort(bLabel.length);
       dos.write(bLabel);
     }
-    return new Tag(VISIBILITY_TAG_TYPE, baos.toByteArray());
+    return new ArrayBackedTag(VISIBILITY_TAG_TYPE, baos.toByteArray());
   }
 
   private void extractLabels(ExpressionNode node, List<String> labels, List<String> notLabels) {
@@ -423,8 +425,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
       for (Tag tag : deleteVisTags) {
         matchFound = false;
         for (Tag givenTag : putVisTags) {
-          if (Bytes.equals(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength(),
-              givenTag.getBuffer(), givenTag.getTagOffset(), givenTag.getTagLength())) {
+          if (TagUtil.matchingValue(tag, givenTag)) {
             matchFound = true;
             break;
           }
@@ -459,15 +460,15 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
           visibilityString.append(VisibilityConstants.CLOSED_PARAN
               + VisibilityConstants.OR_OPERATOR);
         }
-        int offset = tag.getTagOffset();
-        int endOffset = offset + tag.getTagLength();
+        int offset = tag.getValueOffset();
+        int endOffset = offset + tag.getValueLength();
         boolean expressionStart = true;
         while (offset < endOffset) {
-          short len = Bytes.toShort(tag.getBuffer(), offset);
+          short len = getTagValuePartAsShort(tag, offset);
           offset += 2;
           if (len < 0) {
             len = (short) (-1 * len);
-            String label = Bytes.toString(tag.getBuffer(), offset, len);
+            String label = getTagValuePartAsString(tag, offset, len);
             if (expressionStart) {
               visibilityString.append(VisibilityConstants.OPEN_PARAN
                   + VisibilityConstants.NOT_OPERATOR + CellVisibility.quote(label));
@@ -476,7 +477,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
                   + VisibilityConstants.NOT_OPERATOR + CellVisibility.quote(label));
             }
           } else {
-            String label = Bytes.toString(tag.getBuffer(), offset, len);
+            String label = getTagValuePartAsString(tag, offset, len);
             if (expressionStart) {
               visibilityString.append(VisibilityConstants.OPEN_PARAN + CellVisibility.quote(label));
             } else {
@@ -496,4 +497,20 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
     }
     return null;
   }
+
+  private static short getTagValuePartAsShort(Tag t, int offset) {
+    if (t.hasArray()) {
+      return Bytes.toShort(t.getValueArray(), offset);
+    }
+    return ByteBufferUtils.toShort(t.getValueByteBuffer(), offset);
+  }
+
+  private static String getTagValuePartAsString(Tag t, int offset, int length) {
+    if (t.hasArray()) {
+      return Bytes.toString(t.getValueArray(), offset, length);
+    }
+    byte[] b = new byte[length];
+    ByteBufferUtils.copyFromBufferToArray(b, t.getValueByteBuffer(), offset, 0, length);
+    return Bytes.toString(b);
+  }
 }
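
A side note for readers following the API change in this file: the two private helpers just above show the general pattern for reading value bytes out of a Tag that may be array-backed or ByteBuffer-backed. A minimal standalone sketch of that pattern, using only the Tag accessors this commit introduces (the class name is illustrative; TagUtil.cloneValue packages the same copy-out logic):

  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.util.ByteBufferUtils;

  public final class TagValueExample {
    /** Copies a Tag's value bytes into a fresh array, whatever the backing store. */
    public static byte[] copyValue(Tag t) {
      int len = t.getValueLength();
      byte[] out = new byte[len];
      if (t.hasArray()) {
        // On-heap tag: value bytes live in the backing array at getValueOffset().
        System.arraycopy(t.getValueArray(), t.getValueOffset(), out, 0, len);
      } else {
        // Off-heap tag: copy out of the ByteBuffer without touching its position.
        ByteBufferUtils.copyFromBufferToArray(out, t.getValueByteBuffer(),
            t.getValueOffset(), 0, len);
      }
      return out;
    }
  }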

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index fecff07..2140a5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -183,7 +184,7 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
           boolean foundNonVisTag = false;
           for(Tag t : TestCoprocessorForTagsAtSink.tags) {
             if(t.getType() == NON_VIS_TAG_TYPE) {
-              assertEquals(TEMP, Bytes.toString(t.getValue()));
+              assertEquals(TEMP, Bytes.toString(TagUtil.cloneValue(t)));
               foundNonVisTag = true;
               break;
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index 8414813..b3b3b43 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -43,8 +43,10 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.TagRewriteCell;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -284,11 +286,11 @@ public class TestVisibilityLabelsReplication {
     for (Cell cell : cells) {
       if ((Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0,
           row.length))) {
-        List<Tag> tags = Tag
-            .asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
+        List<Tag> tags = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(),
+            cell.getTagsLength());
         for (Tag tag : tags) {
           if (tag.getType() == TagType.STRING_VIS_TAG_TYPE) {
-            assertEquals(visTag, Bytes.toString(tag.getValue()));
+            assertEquals(visTag, TagUtil.getValueAsString(tag));
             tagFound = true;
             break;
           }
@@ -330,7 +332,7 @@ public class TestVisibilityLabelsReplication {
           boolean foundNonVisTag = false;
           for (Tag t : TestCoprocessorForTagsAtSink.tags) {
             if (t.getType() == NON_VIS_TAG_TYPE) {
-              assertEquals(TEMP, Bytes.toString(t.getValue()));
+              assertEquals(TEMP, TagUtil.getValueAsString(t));
               foundNonVisTag = true;
               break;
             }
@@ -407,11 +409,11 @@ public class TestVisibilityLabelsReplication {
             if (cf == null) {
               cf = CellUtil.cloneFamily(kv);
             }
-            Tag tag = new Tag((byte) NON_VIS_TAG_TYPE, attribute);
+            Tag tag = new ArrayBackedTag((byte) NON_VIS_TAG_TYPE, attribute);
             List<Tag> tagList = new ArrayList<Tag>();
             tagList.add(tag);
             tagList.addAll(kv.getTags());
-            byte[] fromList = Tag.fromList(tagList);
+            byte[] fromList = TagUtil.fromList(tagList);
             TagRewriteCell newcell = new TagRewriteCell(kv, fromList);
             ((List<Cell>) updatedCells).add(newcell);
           }
@@ -433,7 +435,7 @@ public class TestVisibilityLabelsReplication {
         // Check tag presence in the 1st cell in 1st Result
         if (!results.isEmpty()) {
           Cell cell = results.get(0);
-          tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
+          tags = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
         }
       }
     }
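
The sink-side hunk above also demonstrates the new way to append a tag to an existing cell. In isolation, and hedged to the classes this commit uses (ArrayBackedTag, TagUtil, TagRewriteCell; the tag type and value below are illustrative):

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hbase.ArrayBackedTag;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.TagRewriteCell;
  import org.apache.hadoop.hbase.TagUtil;

  public final class TagAppendExample {
    /** Rewraps a KeyValue with one extra tag appended to whatever tags it already carries. */
    public static Cell appendTag(KeyValue kv, byte type, byte[] value) {
      List<Tag> tags = new ArrayList<Tag>(kv.getTags());   // existing tags, possibly empty
      tags.add(new ArrayBackedTag(type, value));           // the new tag
      // TagUtil.fromList re-serializes all tags; TagRewriteCell reuses the rest of the cell.
      return new TagRewriteCell(kv, TagUtil.fromList(tags));
    }
  }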

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
index fdf4fd9..964d6ed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
@@ -21,10 +21,13 @@ package org.apache.hadoop.hbase.util;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -98,14 +101,11 @@ public class HFileTestUtil {
         KeyValue kv = new KeyValue(key, family, qualifier, now, key);
         if (withTag) {
           // add a tag.  Arbitrarily chose mob tag since we have a helper already.
-          Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, key);
+          Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key);
           kv = MobUtils.createMobRefKeyValue(kv, key, tableNameTag);
 
           // verify that the kv has the tag.
-          byte[] ta = kv.getTagsArray();
-          int toff = kv.getTagsOffset();
-          int tlen = kv.getTagsLength();
-          Tag t = Tag.getTag(ta, toff, tlen, TagType.MOB_TABLE_NAME_TAG_TYPE);
+          Tag t = CellUtil.getTag(kv, TagType.MOB_TABLE_NAME_TAG_TYPE);
           if (t == null) {
             throw new IllegalStateException("Tag didn't stick to KV " + kv.toString());
           }
@@ -130,15 +130,12 @@ public class HFileTestUtil {
     ResultScanner s = table.getScanner(new Scan());
     for (Result r : s) {
       for (Cell c : r.listCells()) {
-        byte[] ta = c.getTagsArray();
-        int toff = c.getTagsOffset();
-        int tlen = c.getTagsLength();
-        Tag t = Tag.getTag(ta, toff, tlen, TagType.MOB_TABLE_NAME_TAG_TYPE);
+        Tag t = CellUtil.getTag(c, TagType.MOB_TABLE_NAME_TAG_TYPE);
         if (t == null) {
           fail(c.toString() + " has null tag");
           continue;
         }
-        byte[] tval = t.getValue();
+        byte[] tval = TagUtil.cloneValue(t);
         assertArrayEquals(c.toString() + " has tag" + Bytes.toString(tval),
             r.getRow(), tval);
       }
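
The one-call lookup above replaces the old array/offset/length juggling. As a sketch, assuming the CellUtil.getTag and TagUtil.cloneValue methods added by this commit:

  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.TagType;
  import org.apache.hadoop.hbase.TagUtil;

  public final class TagLookupExample {
    /** Returns the MOB table-name tag value, or null if the cell carries no such tag. */
    public static byte[] mobTableName(Cell c) {
      Tag t = CellUtil.getTag(c, TagType.MOB_TABLE_NAME_TAG_TYPE);
      return t == null ? null : TagUtil.cloneValue(t);   // copy out; works off-heap too
    }
  }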

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java
index 70d6d9d..87cb070 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.MultiThreadedAction.DefaultDataGenerator;
@@ -77,7 +78,7 @@ public class LoadTestDataGeneratorWithTags extends DefaultDataGenerator {
             minTagLength + random.nextInt(maxTagLength - minTagLength));
         tags = new ArrayList<Tag>();
         for (int n = 0; n < numTags; n++) {
-          tags.add(new Tag((byte) 127, tag));
+          tags.add(new ArrayBackedTag((byte) 127, tag));
         }
         Cell updatedCell = new KeyValue(cell.getRowArray(), cell.getRowOffset(),
             cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(),
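
For the data generator the change is mechanical: each generated tag is now an ArrayBackedTag. A short sketch of building a tagged KeyValue from scratch, assuming the KeyValue constructor variant that accepts a Tag array (type 127 and the row/value bytes are illustrative):

  import org.apache.hadoop.hbase.ArrayBackedTag;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class TaggedKeyValueExample {
    public static KeyValue makeTagged() {
      Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 127, Bytes.toBytes("load-test")) };
      // The tags are serialized into the KeyValue's backing array at construction time.
      return new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"),
          System.currentTimeMillis(), Bytes.toBytes("v"), tags);
    }
  }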


[12/17] hbase git commit: HBASE-15066 Small improvements to Canary tool

Posted by sy...@apache.org.
HBASE-15066 Small improvements to Canary tool


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/893a54c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/893a54c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/893a54c3

Branch: refs/heads/hbase-12439
Commit: 893a54c3a40e08b6f50d753fca01688f491893d0
Parents: 073e00c
Author: Enis Soztutar <en...@apache.org>
Authored: Wed Jan 6 02:47:19 2016 -0800
Committer: Enis Soztutar <en...@apache.org>
Committed: Wed Jan 6 02:47:19 2016 -0800

----------------------------------------------------------------------
 bin/hbase                                       |  4 ++
 .../org/apache/hadoop/hbase/tool/Canary.java    | 72 ++++++++++++++++----
 src/main/asciidoc/_chapters/ops_mgt.adoc        | 20 +++---
 3 files changed, 72 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/893a54c3/bin/hbase
----------------------------------------------------------------------
diff --git a/bin/hbase b/bin/hbase
index 5064451..7742b5b 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -98,6 +98,7 @@ if [ $# = 0 ]; then
   echo "  mapredcp        Dump CLASSPATH entries required by mapreduce"
   echo "  pe              Run PerformanceEvaluation"
   echo "  ltt             Run LoadTestTool"
+  echo "  canary          Run the Canary tool"
   echo "  version         Print the version"
   echo "  CLASSNAME       Run the class named CLASSNAME"
   exit 1
@@ -368,6 +369,9 @@ elif [ "$COMMAND" = "pe" ] ; then
 elif [ "$COMMAND" = "ltt" ] ; then
   CLASS='org.apache.hadoop.hbase.util.LoadTestTool'
   HBASE_OPTS="$HBASE_OPTS $HBASE_LTT_OPTS"
+elif [ "$COMMAND" = "canary" ] ; then
+  CLASS='org.apache.hadoop.hbase.tool.Canary'
+  HBASE_OPTS="$HBASE_OPTS $HBASE_CANARY_OPTS"
 elif [ "$COMMAND" = "version" ] ; then
   CLASS='org.apache.hadoop.hbase.util.VersionInfo'
 else

http://git-wip-us.apache.org/repos/asf/hbase/blob/893a54c3/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index a1c8c61..151be42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -195,6 +195,10 @@ public final class Canary implements Tool {
       Table table = null;
       HTableDescriptor tableDesc = null;
       try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("reading table descriptor for table %s",
+            region.getTable()));
+        }
         table = connection.getTable(region.getTable());
         tableDesc = table.getTableDescriptor();
       } catch (IOException e) {
@@ -232,20 +236,24 @@ public final class Canary implements Tool {
           scan.setFilter(new FirstKeyOnlyFilter());
           scan.addFamily(column.getName());
           scan.setMaxResultSize(1L);
+          scan.setSmall(true);
         }
 
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("reading from table %s region %s column family %s and key %s",
+            tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(),
+            Bytes.toStringBinary(startKey)));
+        }
         try {
+          stopWatch.start();
           if (startKey.length > 0) {
-            stopWatch.start();
             table.get(get);
-            stopWatch.stop();
-            sink.publishReadTiming(region, column, stopWatch.getTime());
           } else {
-            stopWatch.start();
             rs = table.getScanner(scan);
-            stopWatch.stop();
-            sink.publishReadTiming(region, column, stopWatch.getTime());
+            rs.next();
           }
+          stopWatch.stop();
+          sink.publishReadTiming(region, column, stopWatch.getTime());
         } catch (Exception e) {
           sink.publishReadFailure(region, column, e);
         } finally {
@@ -286,6 +294,12 @@ public final class Canary implements Tool {
           byte[] value = new byte[writeValueSize];
           Bytes.random(value);
           put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value);
+
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("writing to table %s region %s column family %s and key %s",
+              tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(),
+              Bytes.toStringBinary(rowToCheck)));
+          }
           try {
             long startTime = System.currentTimeMillis();
             table.put(put);
@@ -337,6 +351,11 @@ public final class Canary implements Tool {
         table = connection.getTable(tableName);
         startKey = region.getStartKey();
         // Can't do a get on empty start row so do a Scan of first element if any instead.
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("reading from region server %s table %s region %s and key %s",
+            serverName, region.getTable(), region.getRegionNameAsString(),
+            Bytes.toStringBinary(startKey)));
+        }
         if (startKey.length > 0) {
           get = new Get(startKey);
           get.setCacheBlocks(false);
@@ -350,8 +369,10 @@ public final class Canary implements Tool {
           scan.setFilter(new FirstKeyOnlyFilter());
           scan.setCaching(1);
           scan.setMaxResultSize(1L);
+          scan.setSmall(true);
           stopWatch.start();
           ResultScanner s = table.getScanner(scan);
+          s.next();
           s.close();
           stopWatch.stop();
         }
@@ -547,8 +568,6 @@ public final class Canary implements Tool {
     long startTime = 0;
     long currentTimeLength = 0;
     // Get a connection to use in below.
-    // try-with-resources jdk7 construct. See
-    // http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html
     try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
       do {
         // Do monitor !!
@@ -613,8 +632,8 @@ public final class Canary implements Tool {
     System.err.println("      only works in regionserver mode.");
     System.err.println("   -daemon        Continuous check at defined intervals.");
     System.err.println("   -interval <N>  Interval between checks (sec)");
-    System.err.println("   -e             Use region/regionserver as regular expression");
-    System.err.println("      which means the region/regionserver is regular expression pattern");
+    System.err.println("   -e             Use table/regionserver as regular expression");
+    System.err.println("      which means the table/regionserver is regular expression pattern");
     System.err.println("   -f <B>         stop whole program if first error occurs," +
         " default is true");
     System.err.println("   -t <N>         timeout for a check, default is 600000 (milisecs)");
@@ -691,6 +710,7 @@ public final class Canary implements Tool {
       this.executor = executor;
     }
 
+    @Override
     public abstract void run();
 
     protected boolean initAdmin() {
@@ -793,11 +813,17 @@ public final class Canary implements Tool {
         HTableDescriptor[] tds = null;
         Set<String> tmpTables = new TreeSet<String>();
         try {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("reading list of tables"));
+          }
+          tds = this.admin.listTables(pattern);
+          if (tds == null) {
+            tds = new HTableDescriptor[0];
+          }
           for (String monitorTarget : monitorTargets) {
             pattern = Pattern.compile(monitorTarget);
-            tds = this.admin.listTables(pattern);
-            if (tds != null) {
-              for (HTableDescriptor td : tds) {
+            for (HTableDescriptor td : tds) {
+              if (pattern.matcher(td.getNameAsString()).matches()) {
                 tmpTables.add(td.getNameAsString());
               }
             }
@@ -826,6 +852,9 @@ public final class Canary implements Tool {
      * canary entry point to monitor all the tables.
      */
     private List<Future<Void>> sniff(TaskType taskType) throws Exception {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("reading list of tables"));
+      }
       List<Future<Void>> taskFutures = new LinkedList<Future<Void>>();
       for (HTableDescriptor table : admin.listTables()) {
         if (admin.isTableEnabled(table.getTableName())
@@ -873,7 +902,7 @@ public final class Canary implements Tool {
       int numberOfRegions = (int)(numberOfServers * regionsLowerLimit);
       LOG.info("Number of live regionservers: " + numberOfServers + ", "
           + "pre-splitting the canary table into " + numberOfRegions + " regions "
-          + "(current  lower limi of regions per server is " + regionsLowerLimit
+          + "(current lower limit of regions per server is " + regionsLowerLimit
           + " and you can change it by config: "
           + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY + " )");
       HTableDescriptor desc = new HTableDescriptor(writeTableName);
@@ -916,6 +945,10 @@ public final class Canary implements Tool {
    */
   private static List<Future<Void>> sniff(final Admin admin, final Sink sink, String tableName,
       ExecutorService executor, TaskType taskType) throws Exception {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("checking table is enabled and getting table descriptor for table %s",
+        tableName));
+    }
     if (admin.isTableEnabled(TableName.valueOf(tableName))) {
       return Canary.sniff(admin, sink, admin.getTableDescriptor(TableName.valueOf(tableName)),
         executor, taskType);
@@ -930,6 +963,11 @@ public final class Canary implements Tool {
    */
   private static List<Future<Void>> sniff(final Admin admin, final Sink sink,
       HTableDescriptor tableDesc, ExecutorService executor, TaskType taskType) throws Exception {
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("reading list of regions for table %s", tableDesc.getTableName()));
+    }
+
     Table table = null;
     try {
       table = admin.getConnection().getTable(tableDesc.getTableName());
@@ -975,6 +1013,9 @@ public final class Canary implements Tool {
       List<String> foundTableNames = new ArrayList<String>();
       TableName[] tableNames = null;
 
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("reading list of tables"));
+      }
       try {
         tableNames = this.admin.listTableNames();
       } catch (IOException e) {
@@ -1060,6 +1101,9 @@ public final class Canary implements Tool {
       Table table = null;
       RegionLocator regionLocator = null;
       try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("reading list of tables and locations"));
+        }
         HTableDescriptor[] tableDescs = this.admin.listTables();
         List<HRegionInfo> regions = null;
         for (HTableDescriptor tableDesc : tableDescs) {
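
Two behavioral fixes are visible in the probe hunks above: the scan is now marked small, and next() is actually called, so the published timing covers a real read instead of just scanner creation. The resulting probe pattern, sketched in isolation (table acquisition elided; this mirrors the code above rather than defining any new API):

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

  public final class CanaryProbeExample {
    /** Reads the first cell of the first row, the cheapest possible availability check. */
    public static void probeFirstCell(Table table) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new FirstKeyOnlyFilter()); // only the first KV of the first row
      scan.setCaching(1);
      scan.setMaxResultSize(1L);
      scan.setSmall(true);                      // small scan, suited to one tiny row
      ResultScanner s = table.getScanner(scan);
      try {
        s.next();                               // force the read so the timing is real
      } finally {
        s.close();
      }
    }
  }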

http://git-wip-us.apache.org/repos/asf/hbase/blob/893a54c3/src/main/asciidoc/_chapters/ops_mgt.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index e8d44eb..13835c0 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -79,7 +79,7 @@ There is a Canary class can help users to canary-test the HBase cluster status,
 To see the usage, use the `--help` parameter.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -help
+$ ${HBASE_HOME}/bin/hbase canary -help
 
 Usage: bin/hbase org.apache.hadoop.hbase.tool.Canary [opts] [table1 [table2]...] | [regionserver1 [regionserver2]..]
  where [opts] are:
 Following are some examples based on the previously given case.
 ==== Canary test for every column family (store) of every region of every table
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary
+$ ${HBASE_HOME}/bin/hbase canary
 
 3/12/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf1 in 2ms
 13/12/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf2 in 2ms
 This is the default behavior of this tool.
 You can also test one or more specific tables.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary test-01 test-02
+$ ${HBASE_HOME}/bin/hbase canary test-01 test-02
 ----
 
 ==== Canary test with RegionServer granularity
@@ -155,7 +155,7 @@ $ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary test-01 test-02
 This will pick one small piece of data from each RegionServer; you can also pass RegionServer names as input options to canary-test specific RegionServers.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -regionserver
+$ ${HBASE_HOME}/bin/hbase canary -regionserver
 
 13/12/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs2 in 72ms
 13/12/09 06:05:17 INFO tool.Canary: Read from table:test-02 on region server:rs3 in 34ms
@@ -167,7 +167,7 @@ $ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -regionserver
 This will test both table test-01 and test-02.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -e test-0[1-2]
+$ ${HBASE_HOME}/bin/hbase canary -e test-0[1-2]
 ----
 
 ==== Run canary test as daemon mode
@@ -176,13 +176,13 @@ Run repeatedly with interval defined in option `-interval` whose default value i
 This daemon will stop itself and return a non-zero error code if any error occurs, because the default value of option -f is true.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -daemon
+$ ${HBASE_HOME}/bin/hbase canary -daemon
 ----
 
 Run repeatedly with a 5 second interval; it will not stop itself even if errors occur in the test.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -daemon -interval 50000 -f false
+$ ${HBASE_HOME}/bin/hbase canary -daemon -interval 50000 -f false
 ----
 
 ==== Force timeout if canary test stuck
@@ -192,7 +192,7 @@ Because of this we provide a timeout option to kill the canary test and return a
 This run sets the timeout value to 60 seconds; the default value is 600 seconds.
 
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -t 600000
+$ ${HBASE_HOME}/bin/hbase canary -t 600000
 ----
 
 ==== Enable write sniffing in canary
@@ -203,12 +203,12 @@ When the write sniffing is enabled, the canary tool will create an hbase table a
 regions of the table distributed on all region servers. In each sniffing period, the canary will
 try to put data to these regions to check the write availability of each region server.
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -writeSniffing
+$ ${HBASE_HOME}/bin/hbase canary -writeSniffing
 ----
 
 The default write table is `hbase:canary` and can be specified by the option `-writeTable`.
 ----
-$ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -writeSniffing -writeTable ns:canary
+$ ${HBASE_HOME}/bin/hbase canary -writeSniffing -writeTable ns:canary
 ----
 
 The default value size of each put is 10 bytes and you can set it by the config key:


[15/17] hbase git commit: HBASE-12593 Tags to work with ByteBuffer.

Posted by sy...@apache.org.
HBASE-12593 Tags to work with ByteBuffer.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9b671b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9b671b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9b671b3

Branch: refs/heads/hbase-12439
Commit: a9b671b31f07ade8968b42956aa60c722032dcc8
Parents: 893a54c
Author: anoopsjohn <an...@gmail.com>
Authored: Wed Jan 6 21:28:06 2016 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Wed Jan 6 21:28:06 2016 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/Mutation.java    |   9 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  16 +-
 .../org/apache/hadoop/hbase/ArrayBackedTag.java | 143 ++++++++++++
 .../java/org/apache/hadoop/hbase/CellUtil.java  | 120 +++++++++-
 .../java/org/apache/hadoop/hbase/KeyValue.java  |  22 +-
 .../org/apache/hadoop/hbase/OffheapTag.java     |  83 +++++++
 .../main/java/org/apache/hadoop/hbase/Tag.java  | 191 +++-------------
 .../java/org/apache/hadoop/hbase/TagUtil.java   | 219 +++++++++++++++++++
 .../hadoop/hbase/io/util/StreamUtils.java       |  47 +++-
 .../hadoop/hbase/util/ByteBufferUtils.java      |  23 ++
 .../hbase/util/test/RedundantKVGenerator.java   |   7 +-
 .../org/apache/hadoop/hbase/TestKeyValue.java   |  28 ++-
 .../hadoop/hbase/TestOffheapKeyValue.java       |  25 +--
 .../hbase/codec/TestCellCodecWithTags.java      |  32 +--
 .../hbase/codec/TestKeyValueCodecWithTags.java  |  32 +--
 .../hbase/io/TestTagCompressionContext.java     |   3 +-
 .../util/TestByteRangeWithKVSerialization.java  |   3 +-
 .../row/data/TestRowDataTrivialWithTags.java    |   5 +-
 .../hbase/rest/PerformanceEvaluation.java       |   5 +-
 .../hbase/io/hfile/HFilePrettyPrinter.java      |  10 +-
 .../hadoop/hbase/mapreduce/TextSortReducer.java |   3 +-
 .../hbase/mapreduce/TsvImporterMapper.java      |   3 +-
 .../hbase/mob/DefaultMobStoreCompactor.java     |   4 +-
 .../hbase/mob/DefaultMobStoreFlusher.java       |   5 +-
 .../apache/hadoop/hbase/mob/MobConstants.java   |   3 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  10 +-
 .../compactions/PartitionedMobCompactor.java    |   3 +-
 .../hbase/mob/mapreduce/MemStoreWrapper.java    |   5 +-
 .../hadoop/hbase/regionserver/HMobStore.java    |   6 +-
 .../hadoop/hbase/regionserver/HRegion.java      |  25 ++-
 .../hadoop/hbase/regionserver/HStore.java       |  39 ++--
 .../security/access/AccessControlLists.java     |  11 +-
 .../hbase/security/access/AccessController.java |  71 +++---
 .../DefaultVisibilityLabelServiceImpl.java      |  82 ++++---
 .../visibility/VisibilityController.java        |  66 +++---
 .../VisibilityReplicationEndpoint.java          |   7 +-
 .../security/visibility/VisibilityUtils.java    |  56 ++---
 .../hadoop/hbase/wal/WALPrettyPrinter.java      |   7 +-
 .../hadoop/hbase/PerformanceEvaluation.java     |   4 +-
 .../hbase/client/TestResultSizeEstimation.java  |   5 +-
 .../io/encoding/TestDataBlockEncoders.java      |  25 ++-
 .../hbase/io/encoding/TestEncodedSeekers.java   |   3 +-
 .../io/encoding/TestPrefixTreeEncoding.java     |   5 +-
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |   5 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java |   3 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java   |   5 +-
 .../hbase/io/hfile/TestHFileWriterV3.java       |   3 +-
 .../hadoop/hbase/io/hfile/TestReseekTo.java     |   5 +-
 .../hadoop/hbase/io/hfile/TestSeekTo.java       |  11 +-
 .../hbase/regionserver/TestHMobStore.java       |   5 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  10 +-
 .../TestStoreFileScannerWithTagCompression.java |  12 +-
 .../hadoop/hbase/regionserver/TestTags.java     |  22 +-
 .../wal/TestKeyValueCompression.java            |   5 +-
 .../wal/TestWALCellCodecWithCompression.java    |  10 +-
 .../replication/TestReplicationWithTags.java    |   6 +-
 .../security/access/TestAccessController.java   |   3 +-
 .../ExpAsStringVisibilityLabelServiceImpl.java  |  49 +++--
 ...sibilityLabelReplicationWithExpAsString.java |   3 +-
 .../TestVisibilityLabelsReplication.java        |  16 +-
 .../apache/hadoop/hbase/util/HFileTestUtil.java |  17 +-
 .../util/LoadTestDataGeneratorWithTags.java     |   3 +-
 62 files changed, 1083 insertions(+), 581 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 665c59c..9a550f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -124,7 +125,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
    * @param qualifier
    * @param ts
    * @param value
-   * @param tags - Specify the Tags as an Array {@link KeyValue.Tag}
+   * @param tags - Specify the Tags as an Array
    * @return a KeyValue with this objects row key and the Put identifier.
    */
   KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) {
@@ -138,7 +139,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
    * @return a KeyValue with this objects row key and the Put identifier.
    */
   KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value,
-                             Tag[] tags) {
+      Tag[] tags) {
     return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length,
         family, 0, family == null ? 0 : family.length,
         qualifier, ts, KeyValue.Type.Put, value, tags != null ? Arrays.asList(tags) : null);
@@ -219,11 +220,11 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
                 c.getQualifierLength()));
     stringMap.put("timestamp", c.getTimestamp());
     stringMap.put("vlen", c.getValueLength());
-    List<Tag> tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    List<Tag> tags = CellUtil.getTags(c);
     if (tags != null) {
       List<String> tagsString = new ArrayList<String>();
       for (Tag t : tags) {
-        tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(t.getValue()));
+        tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(TagUtil.cloneValue(t)));
       }
       stringMap.put("tag", tagsString);
     }
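
As the hunk shows, client code is now expected to copy a tag's value out via TagUtil.cloneValue rather than reach into a shared backing buffer. A sketch of rendering a cell's tags for logging, the same shape as the toMap code above:

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.TagUtil;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class TagRenderExample {
    public static List<String> renderTags(Cell c) {
      List<String> out = new ArrayList<String>();
      for (Tag t : CellUtil.getTags(c)) {
        // cloneValue copies the value bytes, so this is safe for off-heap tags too.
        out.add(t.getType() + ":" + Bytes.toStringBinary(TagUtil.cloneValue(t)));
      }
      return out;
    }
  }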

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index c02309b..f5e4305 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -580,20 +581,17 @@ public final class ProtobufUtil {
           if (qv.hasTimestamp()) {
             ts = qv.getTimestamp();
           }
-          byte[] tags;
+          byte[] allTagsBytes;
           if (qv.hasTags()) {
-            tags = qv.getTags().toByteArray();
-            Object[] array = Tag.asList(tags, 0, (short)tags.length).toArray();
-            Tag[] tagArray = new Tag[array.length];
-            for(int i = 0; i< array.length; i++) {
-              tagArray[i] = (Tag)array[i];
-            }
+            allTagsBytes = qv.getTags().toByteArray();
             if(qv.hasDeleteType()) {
               byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
               put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts,
-                  fromDeleteType(qv.getDeleteType()), null, tags));
+                  fromDeleteType(qv.getDeleteType()), null, allTagsBytes));
             } else {
-              put.addImmutable(family, qualifier, ts, value, tagArray);
+              List<Tag> tags = TagUtil.asList(allTagsBytes, 0, (short)allTagsBytes.length);
+              Tag[] tagsArray = new Tag[tags.size()];
+              put.addImmutable(family, qualifier, ts, value, tags.toArray(tagsArray));
             }
           } else {
             if(qv.hasDeleteType()) {
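
One detail of the rewrite above worth isolating: the serialized tag bytes from the protobuf are decoded once with TagUtil.asList and handed to the Put as an array. Sketched on its own, using exactly the calls shown in this hunk:

  import java.util.List;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.TagUtil;
  import org.apache.hadoop.hbase.client.Put;

  public final class ProtoTagExample {
    /** allTagsBytes holds tags in wire format: repeated <2-byte len><1-byte type><value>. */
    public static void addTagged(Put put, byte[] family, byte[] qualifier, long ts,
        byte[] value, byte[] allTagsBytes) {
      List<Tag> tags = TagUtil.asList(allTagsBytes, 0, (short) allTagsBytes.length);
      put.addImmutable(family, qualifier, ts, value, tags.toArray(new Tag[tags.size()]));
    }
  }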

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/ArrayBackedTag.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ArrayBackedTag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ArrayBackedTag.java
new file mode 100644
index 0000000..2f4bb75
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ArrayBackedTag.java
@@ -0,0 +1,143 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This is a {@link Tag} implementation in which value is backed by an on heap byte array.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ArrayBackedTag implements Tag {
+  private final byte type;// TODO  extra type state needed?
+  private final byte[] bytes;
+  private int offset = 0;
+  private int length = 0;
+
+  /**
+   * The special tag will write the length of each tag and that will be
+   * followed by the type and then the actual tag.
+   * So every time the length part is parsed we need to add + 1 byte to it to
+   * get the type and then get the actual tag.
+   */
+  public ArrayBackedTag(byte tagType, String tag) {
+    this(tagType, Bytes.toBytes(tag));
+  }
+
+  /**
+   * Format for a tag :
+   * {@code <length of tag - 2 bytes><type code - 1 byte><tag>} tag length is serialized
+   * using 2 bytes only but as this will be unsigned, we can have max tag length of
+   * (Short.MAX_SIZE * 2) +1. It includes 1 byte type length and actual tag bytes length.
+   */
+  public ArrayBackedTag(byte tagType, byte[] tag) {
+    int tagLength = tag.length + TYPE_LENGTH_SIZE;
+    if (tagLength > MAX_TAG_LENGTH) {
+      throw new IllegalArgumentException(
+          "Invalid tag data being passed. Its length can not exceed " + MAX_TAG_LENGTH);
+    }
+    length = TAG_LENGTH_SIZE + tagLength;
+    bytes = new byte[length];
+    int pos = Bytes.putAsShort(bytes, 0, tagLength);
+    pos = Bytes.putByte(bytes, pos, tagType);
+    Bytes.putBytes(bytes, pos, tag, 0, tag.length);
+    this.type = tagType;
+  }
+
+  /**
+   * Creates a Tag from the specified byte array and offset. Presumes
+   * <code>bytes</code> content starting at <code>offset</code> is formatted as
+   * a Tag blob.
+   * The bytes to include the tag type, tag length and actual tag bytes.
+   * @param offset offset to start of Tag
+   */
+  public ArrayBackedTag(byte[] bytes, int offset) {
+    this(bytes, offset, getLength(bytes, offset));
+  }
+
+  private static int getLength(byte[] bytes, int offset) {
+    return TAG_LENGTH_SIZE + Bytes.readAsInt(bytes, offset, TAG_LENGTH_SIZE);
+  }
+
+  /**
+   * Creates a Tag from the specified byte array, starting at offset, and for length
+   * <code>length</code>. Presumes <code>bytes</code> content starting at <code>offset</code> is
+   * formatted as a Tag blob.
+   */
+  public ArrayBackedTag(byte[] bytes, int offset, int length) {
+    if (length > MAX_TAG_LENGTH) {
+      throw new IllegalArgumentException(
+          "Invalid tag data being passed. Its length can not exceed " + MAX_TAG_LENGTH);
+    }
+    this.bytes = bytes;
+    this.offset = offset;
+    this.length = length;
+    this.type = bytes[offset + TAG_LENGTH_SIZE];
+  }
+
+  /**
+   * @return The byte array backing this Tag.
+   */
+  public byte[] getValueArray() {
+    return this.bytes;
+  }
+
+  /**
+   * @return the tag type
+   */
+  public byte getType() {
+    return this.type;
+  }
+
+  /**
+   * @return Length of actual tag bytes within the backed buffer
+   */
+  public int getValueLength() {
+    return this.length - INFRASTRUCTURE_SIZE;
+  }
+
+  /**
+   * @return Offset of actual tag bytes within the backed buffer
+   */
+  public int getValueOffset() {
+    return this.offset + INFRASTRUCTURE_SIZE;
+  }
+
+  @Override
+  public boolean hasArray() {
+    return true;
+  }
+
+  @Override
+  public ByteBuffer getValueByteBuffer() {
+    return ByteBuffer.wrap(bytes);
+  }
+
+  @Override
+  public String toString() {
+    return "[Tag type : " + this.type + ", value : "
+        + Bytes.toStringBinary(bytes, getValueOffset(), getValueLength()) + "]";
+  }
+}
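A quick usage sketch for the class above; the tag type and value are illustrative:

  import org.apache.hadoop.hbase.ArrayBackedTag;
  import org.apache.hadoop.hbase.Tag;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class ArrayBackedTagExample {
    public static void main(String[] args) {
      Tag t = new ArrayBackedTag((byte) 1, "secret");
      // Backing layout is <2-byte length><1-byte type><value>; the value accessors
      // skip the 3-byte header (INFRASTRUCTURE_SIZE) for you.
      String value = Bytes.toString(t.getValueArray(), t.getValueOffset(), t.getValueLength());
      System.out.println(t.getType() + " -> " + value);   // prints: 1 -> secret
      System.out.println(t.hasArray());                   // true; this class is always on-heap
    }
  }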

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 0d34137..1ec6afa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -19,11 +19,13 @@
 package org.apache.hadoop.hbase;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
+import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.math.BigDecimal;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
@@ -108,8 +110,8 @@ public final class CellUtil {
 
   /**
    * Returns tag value in a new byte array. If server-side, use
-   * {@link Tag#getBuffer()} with appropriate {@link Tag#getTagOffset()} and
-   * {@link Tag#getTagLength()} instead to save on allocations.
+   * {@link Tag#getValueArray()} with appropriate {@link Tag#getValueOffset()} and
+   * {@link Tag#getValueLength()} instead to save on allocations.
    * @param cell
    * @return tag value in a new byte array.
    */
@@ -749,7 +751,10 @@ public final class CellUtil {
    * @param offset
    * @param length
    * @return iterator for the tags
+   * @deprecated As of 2.0.0 and will be removed in 3.0.0
+   *             Instead use {@link #tagsIterator(Cell)}
    */
+  @Deprecated
   public static Iterator<Tag> tagsIterator(final byte[] tags, final int offset, final int length) {
     return new Iterator<Tag>() {
       private int pos = offset;
@@ -764,7 +769,7 @@ public final class CellUtil {
       public Tag next() {
         if (hasNext()) {
           int curTagLen = Bytes.readAsInt(tags, this.pos, Tag.TAG_LENGTH_SIZE);
-          Tag tag = new Tag(tags, pos, curTagLen + Tag.TAG_LENGTH_SIZE);
+          Tag tag = new ArrayBackedTag(tags, pos, curTagLen + TAG_LENGTH_SIZE);
           this.pos += Bytes.SIZEOF_SHORT + curTagLen;
           return tag;
         }
@@ -778,6 +783,115 @@ public final class CellUtil {
     };
   }
 
+  private static Iterator<Tag> tagsIterator(final ByteBuffer tags, final int offset,
+      final int length) {
+    return new Iterator<Tag>() {
+      private int pos = offset;
+      private int endOffset = offset + length - 1;
+
+      @Override
+      public boolean hasNext() {
+        return this.pos < endOffset;
+      }
+
+      @Override
+      public Tag next() {
+        if (hasNext()) {
+          int curTagLen = ByteBufferUtils.readAsInt(tags, this.pos, Tag.TAG_LENGTH_SIZE);
+          Tag tag = new OffheapTag(tags, pos, curTagLen + Tag.TAG_LENGTH_SIZE);
+          this.pos += Bytes.SIZEOF_SHORT + curTagLen;
+          return tag;
+        }
+        return null;
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+
+  private static final Iterator<Tag> EMPTY_TAGS_ITR = new Iterator<Tag>() {
+    @Override
+    public boolean hasNext() {
+      return false;
+    }
+
+    @Override
+    public Tag next() {
+      return null;
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  };
+
+  /**
+   * Util method to iterate through the tags in the given cell.
+   *
+   * @param cell The Cell over which tags iterator is needed.
+   * @return iterator for the tags
+   */
+  public static Iterator<Tag> tagsIterator(final Cell cell) {
+    final int tagsLength = cell.getTagsLength();
+    // Save an object allocation where we can
+    if (tagsLength == 0) {
+      return EMPTY_TAGS_ITR;
+    }
+    if (cell instanceof ByteBufferedCell) {
+      return tagsIterator(((ByteBufferedCell) cell).getTagsByteBuffer(),
+          ((ByteBufferedCell) cell).getTagsPosition(), tagsLength);
+    }
+    return tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
+  }
+
+  /**
+   * @param cell The Cell
+   * @return Tags in the given Cell as a List
+   */
+  public static List<Tag> getTags(Cell cell) {
+    List<Tag> tags = new ArrayList<Tag>();
+    Iterator<Tag> tagsItr = tagsIterator(cell);
+    while (tagsItr.hasNext()) {
+      tags.add(tagsItr.next());
+    }
+    return tags;
+  }
+
+  /**
+   * Retrieve Cell's first tag, matching the passed in type
+   *
+   * @param cell The Cell
+   * @param type Type of the Tag to retrieve
+   * @return null if there is no tag of the passed in tag type
+   */
+  public static Tag getTag(Cell cell, byte type){
+    boolean bufferBacked = cell instanceof ByteBufferedCell;
+    int length = cell.getTagsLength();
+    int offset = bufferBacked? ((ByteBufferedCell)cell).getTagsPosition():cell.getTagsOffset();
+    int pos = offset;
+    while (pos < offset + length) {
+      int tagLen;
+      if (bufferBacked) {
+        ByteBuffer tagsBuffer = ((ByteBufferedCell)cell).getTagsByteBuffer();
+        tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE);
+        if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) {
+          return new OffheapTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE);
+        }
+      } else {
+        tagLen = Bytes.readAsInt(cell.getTagsArray(), pos, TAG_LENGTH_SIZE);
+        if (cell.getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
+          return new ArrayBackedTag(cell.getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE);
+        }
+      }
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return null;
+  }
+
   /**
    * Returns true if the first range start1...end1 overlaps with the second range
    * start2...end2, assuming the byte arrays represent row keys
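
Stepping back from the hunks above: tagsIterator(Cell) is the allocation-light way to walk a cell's tags, and it transparently picks the array-backed or ByteBuffer-backed path. A sketch restricted to the methods this diff adds:

  import java.util.Iterator;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.Tag;

  public final class TagIteratorExample {
    /** Counts tags of the given type without materializing a List<Tag>. */
    public static int countTags(Cell cell, byte type) {
      int n = 0;
      Iterator<Tag> it = CellUtil.tagsIterator(cell); // shared empty iterator when no tags
      while (it.hasNext()) {
        if (it.next().getType() == type) {
          n++;
        }
      }
      return n;
    }
  }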

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 933dd1d..a30a24c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -894,7 +894,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     int tagsLength = 0;
     if (tags != null && tags.length > 0) {
       for (Tag t: tags) {
-        tagsLength += t.getLength();
+        tagsLength += t.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
       }
     }
     checkForTagsLength(tagsLength);
@@ -928,7 +928,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     if (tagsLength > 0) {
       pos = Bytes.putAsShort(buffer, pos, tagsLength);
       for (Tag t : tags) {
-        pos = Bytes.putBytes(buffer, pos, t.getBuffer(), t.getOffset(), t.getLength());
+        int tlen = t.getValueLength();
+        pos = Bytes.putAsShort(buffer, pos, tlen + Tag.TYPE_LENGTH_SIZE);
+        pos = Bytes.putByte(buffer, pos, t.getType());
+        TagUtil.copyValueTo(t, buffer, pos);
+        pos += tlen;
       }
     }
     return keyValueLength;
@@ -1013,7 +1017,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     int tagsLength = 0;
     if (tags != null && !tags.isEmpty()) {
       for (Tag t : tags) {
-        tagsLength += t.getLength();
+        tagsLength += t.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
       }
     }
     checkForTagsLength(tagsLength);
@@ -1053,7 +1057,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     if (tagsLength > 0) {
       pos = Bytes.putAsShort(bytes, pos, tagsLength);
       for (Tag t : tags) {
-        pos = Bytes.putBytes(bytes, pos, t.getBuffer(), t.getOffset(), t.getLength());
+        int tlen = t.getValueLength();
+        pos = Bytes.putAsShort(bytes, pos, tlen + Tag.TYPE_LENGTH_SIZE);
+        pos = Bytes.putByte(bytes, pos, t.getType());
+        TagUtil.copyValueTo(t, bytes, pos);
+        pos += tlen;
       }
     }
     return bytes;
@@ -1176,7 +1184,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     if (tags != null) {
       List<String> tagsString = new ArrayList<String>();
       for (Tag t : tags) {
-        tagsString.add((t.getType()) + ":" +Bytes.toStringBinary(t.getValue()));
+        tagsString.add((t.getType()) + ":" + TagUtil.getValueAsString(t));
       }
       stringMap.put("tag", tagsString);
     }
@@ -1558,7 +1566,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     if (tagsLength == 0) {
       return EMPTY_ARRAY_LIST;
     }
-    return Tag.asList(getTagsArray(), getTagsOffset(), tagsLength);
+    return TagUtil.asList(getTagsArray(), getTagsOffset(), tagsLength);
   }
 
   /**
@@ -2386,7 +2394,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
   public static KeyValue cloneAndAddTags(Cell c, List<Tag> newTags) {
     List<Tag> existingTags = null;
     if(c.getTagsLength() > 0) {
-      existingTags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+      existingTags = CellUtil.getTags(c);
       existingTags.addAll(newTags);
     } else {
       existingTags = newTags;

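To make the size arithmetic in the rewritten serialization concrete: Tag.INFRASTRUCTURE_SIZE is TAG_LENGTH_SIZE (2) plus TYPE_LENGTH_SIZE (1), so a tag whose value is the four bytes "TAG1" occupies 4 + 3 = 7 bytes, and the length field stores the value length plus the type byte:

  [ length = 5 : 2 bytes, unsigned short ][ type : 1 byte ][ value "TAG1" : 4 bytes ]

This is why the loops above accumulate t.getValueLength() + Tag.INFRASTRUCTURE_SIZE per tag.
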
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapTag.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapTag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapTag.java
new file mode 100644
index 0000000..b3d65bb
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapTag.java
@@ -0,0 +1,83 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
+
+/**
+ * This is a {@link Tag} implementation in which the value is backed by an off heap
+ * {@link java.nio.ByteBuffer}
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class OffheapTag implements Tag {
+
+  private ByteBuffer buffer;
+  private int offset, length;
+  private byte type;
+
+  public OffheapTag(ByteBuffer buffer, int offset, int length) {
+    this.buffer = buffer;
+    this.offset = offset;
+    this.length = length;
+    this.type = ByteBufferUtils.toByte(buffer, offset + TAG_LENGTH_SIZE);
+  }
+
+  @Override
+  public byte getType() {
+    return this.type;
+  }
+
+  @Override
+  public int getValueOffset() {
+    return this.offset + INFRASTRUCTURE_SIZE;
+  }
+
+  @Override
+  public int getValueLength() {
+    return this.length - INFRASTRUCTURE_SIZE;
+  }
+
+  @Override
+  public boolean hasArray() {
+    return false;
+  }
+
+  @Override
+  public byte[] getValueArray() {
+    throw new UnsupportedOperationException(
+        "Tag is backed by an off heap buffer. Use getValueByteBuffer()");
+  }
+
+  @Override
+  public ByteBuffer getValueByteBuffer() {
+    return this.buffer;
+  }
+
+  @Override
+  public String toString() {
+    return "[Tag type : " + this.type + ", value : "
+        + ByteBufferUtils.toStringBinary(buffer, getValueOffset(), getValueLength()) + "]";
+  }
+}

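A small sketch of wrapping a serialized tag that already sits in a direct buffer with OffheapTag; the buffer contents are assumed to follow the <length><type><value> layout shown earlier:

  ByteBuffer bb = ByteBuffer.allocateDirect(7);
  // ... assume bytes 0..6 hold one serialized tag: length = 5, one type byte, 4 value bytes ...
  Tag t = new OffheapTag(bb, 0, 7);
  // hasArray() is false here, so address the value through the buffer accessors:
  ByteBuffer val = t.getValueByteBuffer();
  int off = t.getValueOffset();
  int len = t.getValueLength();
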
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
index 36b87b1..1d55baa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
@@ -19,201 +19,60 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.util.ArrayList;
-import java.util.List;
+import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Bytes;
+
 /**
- * Tags are part of cells and helps to add metadata about the KVs.
- * Metadata could be ACLs per cells, visibility labels, etc.
+ * Tags are part of cells and help to add metadata about them.
+ * Metadata could be ACLs, visibility labels, etc.
+ * <p>
+ * Each Tag has a one-byte type and a value part. The max value length for a Tag is 65533.
+ * <p>
+ * See {@link TagType} for reserved tag types.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class Tag {
+public interface Tag {
+
   public final static int TYPE_LENGTH_SIZE = Bytes.SIZEOF_BYTE;
   public final static int TAG_LENGTH_SIZE = Bytes.SIZEOF_SHORT;
   public final static int INFRASTRUCTURE_SIZE = TYPE_LENGTH_SIZE + TAG_LENGTH_SIZE;
   public static final int MAX_TAG_LENGTH = (2 * Short.MAX_VALUE) + 1 - TAG_LENGTH_SIZE;
 
-  private final byte type;
-  private final byte[] bytes;
-  private int offset = 0;
-  private int length = 0;
-
-  /**
-   * The special tag will write the length of each tag and that will be
-   * followed by the type and then the actual tag.
-   * So every time the length part is parsed we need to add + 1 byte to it to
-   * get the type and then get the actual tag.
-   */
-  public Tag(byte tagType, String tag) {
-    this(tagType, Bytes.toBytes(tag));
-  }
-
-  /**
-   * Format for a tag :
-   * {@code <length of tag - 2 bytes><type code - 1 byte><tag>} tag length is serialized
-   * using 2 bytes only but as this will be unsigned, we can have max tag length of
-   * (Short.MAX_SIZE * 2) +1. It includes 1 byte type length and actual tag bytes length.
-   */
-  public Tag(byte tagType, byte[] tag) {
-    int tagLength = tag.length + TYPE_LENGTH_SIZE;
-    if (tagLength > MAX_TAG_LENGTH) {
-      throw new IllegalArgumentException(
-          "Invalid tag data being passed. Its length can not exceed " + MAX_TAG_LENGTH);
-    }
-    length = TAG_LENGTH_SIZE + tagLength;
-    bytes = new byte[length];
-    int pos = Bytes.putAsShort(bytes, 0, tagLength);
-    pos = Bytes.putByte(bytes, pos, tagType);
-    Bytes.putBytes(bytes, pos, tag, 0, tag.length);
-    this.type = tagType;
-  }
-
-  /**
-   * Creates a Tag from the specified byte array and offset. Presumes
-   * <code>bytes</code> content starting at <code>offset</code> is formatted as
-   * a Tag blob.
-   * The bytes to include the tag type, tag length and actual tag bytes.
-   * @param offset offset to start of Tag
-   */
-  public Tag(byte[] bytes, int offset) {
-    this(bytes, offset, getLength(bytes, offset));
-  }
-
-  private static int getLength(byte[] bytes, int offset) {
-    return TAG_LENGTH_SIZE + Bytes.readAsInt(bytes, offset, TAG_LENGTH_SIZE);
-  }
-
-  /**
-   * Creates a Tag from the specified byte array, starting at offset, and for length
-   * <code>length</code>. Presumes <code>bytes</code> content starting at <code>offset</code> is
-   * formatted as a Tag blob.
-   */
-  public Tag(byte[] bytes, int offset, int length) {
-    if (length > MAX_TAG_LENGTH) {
-      throw new IllegalArgumentException(
-          "Invalid tag data being passed. Its length can not exceed " + MAX_TAG_LENGTH);
-    }
-    this.bytes = bytes;
-    this.offset = offset;
-    this.length = length;
-    this.type = bytes[offset + TAG_LENGTH_SIZE];
-  }
-
-  /**
-   * @return The byte array backing this Tag.
-   */
-  public byte[] getBuffer() {
-    return this.bytes;
-  }
-
   /**
    * @return the tag type
    */
-  public byte getType() {
-    return this.type;
-  }
-
-  /**
-   * @return Length of actual tag bytes within the backed buffer
-   */
-  public int getTagLength() {
-    return this.length - INFRASTRUCTURE_SIZE;
-  }
-
-  /**
-   * @return Offset of actual tag bytes within the backed buffer
-   */
-  public int getTagOffset() {
-    return this.offset + INFRASTRUCTURE_SIZE;
-  }
-
-  /**
-   * Returns tag value in a new byte array.
-   * Primarily for use client-side. If server-side, use
-   * {@link #getBuffer()} with appropriate {@link #getTagOffset()} and {@link #getTagLength()}
-   * instead to save on allocations.
-   * @return tag value in a new byte array.
-   */
-  public byte[] getValue() {
-    int tagLength = getTagLength();
-    byte[] tag = new byte[tagLength];
-    Bytes.putBytes(tag, 0, bytes, getTagOffset(), tagLength);
-    return tag;
-  }
+  byte getType();
 
   /**
-   * Creates the list of tags from the byte array b. Expected that b is in the
-   * expected tag format
-   * @param b
-   * @param offset
-   * @param length
-   * @return List of tags
+   * @return Offset of tag value within the backing buffer
    */
-  public static List<Tag> asList(byte[] b, int offset, int length) {
-    List<Tag> tags = new ArrayList<Tag>();
-    int pos = offset;
-    while (pos < offset + length) {
-      int tagLen = Bytes.readAsInt(b, pos, TAG_LENGTH_SIZE);
-      tags.add(new Tag(b, pos, tagLen + TAG_LENGTH_SIZE));
-      pos += TAG_LENGTH_SIZE + tagLen;
-    }
-    return tags;
-  }
+  int getValueOffset();
 
   /**
-   * Write a list of tags into a byte array
-   * @param tags
-   * @return the serialized tag data as bytes
+   * @return Length of tag value within the backing buffer
    */
-  public static byte[] fromList(List<Tag> tags) {
-    int length = 0;
-    for (Tag tag: tags) {
-      length += tag.length;
-    }
-    byte[] b = new byte[length];
-    int pos = 0;
-    for (Tag tag: tags) {
-      System.arraycopy(tag.bytes, tag.offset, b, pos, tag.length);
-      pos += tag.length;
-    }
-    return b;
-  }
+  int getValueLength();
 
   /**
-   * Retrieve the first tag from the tags byte array matching the passed in tag type
-   * @param b
-   * @param offset
-   * @param length
-   * @param type
-   * @return null if there is no tag of the passed in tag type
+   * Tells whether or not this Tag is backed by a byte array.
+   * @return true when this Tag is backed by a byte array
    */
-  public static Tag getTag(byte[] b, int offset, int length, byte type) {
-    int pos = offset;
-    while (pos < offset + length) {
-      int tagLen = Bytes.readAsInt(b, pos, TAG_LENGTH_SIZE);
-      if(b[pos + TAG_LENGTH_SIZE] == type) {
-        return new Tag(b, pos, tagLen + TAG_LENGTH_SIZE);
-      }
-      pos += TAG_LENGTH_SIZE + tagLen;
-    }
-    return null;
-  }
+  boolean hasArray();
 
   /**
-   * Returns the total length of the entire tag entity
+   * @return The array containing the value bytes.
+   * @throws UnsupportedOperationException
+   *           when {@link #hasArray()} returns false. Use {@link #getValueByteBuffer()} in that
+   *           case
    */
-  int getLength() {
-    return this.length;
-  }
+  byte[] getValueArray();
 
   /**
-   * Returns the offset of the entire tag entity
+   * @return The {@link java.nio.ByteBuffer} containing the value bytes.
    */
-  int getOffset() {
-    return this.offset;
-  }
+  ByteBuffer getValueByteBuffer();
 }

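Because a Tag can now be backed either way, callers that want raw value bytes follow the same hasArray() dispatch that TagUtil uses; a sketch, where process(...) is a hypothetical consumer:

  if (tag.hasArray()) {
    // On heap: address the value in place, no copy.
    process(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
  } else {
    // Off heap: copy the value out once, or work on getValueByteBuffer() directly.
    byte[] copy = TagUtil.cloneValue(tag);
    process(copy, 0, copy.length);
  }
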
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
new file mode 100644
index 0000000..15ddfc8
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.util.StreamUtils;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+@InterfaceAudience.Private
+public final class TagUtil {
+
+  /**
+   * Private constructor to keep this class from being instantiated.
+   */
+  private TagUtil() {}
+
+  /**
+   * Returns tag value in a new byte array.
+   * Primarily for use client-side. If server-side, use
+   * {@link Tag#getValueArray()} with appropriate {@link Tag#getValueOffset()}
+   * and {@link Tag#getValueLength()} instead to save on allocations.
+   *
+   * @param tag The Tag whose value to be returned
+   * @return tag value in a new byte array.
+   */
+  public static byte[] cloneValue(Tag tag) {
+    int tagLength = tag.getValueLength();
+    byte[] tagArr = new byte[tagLength];
+    if (tag.hasArray()) {
+      Bytes.putBytes(tagArr, 0, tag.getValueArray(), tag.getValueOffset(), tagLength);
+    } else {
+      ByteBufferUtils.copyFromBufferToArray(tagArr, tag.getValueByteBuffer(), tag.getValueOffset(),
+          0, tagLength);
+    }
+    return tagArr;
+  }
+
+  /**
+   * Creates a list of tags from the given byte array, which must be in the serialized tag format.
+   *
+   * @param b The byte array
+   * @param offset The offset in array where tag bytes begin
+   * @param length Total length of all tags bytes
+   * @return List of tags
+   */
+  public static List<Tag> asList(byte[] b, int offset, int length) {
+    List<Tag> tags = new ArrayList<Tag>();
+    int pos = offset;
+    while (pos < offset + length) {
+      int tagLen = Bytes.readAsInt(b, pos, TAG_LENGTH_SIZE);
+      tags.add(new ArrayBackedTag(b, pos, tagLen + TAG_LENGTH_SIZE));
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return tags;
+  }
+
+  /**
+   * Creates a list of tags from the given ByteBuffer, which must be in the serialized tag format.
+   *
+   * @param b The ByteBuffer
+   * @param offset The offset in ByteBuffer where tag bytes begin
+   * @param length Total length of all tags bytes
+   * @return List of tags
+   */
+  public static List<Tag> asList(ByteBuffer b, int offset, int length) {
+    List<Tag> tags = new ArrayList<Tag>();
+    int pos = offset;
+    while (pos < offset + length) {
+      int tagLen = ByteBufferUtils.readAsInt(b, pos, TAG_LENGTH_SIZE);
+      tags.add(new OffheapTag(b, pos, tagLen + TAG_LENGTH_SIZE));
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return tags;
+  }
+
+  /**
+   * Write a list of tags into a byte array
+   *
+   * @param tags The list of tags
+   * @return the serialized tag data as bytes
+   */
+  public static byte[] fromList(List<Tag> tags) {
+    if (tags.isEmpty()) {
+      return HConstants.EMPTY_BYTE_ARRAY;
+    }
+    int length = 0;
+    for (Tag tag : tags) {
+      length += tag.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
+    }
+    byte[] b = new byte[length];
+    int pos = 0;
+    int tlen;
+    for (Tag tag : tags) {
+      tlen = tag.getValueLength();
+      pos = Bytes.putAsShort(b, pos, tlen + Tag.TYPE_LENGTH_SIZE);
+      pos = Bytes.putByte(b, pos, tag.getType());
+      if (tag.hasArray()) {
+        pos = Bytes.putBytes(b, pos, tag.getValueArray(), tag.getValueOffset(), tlen);
+      } else {
+        ByteBufferUtils.copyFromBufferToArray(b, tag.getValueByteBuffer(), tag.getValueOffset(),
+            pos, tlen);
+        pos += tlen;
+      }
+    }
+    return b;
+  }
+
+  /**
+   * Converts the value bytes of the given tag into a long value
+   * @param tag The Tag
+   * @return value as long
+   */
+  public static long getValueAsLong(Tag tag) {
+    if (tag.hasArray()) {
+      return Bytes.toLong(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
+    }
+    return ByteBufferUtils.toLong(tag.getValueByteBuffer(), tag.getValueOffset());
+  }
+
+  /**
+   * Converts the value bytes of the given tag into a byte value
+   * @param tag The Tag
+   * @return value as byte
+   */
+  public static byte getValueAsByte(Tag tag) {
+    if (tag.hasArray()) {
+      return tag.getValueArray()[tag.getValueOffset()];
+    }
+    return ByteBufferUtils.toByte(tag.getValueByteBuffer(), tag.getValueOffset());
+  }
+
+  /**
+   * Converts the value bytes of the given tag into a String value
+   * @param tag The Tag
+   * @return value as String
+   */
+  public static String getValueAsString(Tag tag) {
+    if (tag.hasArray()) {
+      return Bytes.toString(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
+    }
+    return Bytes.toString(cloneValue(tag));
+  }
+
+  /**
+   * Matches the value part of given tags
+   * @param t1 Tag to match the value
+   * @param t2 Tag to match the value
+   * @return True if the values of both tags are the same.
+   */
+  public static boolean matchingValue(Tag t1, Tag t2) {
+    if (t1.hasArray() && t2.hasArray()) {
+      return Bytes.equals(t1.getValueArray(), t1.getValueOffset(), t1.getValueLength(),
+          t2.getValueArray(), t2.getValueOffset(), t2.getValueLength());
+    }
+    if (t1.hasArray()) {
+      return ByteBufferUtils.equals(t2.getValueByteBuffer(), t2.getValueOffset(),
+          t2.getValueLength(), t1.getValueArray(), t1.getValueOffset(), t1.getValueLength());
+    }
+    if (t2.hasArray()) {
+      return ByteBufferUtils.equals(t1.getValueByteBuffer(), t1.getValueOffset(),
+          t1.getValueLength(), t2.getValueArray(), t2.getValueOffset(), t2.getValueLength());
+    }
+    return ByteBufferUtils.equals(t1.getValueByteBuffer(), t1.getValueOffset(), t1.getValueLength(),
+        t2.getValueByteBuffer(), t2.getValueOffset(), t2.getValueLength());
+  }
+
+  /**
+   * Copies the tag's value bytes to the given byte array
+   * @param tag The Tag
+   * @param out The byte array into which the Tag value is copied.
+   * @param offset The offset within the 'out' array at which the Tag value is copied.
+   */
+  public static void copyValueTo(Tag tag, byte[] out, int offset) {
+    if (tag.hasArray()) {
+      Bytes.putBytes(out, offset, tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
+    } else {
+      ByteBufferUtils.copyFromBufferToArray(out, tag.getValueByteBuffer(), tag.getValueOffset(),
+          offset, tag.getValueLength());
+    }
+  }
+
+  /**
+   * Reads an int value stored as a VInt at tag's given offset.
+   * @param tag The Tag
+   * @param offset The offset where VInt bytes begin
+   * @return A pair of the int value and number of bytes taken to store VInt
+   * @throws IOException When the varint is malformed and cannot be read correctly
+   */
+  public static Pair<Integer, Integer> readVIntValuePart(Tag tag, int offset) throws IOException {
+    if (tag.hasArray()) {
+      return StreamUtils.readRawVarint32(tag.getValueArray(), offset);
+    }
+    return StreamUtils.readRawVarint32(tag.getValueByteBuffer(), offset);
+  }
+}
\ No newline at end of file

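A usage sketch of the TagUtil serialization helpers above; the tag types and values are illustrative only:

  List<Tag> tags = new ArrayList<Tag>();
  tags.add(new ArrayBackedTag((byte) 1, Bytes.toBytes("v1")));
  tags.add(new ArrayBackedTag((byte) 2, Bytes.toBytes("v2")));
  byte[] serialized = TagUtil.fromList(tags);            // <length><type><value> per tag
  List<Tag> roundTrip = TagUtil.asList(serialized, 0, serialized.length);
  assert TagUtil.matchingValue(tags.get(0), roundTrip.get(0));
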
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 6e13b44..0e1c3ae 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.io.util;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.nio.ByteBuff;
@@ -127,9 +128,10 @@ public class StreamUtils {
    *          Offset in the input array where varInt is available
   * @return A pair of integers: the first is the decoded varInt value and the second is the
   *         number of bytes the varInt occupies in the input array.
-   * @throws IOException
+   * @throws IOException When the varint is malformed and cannot be read correctly
    */
-  public static Pair<Integer, Integer> readRawVarint32(byte[] input, int offset) throws IOException {
+  public static Pair<Integer, Integer> readRawVarint32(byte[] input, int offset)
+      throws IOException {
     int newOffset = offset;
     byte tmp = input[newOffset++];
     if (tmp >= 0) {
@@ -169,6 +171,47 @@ public class StreamUtils {
     return new Pair<Integer, Integer>(result, newOffset - offset);
   }
 
+  public static Pair<Integer, Integer> readRawVarint32(ByteBuffer input, int offset)
+      throws IOException {
+    int newOffset = offset;
+    byte tmp = input.get(newOffset++);
+    if (tmp >= 0) {
+      return new Pair<Integer, Integer>((int) tmp, newOffset - offset);
+    }
+    int result = tmp & 0x7f;
+    tmp = input.get(newOffset++);
+    if (tmp >= 0) {
+      result |= tmp << 7;
+    } else {
+      result |= (tmp & 0x7f) << 7;
+      tmp = input.get(newOffset++);
+      if (tmp >= 0) {
+        result |= tmp << 14;
+      } else {
+        result |= (tmp & 0x7f) << 14;
+        tmp = input.get(newOffset++);
+        if (tmp >= 0) {
+          result |= tmp << 21;
+        } else {
+          result |= (tmp & 0x7f) << 21;
+          tmp = input.get(newOffset++);
+          result |= tmp << 28;
+          if (tmp < 0) {
+            // Discard upper 32 bits.
+            for (int i = 0; i < 5; i++) {
+              tmp = input.get(newOffset++);
+              if (tmp >= 0) {
+                return new Pair<Integer, Integer>(result, newOffset - offset);
+              }
+            }
+            throw new IOException("Malformed varint");
+          }
+        }
+      }
+    }
+    return new Pair<Integer, Integer>(result, newOffset - offset);
+  }
+
   public static short toShort(byte hi, byte lo) {
     short s = (short) (((hi & 0xFF) << 8) | (lo & 0xFF));
     Preconditions.checkArgument(s >= 0);

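For context on the new ByteBuffer overload: a varint32 stores seven payload bits per byte, least significant group first, with the high bit of each byte as a continuation flag. A worked example, assuming a buffer holding the two-byte encoding of 300:

  // 300 = 0b100101100: low seven bits 0x2C with the continuation bit set (0xAC), then 0x02.
  ByteBuffer bb = ByteBuffer.wrap(new byte[] { (byte) 0xAC, 0x02 });
  Pair<Integer, Integer> p = StreamUtils.readRawVarint32(bb, 0);
  // p.getFirst() == 300 (decoded value); p.getSecond() == 2 (bytes consumed)
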
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
index 7bcc872..6e3fcaa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
@@ -751,6 +751,29 @@ public final class ByteBufferUtils {
   }
 
   /**
+   * Converts a ByteBuffer to an int value
+   *
+   * @param buf The ByteBuffer
+   * @param offset Offset to int value
+   * @param length Number of bytes used to store the int value.
+   * @return the int value
+   * @throws IllegalArgumentException
+   *           if there's not enough bytes left in the buffer after the given offset
+   */
+  public static int readAsInt(ByteBuffer buf, int offset, final int length) {
+    if (offset + length > buf.limit()) {
+      throw new IllegalArgumentException("offset (" + offset + ") + length (" + length
+          + ") exceed the limit of the buffer: " + buf.limit());
+    }
+    int n = 0;
+    for (int i = offset; i < (offset + length); i++) {
+      n <<= 8;
+      n ^= toByte(buf, i) & 0xFF;
+    }
+    return n;
+  }
+
+  /**
    * Reads a long value at the given buffer's offset.
    * @param buffer
    * @param offset

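readAsInt accumulates bytes big-endian into an int, which lets tag code read its two-byte unsigned length fields without the sign trouble of ByteBuffer.getShort(). A small example with assumed buffer contents:

  ByteBuffer bb = ByteBuffer.wrap(new byte[] { 0x01, 0x2C });   // 0x012C == 300
  int len = ByteBufferUtils.readAsInt(bb, 0, Tag.TAG_LENGTH_SIZE);
  // len == 300; lengths up to 65535 come back as non-negative ints
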
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
index b44a724..7dc3d5a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
@@ -280,8 +281,8 @@ public class RedundantKVGenerator {
       }
 
       if (useTags) {
-        result.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
-            (byte) 1, "value1") }));
+        result.add(new KeyValue(row, family, qualifier, timestamp, value,
+            new Tag[] { new ArrayBackedTag((byte) 1, "value1") }));
       } else {
         result.add(new KeyValue(row, family, qualifier, timestamp, value));
       }
@@ -365,7 +366,7 @@ public class RedundantKVGenerator {
       }
       if (useTags) {
         KeyValue keyValue = new KeyValue(row, family, qualifier, timestamp, value,
-            new Tag[] { new Tag((byte) 1, "value1") });
+            new Tag[] { new ArrayBackedTag((byte) 1, "value1") });
         ByteBuffer offheapKVBB = ByteBuffer.allocateDirect(keyValue.getLength());
         ByteBufferUtils.copyFromArrayToBuffer(offheapKVBB, keyValue.getBuffer(),
           keyValue.getOffset(), keyValue.getLength());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index cc1e511..e233348 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -442,7 +442,7 @@ public class TestKeyValue extends TestCase {
     byte[] metaValue1 = Bytes.toBytes("metaValue1");
     byte[] metaValue2 = Bytes.toBytes("metaValue2");
     KeyValue kv = new KeyValue(row, cf, q, HConstants.LATEST_TIMESTAMP, value, new Tag[] {
-        new Tag((byte) 1, metaValue1), new Tag((byte) 2, metaValue2) });
+        new ArrayBackedTag((byte) 1, metaValue1), new ArrayBackedTag((byte) 2, metaValue2) });
     assertTrue(kv.getTagsLength() > 0);
     assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), row, 0,
       row.length));
@@ -458,44 +458,42 @@ public class TestKeyValue extends TestCase {
     boolean meta1Ok = false, meta2Ok = false;
     for (Tag tag : tags) {
       if (tag.getType() == (byte) 1) {
-        if (Bytes.equals(tag.getValue(), metaValue1)) {
+        if (Bytes.equals(TagUtil.cloneValue(tag), metaValue1)) {
           meta1Ok = true;
         }
       } else {
-        if (Bytes.equals(tag.getValue(), metaValue2)) {
+        if (Bytes.equals(TagUtil.cloneValue(tag), metaValue2)) {
           meta2Ok = true;
         }
       }
     }
     assertTrue(meta1Ok);
     assertTrue(meta2Ok);
-    Iterator<Tag> tagItr = CellUtil.tagsIterator(kv.getTagsArray(), kv.getTagsOffset(),
-        kv.getTagsLength());
+    Iterator<Tag> tagItr = CellUtil.tagsIterator(kv);
     //Iterator<Tag> tagItr = kv.tagsIterator();
     assertTrue(tagItr.hasNext());
     Tag next = tagItr.next();
-    assertEquals(10, next.getTagLength());
+    assertEquals(10, next.getValueLength());
     assertEquals((byte) 1, next.getType());
-    Bytes.equals(next.getValue(), metaValue1);
+    assertTrue(Bytes.equals(TagUtil.cloneValue(next), metaValue1));
     assertTrue(tagItr.hasNext());
     next = tagItr.next();
-    assertEquals(10, next.getTagLength());
+    assertEquals(10, next.getValueLength());
     assertEquals((byte) 2, next.getType());
-    Bytes.equals(next.getValue(), metaValue2);
+    assertTrue(Bytes.equals(TagUtil.cloneValue(next), metaValue2));
     assertFalse(tagItr.hasNext());
 
-    tagItr = CellUtil.tagsIterator(kv.getTagsArray(), kv.getTagsOffset(),
-        kv.getTagsLength());
+    tagItr = CellUtil.tagsIterator(kv);
     assertTrue(tagItr.hasNext());
     next = tagItr.next();
-    assertEquals(10, next.getTagLength());
+    assertEquals(10, next.getValueLength());
     assertEquals((byte) 1, next.getType());
-    Bytes.equals(next.getValue(), metaValue1);
+    assertTrue(Bytes.equals(TagUtil.cloneValue(next), metaValue1));
     assertTrue(tagItr.hasNext());
     next = tagItr.next();
-    assertEquals(10, next.getTagLength());
+    assertEquals(10, next.getValueLength());
     assertEquals((byte) 2, next.getType());
-    Bytes.equals(next.getValue(), metaValue2);
+    assertTrue(Bytes.equals(TagUtil.cloneValue(next), metaValue2));
     assertFalse(tagItr.hasNext());
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/test/java/org/apache/hadoop/hbase/TestOffheapKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestOffheapKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestOffheapKeyValue.java
index f021215..9e76fc5 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestOffheapKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestOffheapKeyValue.java
@@ -43,8 +43,8 @@ public class TestOffheapKeyValue {
   private static final byte[] fam2 = Bytes.toBytes(FAM2);
   private static final byte[] qual1 = Bytes.toBytes(QUAL1);
   private static final byte[] qual2 = Bytes.toBytes(QUAL2);
-  private static final Tag t1 = new Tag((byte) 1, Bytes.toBytes("TAG1"));
-  private static final Tag t2 = new Tag((byte) 2, Bytes.toBytes("TAG2"));
+  private static final Tag t1 = new ArrayBackedTag((byte) 1, Bytes.toBytes("TAG1"));
+  private static final Tag t2 = new ArrayBackedTag((byte) 2, Bytes.toBytes("TAG2"));
   private static final ArrayList<Tag> tags = new ArrayList<Tag>();
   static {
     tags.add(t1);
@@ -158,17 +158,17 @@ public class TestOffheapKeyValue {
     assertEquals(0L, offheapKV.getTimestamp());
     assertEquals(Type.Put.getCode(), offheapKV.getTypeByte());
     // change tags to handle both onheap and offheap stuff
-    List<Tag> resTags =
-        Tag.asList(offheapKV.getTagsArray(), offheapKV.getTagsOffset(), offheapKV.getTagsLength());
+    List<Tag> resTags = TagUtil.asList(offheapKV.getTagsArray(), offheapKV.getTagsOffset(),
+        offheapKV.getTagsLength());
     Tag tag1 = resTags.get(0);
     assertEquals(t1.getType(), tag1.getType());
-    assertEquals(Bytes.toString(t1.getValue()), Bytes.toString(getTagValue(tag1)));
+    assertEquals(TagUtil.getValueAsString(t1), TagUtil.getValueAsString(tag1));
     Tag tag2 = resTags.get(1);
     assertEquals(t2.getType(), tag2.getType());
-    assertEquals(Bytes.toString(t2.getValue()), Bytes.toString(getTagValue(tag2)));
-    Tag res = Tag.getTag(offheapKV.getTagsArray(), 0, offheapKV.getTagsLength(), (byte) 2);
-    assertEquals(Bytes.toString(t2.getValue()), Bytes.toString(getTagValue(tag2)));
-    res = Tag.getTag(offheapKV.getTagsArray(), 0, offheapKV.getTagsLength(), (byte) 3);
+    assertEquals(TagUtil.getValueAsString(t2), TagUtil.getValueAsString(tag2));
+    Tag res = CellUtil.getTag(offheapKV, (byte) 2);
+    assertEquals(TagUtil.getValueAsString(t2), TagUtil.getValueAsString(res));
+    res = CellUtil.getTag(offheapKV, (byte) 3);
     assertNull(res);
   }
 
@@ -195,11 +195,4 @@ public class TestOffheapKeyValue {
     assertEquals(0L, offheapKeyOnlyKV.getTimestamp());
     assertEquals(Type.Put.getCode(), offheapKeyOnlyKV.getTypeByte());
   }
-  // TODO : Can be moved to TagUtil
-  private static byte[] getTagValue(Tag tag) {
-    int tagLength = tag.getTagLength();
-    byte[] tagBytes = new byte[tagLength];
-    System.arraycopy(tag.getBuffer(), tag.getTagOffset(), tagBytes, 0, tagLength);
-    return tagBytes;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
index beff87a..cc70742 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestCellCodecWithTags.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -54,16 +56,16 @@ public class TestCellCodecWithTags {
     Codec.Encoder encoder = codec.getEncoder(dos);
     final Cell cell1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"),
         HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"), new Tag[] {
-            new Tag((byte) 1, Bytes.toBytes("teststring1")),
-            new Tag((byte) 2, Bytes.toBytes("teststring2")) });
+            new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring1")),
+            new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring2")) });
     final Cell cell2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"),
-        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"), new Tag[] { new Tag((byte) 1,
+        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"), new Tag[] { new ArrayBackedTag((byte) 1,
             Bytes.toBytes("teststring3")), });
     final Cell cell3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"),
         HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"), new Tag[] {
-            new Tag((byte) 2, Bytes.toBytes("teststring4")),
-            new Tag((byte) 2, Bytes.toBytes("teststring5")),
-            new Tag((byte) 1, Bytes.toBytes("teststring6")) });
+            new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring4")),
+            new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring5")),
+            new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring6")) });
 
     encoder.write(cell1);
     encoder.write(cell2);
@@ -77,36 +79,36 @@ public class TestCellCodecWithTags {
     assertTrue(decoder.advance());
     Cell c = decoder.current();
     assertTrue(CellUtil.equals(c, cell1));
-    List<Tag> tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    List<Tag> tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(2, tags.size());
     Tag tag = tags.get(0);
     assertEquals(1, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring1"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring1"), TagUtil.cloneValue(tag)));
     tag = tags.get(1);
     assertEquals(2, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), TagUtil.cloneValue(tag)));
     assertTrue(decoder.advance());
     c = decoder.current();
     assertTrue(CellUtil.equals(c, cell2));
-    tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(1, tags.size());
     tag = tags.get(0);
     assertEquals(1, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), TagUtil.cloneValue(tag)));
     assertTrue(decoder.advance());
     c = decoder.current();
     assertTrue(CellUtil.equals(c, cell3));
-    tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(3, tags.size());
     tag = tags.get(0);
     assertEquals(2, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring4"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring4"), TagUtil.cloneValue(tag)));
     tag = tags.get(1);
     assertEquals(2, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring5"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring5"), TagUtil.cloneValue(tag)));
     tag = tags.get(2);
     assertEquals(1, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring6"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring6"), TagUtil.cloneValue(tag)));
     assertFalse(decoder.advance());
     dis.close();
     assertEquals(offset, cis.getCount());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
index 04fb9a9..238d0a6 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/codec/TestKeyValueCodecWithTags.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -54,16 +56,16 @@ public class TestKeyValueCodecWithTags {
     Codec.Encoder encoder = codec.getEncoder(dos);
     final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"),
         HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"), new Tag[] {
-            new Tag((byte) 1, Bytes.toBytes("teststring1")),
-            new Tag((byte) 2, Bytes.toBytes("teststring2")) });
+            new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring1")),
+            new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring2")) });
     final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"),
-        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"), new Tag[] { new Tag((byte) 1,
+        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"), new Tag[] { new ArrayBackedTag((byte) 1,
             Bytes.toBytes("teststring3")), });
     final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"),
         HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"), new Tag[] {
-            new Tag((byte) 2, Bytes.toBytes("teststring4")),
-            new Tag((byte) 2, Bytes.toBytes("teststring5")),
-            new Tag((byte) 1, Bytes.toBytes("teststring6")) });
+            new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring4")),
+            new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring5")),
+            new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring6")) });
 
     encoder.write(kv1);
     encoder.write(kv2);
@@ -77,36 +79,36 @@ public class TestKeyValueCodecWithTags {
     assertTrue(decoder.advance());
     Cell c = decoder.current();
     assertTrue(CellUtil.equals(c, kv1));
-    List<Tag> tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    List<Tag> tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(2, tags.size());
     Tag tag = tags.get(0);
     assertEquals(1, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring1"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring1"), TagUtil.cloneValue(tag)));
     tag = tags.get(1);
     assertEquals(2, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), TagUtil.cloneValue(tag)));
     assertTrue(decoder.advance());
     c = decoder.current();
     assertTrue(CellUtil.equals(c, kv2));
-    tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(1, tags.size());
     tag = tags.get(0);
     assertEquals(1, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), TagUtil.cloneValue(tag)));
     assertTrue(decoder.advance());
     c = decoder.current();
     assertTrue(CellUtil.equals(c, kv3));
-    tags = Tag.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
+    tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
     assertEquals(3, tags.size());
     tag = tags.get(0);
     assertEquals(2, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring4"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring4"), TagUtil.cloneValue(tag)));
     tag = tags.get(1);
     assertEquals(2, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring5"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring5"), TagUtil.cloneValue(tag)));
     tag = tags.get(2);
     assertEquals(1, tag.getType());
-    assertTrue(Bytes.equals(Bytes.toBytes("teststring6"), tag.getValue()));
+    assertTrue(Bytes.equals(Bytes.toBytes("teststring6"), TagUtil.cloneValue(tag)));
     assertFalse(decoder.advance());
     dis.close();
     assertEquals(offset, cis.getCount());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java
index f4c4afe..6c46cf2 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java
@@ -28,6 +28,7 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -97,7 +98,7 @@ public class TestTagCompressionContext {
   private KeyValue createKVWithTags(int noOfTags) {
     List<Tag> tags = new ArrayList<Tag>();
     for (int i = 0; i < noOfTags; i++) {
-      tags.add(new Tag((byte) i, "tagValue" + i));
+      tags.add(new ArrayBackedTag((byte) i, "tagValue" + i));
     }
     KeyValue kv = new KeyValue(ROW, CF, Q, 1234L, V, tags);
     return kv;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
index bd2a29d..717e24c 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
@@ -22,6 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Assert;
@@ -65,7 +66,7 @@ public class TestByteRangeWithKVSerialization {
     int kvCount = 1000000;
     List<KeyValue> kvs = new ArrayList<KeyValue>(kvCount);
     int totalSize = 0;
-    Tag[] tags = new Tag[] { new Tag((byte) 1, "tag1") };
+    Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 1, "tag1") };
     for (int i = 0; i < kvCount; i++) {
       KeyValue kv = new KeyValue(Bytes.toBytes(i), FAMILY, QUALIFIER, i, VALUE, tags);
       kv.setSequenceId(i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
index 3c3699b..a615155 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
@@ -23,6 +23,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta;
 import org.apache.hadoop.hbase.codec.prefixtree.row.BaseTestRowData;
 import org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition;
@@ -46,9 +47,9 @@ public class TestRowDataTrivialWithTags extends BaseTestRowData{
   static List<KeyValue> d = Lists.newArrayList();
   static {
     List<Tag> tagList = new ArrayList<Tag>();
-    Tag t = new Tag((byte) 1, "visisbility");
+    Tag t = new ArrayBackedTag((byte) 1, "visisbility");
     tagList.add(t);
-    t = new Tag((byte) 2, "ACL");
+    t = new ArrayBackedTag((byte) 2, "ACL");
     tagList.add(t);
     d.add(new KeyValue(rA, cf, cq0, ts, v0, tagList));
     d.add(new KeyValue(rB, cf, cq0, ts, v0, tagList));

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index 8424bf9..dcd5b0a 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -1124,7 +1125,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         byte[] tag = generateData(this.rand, TAG_LENGTH);
         Tag[] tags = new Tag[noOfTags];
         for (int n = 0; n < noOfTags; n++) {
-          Tag t = new Tag((byte) n, tag);
+          Tag t = new ArrayBackedTag((byte) n, tag);
           tags[n] = t;
         }
         KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
@@ -1195,7 +1196,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         byte[] tag = generateData(this.rand, TAG_LENGTH);
         Tag[] tags = new Tag[noOfTags];
         for (int n = 0; n < noOfTags; n++) {
-          Tag t = new Tag((byte) n, tag);
+          Tag t = new ArrayBackedTag((byte) n, tag);
           tags[n] = t;
         }
         KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 86d183b..cc202d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -59,10 +59,11 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -367,11 +368,10 @@ public class HFilePrettyPrinter extends Configured implements Tool {
               + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(),
                   cell.getValueLength()));
           int i = 0;
-          List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
+          List<Tag> tags = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(),
               cell.getTagsLength());
           for (Tag tag : tags) {
-            System.out.print(String.format(" T[%d]: %s", i++,
-                Bytes.toStringBinary(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength())));
+            System.out.print(String.format(" T[%d]: %s", i++, TagUtil.getValueAsString(tag)));
           }
         }
         System.out.println();
@@ -411,7 +411,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
           System.err.println("ERROR, wrong value format in mob reference cell "
             + CellUtil.getCellKeyAsString(cell));
         } else {
-          TableName tn = TableName.valueOf(tnTag.getValue());
+          TableName tn = TableName.valueOf(TagUtil.cloneValue(tnTag));
           String mobFileName = MobUtils.getMobFileName(cell);
           boolean exist = mobFileExists(fs, tn, mobFileName,
             Bytes.toString(CellUtil.cloneFamily(cell)), foundMobFiles, missingMobFiles);

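Reading the table-name tag back off a MOB reference cell now composes the new helpers; a sketch, assuming cell is a mob reference cell:

  Tag tnTag = CellUtil.getTag(cell, TagType.MOB_TABLE_NAME_TAG_TYPE);
  if (tnTag != null) {
    TableName tn = TableName.valueOf(TagUtil.cloneValue(tnTag));
    // ... look up the mob file under table tn ...
  }
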
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index c201eb7..d2adbd4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
@@ -169,7 +170,7 @@ public class TextSortReducer extends
           // Add TTL directly to the KV so we can vary them when packing more than one KV
           // into puts
           if (ttl > 0) {
-            tags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl)));
+            tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl)));
           }
           for (int i = 0; i < parsed.getColumnCount(); i++) {
             if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex()

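The TTL tag written above carries a per-cell TTL as an eight-byte long; a sketch of attaching one when building a KeyValue, where row, family, qualifier, ts and value are placeholders:

  List<Tag> tags = new ArrayList<Tag>();
  tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(600000L))); // ten-minute TTL
  KeyValue kv = new KeyValue(row, family, qualifier, ts, value, tags);
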
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index 98dc25e..e14874b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
@@ -170,7 +171,7 @@ extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put>
         // Add TTL directly to the KV so we can vary them when packing more than one KV
         // into puts
         if (ttl > 0) {
-          tags.add(new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl)));
+          tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl)));
         }
       }
       Put put = new Put(rowKey.copyBytes());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index f48bb94..b5f412d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -26,6 +26,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
@@ -167,7 +168,8 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
     byte[] fileName = null;
     StoreFile.Writer mobFileWriter = null, delFileWriter = null;
     long mobCells = 0, deleteMarkersCount = 0;
-    Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName().getName());
+    Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE,
+        store.getTableName().getName());
     long cellsCountCompactedToMob = 0, cellsCountCompactedFromMob = 0;
     long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index ff350bf..999d25c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -166,8 +167,8 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
     // the relative path is mobFiles
     byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
     try {
-      Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName()
-          .getName());
+      Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE,
+          store.getTableName().getName());
       List<Cell> cells = new ArrayList<Cell>();
       boolean hasMore;
       ScannerContext scannerContext =

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
index 4bdfe97..82fc9cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.mob;
 
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
@@ -66,7 +67,7 @@ public final class MobConstants {
 
   public static final String MOB_CACHE_EVICT_PERIOD = "hbase.mob.cache.evict.period";
   public static final String MOB_CACHE_EVICT_REMAIN_RATIO = "hbase.mob.cache.evict.remain.ratio";
-  public static final Tag MOB_REF_TAG = new Tag(TagType.MOB_REFERENCE_TAG_TYPE,
+  public static final Tag MOB_REF_TAG = new ArrayBackedTag(TagType.MOB_REFERENCE_TAG_TYPE,
       HConstants.EMPTY_BYTE_ARRAY);
 
   public static final float DEFAULT_EVICT_REMAIN_RATIO = 0.5f;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index d654788..52a19f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -122,8 +123,7 @@ public final class MobUtils {
    */
   public static boolean isMobReferenceCell(Cell cell) {
     if (cell.getTagsLength() > 0) {
-      Tag tag = Tag.getTag(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength(),
-          TagType.MOB_REFERENCE_TAG_TYPE);
+      Tag tag = CellUtil.getTag(cell, TagType.MOB_REFERENCE_TAG_TYPE);
       return tag != null;
     }
     return false;
@@ -136,9 +136,7 @@ public final class MobUtils {
    */
   public static Tag getTableNameTag(Cell cell) {
     if (cell.getTagsLength() > 0) {
-      Tag tag = Tag.getTag(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength(),
-          TagType.MOB_TABLE_NAME_TAG_TYPE);
-      return tag;
+      return CellUtil.getTag(cell, TagType.MOB_TABLE_NAME_TAG_TYPE);
     }
     return null;
   }
@@ -438,7 +436,7 @@ public final class MobUtils {
     // snapshot for mob files.
     tags.add(tableNameTag);
     // Add the existing tags.
-    tags.addAll(Tag.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()));
+    tags.addAll(CellUtil.getTags(cell));
     int valueLength = cell.getValueLength();
     byte[] refValue = Bytes.add(Bytes.toBytes(valueLength), fileName);
     KeyValue reference = new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),

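Editor's sketch (not part of the commit): the tag-access pattern the MobUtils hunks above switch to. CellUtil.getTag, TagUtil.cloneValue and TagType.MOB_TABLE_NAME_TAG_TYPE appear in the diffs; the wrapper method is invented, and TagUtil is assumed to live in org.apache.hadoop.hbase as used by the HFilePrettyPrinter hunk.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;

public final class MobTagSketch {
  // Returns the table name recorded in a MOB cell's tags, or null if absent.
  static TableName tableNameOf(Cell cell) {
    if (cell.getTagsLength() == 0) {
      return null;
    }
    Tag tag = CellUtil.getTag(cell, TagType.MOB_TABLE_NAME_TAG_TYPE);
    return tag == null ? null : TableName.valueOf(TagUtil.cloneValue(tag));
  }
}
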
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index ab9ee7e..6c6f115 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -113,7 +114,7 @@ public class PartitionedMobCompactor extends MobCompactor {
     Configuration copyOfConf = new Configuration(conf);
     copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
     compactionCacheConfig = new CacheConfig(copyOfConf);
-    tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
+    tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
     cryptoContext = EncryptionUtil.createEncryptionContext(copyOfConf, column);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9b671b3/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
index 3daef7e..5955cc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -156,8 +157,8 @@ public class MemStoreWrapper {
     scanner = snapshot.getScanner();
     scanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
     cell = null;
-    Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, Bytes.toBytes(this.table.getName()
-      .toString()));
+    Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE,
+        Bytes.toBytes(this.table.getName().toString()));
     long updatedCount = 0;
     while (null != (cell = scanner.next())) {
       KeyValue reference = MobUtils.createMobRefKeyValue(cell, referenceValue, tableNameTag);


[07/17] hbase git commit: HBASE-14888 ClusterSchema: Add Namespace Operations

Posted by sy...@apache.org.
HBASE-14888 ClusterSchema: Add Namespace Operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/46303dfd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/46303dfd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/46303dfd

Branch: refs/heads/hbase-12439
Commit: 46303dfd751912371539aa41ee8698dfb5e8b304
Parents: 1c4edd2
Author: stack <st...@apache.org>
Authored: Tue Jan 5 14:35:27 2016 -0800
Committer: stack <st...@apache.org>
Committed: Tue Jan 5 14:35:27 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |  50 +-
 .../hadoop/hbase/client/ClusterConnection.java  |   1 +
 .../hbase/client/ConnectionImplementation.java  |  55 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 231 +++---
 .../coprocessor/BypassCoprocessorException.java |  44 +
 .../java/org/apache/hadoop/hbase/Service.java   |  50 ++
 .../hbase/ServiceNotRunningException.java       |  39 +
 .../apache/hadoop/hbase/ResourceChecker.java    |   2 +-
 .../hbase/protobuf/generated/MasterProtos.java  | 794 +++++++++++++------
 hbase-protocol/src/main/protobuf/Master.proto   |   3 +
 .../java/org/apache/hadoop/hbase/Server.java    |  13 +-
 .../hadoop/hbase/master/ClusterSchema.java      | 131 +++
 .../hbase/master/ClusterSchemaException.java    |  37 +
 .../hbase/master/ClusterSchemaService.java      |  27 +
 .../hbase/master/ClusterSchemaServiceImpl.java  | 131 +++
 .../org/apache/hadoop/hbase/master/HMaster.java | 414 ++++------
 .../hadoop/hbase/master/MasterRpcServices.java  | 139 +---
 .../hadoop/hbase/master/MasterServices.java     |  78 +-
 .../hadoop/hbase/master/ServerManager.java      |   2 +-
 .../hbase/master/TableNamespaceManager.java     |  99 ++-
 .../procedure/CreateNamespaceProcedure.java     |   2 +-
 .../procedure/DeleteNamespaceProcedure.java     |   2 +-
 .../master/procedure/DeleteTableProcedure.java  |   4 +-
 .../procedure/ModifyNamespaceProcedure.java     |   3 +-
 .../master/procedure/ServerCrashProcedure.java  |   2 +-
 .../hbase/namespace/NamespaceStateManager.java  |   6 +-
 .../hbase/regionserver/HRegionServer.java       |  11 +-
 .../regionserver/ReplicationSyncUp.java         |   6 +
 .../resources/hbase-webapps/master/table.jsp    |   4 +-
 .../hadoop/hbase/MockRegionServerServices.java  |   6 +
 .../org/apache/hadoop/hbase/TestNamespace.java  |   2 +-
 .../client/TestShortCircuitConnection.java      |   2 +-
 .../hbase/coprocessor/TestMasterObserver.java   |  24 +-
 .../hadoop/hbase/master/MockRegionServer.java   |   6 +
 .../hbase/master/TestActiveMasterManager.java   |   6 +
 .../hadoop/hbase/master/TestCatalogJanitor.java |  73 +-
 .../hbase/master/TestClockSkewDetection.java    |   6 +
 .../hbase/master/TestMasterNoCluster.java       |  23 +-
 .../hbase/master/TestSplitLogManager.java       |   6 +
 .../balancer/TestRegionLocationFinder.java      |   5 +-
 .../hbase/master/cleaner/TestHFileCleaner.java  |   6 +
 .../master/cleaner/TestHFileLinkCleaner.java    |   6 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |   6 +
 .../cleaner/TestReplicationHFileCleaner.java    |   6 +
 .../MasterProcedureTestingUtility.java          |   4 +-
 .../regionserver/TestHeapMemoryManager.java     |   6 +
 .../hbase/regionserver/TestSplitLogWorker.java  |   6 +
 .../replication/TestReplicationStateZKImpl.java |   6 +
 .../TestReplicationTrackerZKImpl.java           |   7 +
 .../TestReplicationSourceManager.java           |   6 +
 .../security/token/TestTokenAuthentication.java |   6 +
 .../apache/hadoop/hbase/util/MockServer.java    |   6 +
 52 files changed, 1615 insertions(+), 995 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index b06902a..d7b52d5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -965,49 +965,77 @@ public interface Admin extends Abortable, Closeable {
   Configuration getConfiguration();
 
   /**
-   * Create a new namespace
+   * Create a new namespace. Blocks until namespace has been successfully created or an exception
+   * is thrown.
    *
    * @param descriptor descriptor which describes the new namespace
-   * @throws IOException
    */
   void createNamespace(final NamespaceDescriptor descriptor)
-      throws IOException;
+  throws IOException;
 
   /**
-   * Modify an existing namespace
+   * Create a new namespace
+   *
+   * @param descriptor descriptor which describes the new namespace
+   * @return the result of the async create namespace operation. Use Future.get(long, TimeUnit) to
+   *  wait on the operation to complete.
+   */
+  Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor)
+  throws IOException;
+
+  /**
+   * Modify an existing namespace.  Blocks until namespace has been successfully modified or an
+   * exception is thrown.
    *
    * @param descriptor descriptor which describes the new namespace
-   * @throws IOException
    */
   void modifyNamespace(final NamespaceDescriptor descriptor)
-      throws IOException;
+  throws IOException;
+
+  /**
+   * Modify an existing namespace
+   *
+   * @param descriptor descriptor which describes the new namespace
+   * @return the result of the async modify namespace operation. Use Future.get(long, TimeUnit) to
+   *  wait on the operation to complete.
+   */
+  Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor)
+  throws IOException;
 
   /**
    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
+   * Blocks until namespace has been successfully deleted or an
+   * exception is thrown.
    *
    * @param name namespace name
-   * @throws IOException
    */
   void deleteNamespace(final String name) throws IOException;
 
   /**
+   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
+   *
+   * @param name namespace name
+   * @return the result of the async delete namespace operation. Use Future.get(long, TimeUnit) to
+   *  wait on the operation to complete.
+   */
+  Future<Void> deleteNamespaceAsync(final String name) throws IOException;
+
+  /**
    * Get a namespace descriptor by name
    *
    * @param name name of namespace descriptor
    * @return A descriptor
-   * @throws IOException
    */
   NamespaceDescriptor getNamespaceDescriptor(final String name)
-      throws IOException;
+  throws IOException;
 
   /**
    * List available namespace descriptors
    *
    * @return List of descriptors
-   * @throws IOException
    */
   NamespaceDescriptor[] listNamespaceDescriptors()
-    throws IOException;
+  throws IOException;
 
   /**
    * Get list of table descriptors by namespace

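Editor's sketch (not part of the commit): how a client might use the blocking and async namespace methods added to Admin above. The connection boilerplate and the "demo_ns" name are illustrative; createNamespace, deleteNamespaceAsync and the Future.get(long, TimeUnit) wait are as documented in the diff.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Blocking form: returns once the namespace exists, or throws.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      // Async form: fire the operation, then bound the wait ourselves.
      Future<Void> future = admin.deleteNamespaceAsync("demo_ns");
      future.get(60, TimeUnit.SECONDS);
    }
  }
}
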
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 99071fa..741989f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -1,5 +1,6 @@
 /**
  *
+
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 0ef2a17..ecac792 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -150,8 +150,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   //  be waiting for the master lock => deadlock.
   private final Object masterAndZKLock = new Object();
 
-  private long keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
-
   // thread executor shared by all HTableInterface instances created
   // by this connection
   private volatile ExecutorService batchPool = null;
@@ -398,7 +396,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       synchronized (this) {
         if (batchPool == null) {
           this.batchPool = getThreadPool(conf.getInt("hbase.hconnection.threads.max", 256),
-              conf.getInt("hbase.hconnection.threads.core", 256), "-shared-", null);
+              conf.getInt("hbase.hconnection.threads.core", 256), "-shared", null);
           this.cleanupPool = true;
         }
       }
@@ -482,7 +480,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
 
   /**
    * @return The cluster registry implementation to use.
-   * @throws java.io.IOException
    */
   private Registry setupRegistry() throws IOException {
     return RegistryFactory.getRegistry(this);
@@ -542,7 +539,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   /**
    * @return true if the master is running, throws an exception otherwise
    * @throws org.apache.hadoop.hbase.MasterNotRunningException - if the master is not running
-   * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
    * @deprecated this has been deprecated without a replacement
    */
   @Deprecated
@@ -981,9 +977,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   /**
    * Search the cache for a location that fits our table and row key.
    * Return null if no suitable region is located.
-   *
-   * @param tableName
-   * @param row
    * @return Null or region location found in cache.
    */
   RegionLocations getCachedLocation(final TableName tableName,
@@ -1181,13 +1174,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
 
     /**
      * Make stub and cache it internal so can be used later doing the isMasterRunning call.
-     * @param channel
      */
     protected abstract Object makeStub(final BlockingRpcChannel channel);
 
     /**
      * Once setup, check it works by doing isMasterRunning check.
-     * @throws com.google.protobuf.ServiceException
      */
     protected abstract void isMasterRunning() throws ServiceException;
 
@@ -1195,9 +1186,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
      * Create a stub. Try once only.  It is not typed because there is no common type to
      * protobuf services nor their interfaces.  Let the caller do appropriate casting.
      * @return A stub for master services.
-     * @throws java.io.IOException
-     * @throws org.apache.zookeeper.KeeperException
-     * @throws com.google.protobuf.ServiceException
      */
     private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
       ZooKeeperKeepAliveConnection zkw;
@@ -1370,10 +1358,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
 
   private ZooKeeperKeepAliveConnection keepAliveZookeeper;
   private AtomicInteger keepAliveZookeeperUserCount = new AtomicInteger(0);
-  private boolean canCloseZKW = true;
-
-  // keepAlive time, in ms. No reason to make it configurable.
-  private static final long keepAlive = 5 * 60 * 1000;
 
   /**
    * Retrieve a shared ZooKeeperWatcher. You must close it it once you've have finished with it.
@@ -1391,7 +1375,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
         keepAliveZookeeper = new ZooKeeperKeepAliveConnection(conf, this.toString(), this);
       }
       keepAliveZookeeperUserCount.addAndGet(1);
-      keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
       return keepAliveZookeeper;
     }
   }
@@ -1400,9 +1383,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     if (zkw == null){
       return;
     }
-    if (keepAliveZookeeperUserCount.addAndGet(-1) <= 0) {
-      keepZooKeeperWatcherAliveUntil = System.currentTimeMillis() + keepAlive;
-    }
   }
 
   private void closeZooKeeperWatcher() {
@@ -1820,7 +1800,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     };
   }
 
-
   private static void release(MasterServiceState mss) {
     if (mss != null && mss.connection != null) {
       ((ConnectionImplementation)mss.connection).releaseMaster(mss);
@@ -1893,7 +1872,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * or delete it from the cache. Does nothing if we can be sure from the exception that
    * the location is still accurate, or if the cache has already been updated.
    * @param exception an object (to simplify user code) on which we will try to find a nested
-   *                  or wrapped or both RegionMovedException
+   *  or wrapped or both RegionMovedException
    * @param source server that is the source of the location update.
    */
   @Override
@@ -1964,7 +1943,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * @deprecated since 0.96 - Use {@link org.apache.hadoop.hbase.client.HTableInterface#batch} instead
+   * @deprecated since 0.96 Use {@link org.apache.hadoop.hbase.client.HTableInterface#batch} instead
    */
   @Override
   @Deprecated
@@ -1999,8 +1978,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * If the method returns it means that there is no error, and the 'results' array will
    * contain no exception. On error, an exception is thrown, and the 'results' array will
    * contain results and exceptions.
-   * @deprecated since 0.96 -
-   *   Use {@link org.apache.hadoop.hbase.client.HTable#processBatchCallback} instead
+   * @deprecated since 0.96
+   *  Use {@link org.apache.hadoop.hbase.client.HTable#processBatchCallback} instead
    */
   @Override
   @Deprecated
@@ -2225,7 +2204,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} instead
+   * @deprecated Use {@link
+   *  org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} instead
    */
   @Deprecated
   @Override
@@ -2245,12 +2225,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} instead
+   * @deprecated Use
+   *  {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)}
+   *  instead
    */
   @Deprecated
   @Override
-  public HTableDescriptor[] getHTableDescriptors(
-      List<String> names) throws IOException {
+  public HTableDescriptor[] getHTableDescriptors(List<String> names) throws IOException {
     List<TableName> tableNames = new ArrayList<TableName>(names.size());
     for(String name : names) {
       tableNames.add(TableName.valueOf(name));
@@ -2269,7 +2250,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * @param tableName table name
    * @throws java.io.IOException if the connection to master fails or if the table
    *  is not found.
-   * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)} instead
+   * @deprecated Use {@link
+   *  org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)}
+   *  instead
    */
   @Deprecated
   @Override
@@ -2294,7 +2277,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)} instead
+   * @deprecated Use {@link
+   *  org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)}
+   *  instead
    */
   @Deprecated
   @Override
@@ -2306,10 +2291,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   @Override
   public TableState getTableState(TableName tableName) throws IOException {
     if (this.closed) throw new IOException(toString() + " closed");
-
     TableState tableState = MetaTableAccessor.getTableState(this, tableName);
-    if (tableState == null)
-      throw new TableNotFoundException(tableName);
+    if (tableState == null) throw new TableNotFoundException(tableName);
     return tableState;
   }
 
@@ -2318,4 +2301,4 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return RpcRetryingCallerFactory
         .instantiate(conf, this.interceptor, this.getStatisticsTracker());
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 7a50458..db94ff4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -28,12 +28,12 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
@@ -95,11 +95,13 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
@@ -135,6 +137,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTi
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
@@ -254,23 +257,10 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public boolean abortProcedure(
-      final long procId,
-      final boolean mayInterruptIfRunning) throws IOException {
-    Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning);
-    try {
-      return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled");
-    } catch (TimeoutException e) {
-      throw new TimeoutIOException(e);
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException)e.getCause();
-      } else {
-        throw new IOException(e.getCause());
-      }
-    }
+  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
+  throws IOException {
+    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
+      TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -464,22 +454,7 @@ public class HBaseAdmin implements Admin {
   @Override
   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
       throws IOException {
-    Future<Void> future = createTableAsync(desc, splitKeys);
-    try {
-      // TODO: how long should we wait? spin forever?
-      future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Interrupted when waiting" +
-          " for table to be enabled; meta scan was done");
-    } catch (TimeoutException e) {
-      throw new TimeoutIOException(e);
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException)e.getCause();
-      } else {
-        throw new IOException(e.getCause());
-      }
-    }
+    get(createTableAsync(desc, splitKeys), syncWaitTimeout, TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -550,20 +525,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public void deleteTable(final TableName tableName) throws IOException {
-    Future<Void> future = deleteTableAsync(tableName);
-    try {
-      future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
-    } catch (TimeoutException e) {
-      throw new TimeoutIOException(e);
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException)e.getCause();
-      } else {
-        throw new IOException(e.getCause());
-      }
-    }
+    get(deleteTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -641,21 +603,7 @@ public class HBaseAdmin implements Admin {
   @Override
   public void truncateTable(final TableName tableName, final boolean preserveSplits)
       throws IOException {
-    Future<Void> future = truncateTableAsync(tableName, preserveSplits);
-    try {
-      future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Interrupted when waiting for table " + tableName
-          + " to be enabled.");
-    } catch (TimeoutException e) {
-      throw new TimeoutIOException(e);
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      } else {
-        throw new IOException(e.getCause());
-      }
-    }
+    get(truncateTableAsync(tableName, preserveSplits), syncWaitTimeout, TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -717,20 +665,7 @@ public class HBaseAdmin implements Admin {
   @Override
   public void enableTable(final TableName tableName)
   throws IOException {
-    Future<Void> future = enableTableAsync(tableName);
-    try {
-      future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
-    } catch (TimeoutException e) {
-      throw new TimeoutIOException(e);
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException)e.getCause();
-      } else {
-        throw new IOException(e.getCause());
-      }
-    }
+    get(enableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
   }
 
   /**
@@ -833,20 +768,7 @@ public class HBaseAdmin implements Admin {
   @Override
   public void disableTable(final TableName tableName)
   throws IOException {
-    Future<Void> future = disableTableAsync(tableName);
-    try {
-      future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
-    } catch (TimeoutException e) {
-      throw new TimeoutIOException(e);
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException)e.getCause();
-      } else {
-        throw new IOException(e.getCause());
-      }
-    }
+    get(disableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -1841,43 +1763,103 @@ public class HBaseAdmin implements Admin {
     return this.conf;
   }
 
+  /**
+   * Do a get with a timeout against the passed in <code>future</code>.
+   */
+  private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units)
+  throws IOException {
+    try {
+      // TODO: how long should we wait? Spin forever?
+      return future.get(timeout, units);
+    } catch (InterruptedException e) {
+      throw new InterruptedIOException("Interrupted while waiting on " + future);
+    } catch (TimeoutException e) {
+      throw new TimeoutIOException(e);
+    } catch (ExecutionException e) {
+      if (e.getCause() instanceof IOException) {
+        throw (IOException)e.getCause();
+      } else {
+        throw new IOException(e.getCause());
+      }
+    }
+  }
+
   @Override
-  public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection()) {
+  public void createNamespace(final NamespaceDescriptor descriptor)
+  throws IOException {
+    get(createNamespaceAsync(descriptor), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor)
+  throws IOException {
+    CreateNamespaceResponse response =
+        executeCallable(new MasterCallable<CreateNamespaceResponse>(getConnection()) {
       @Override
-      public Void call(int callTimeout) throws Exception {
-        master.createNamespace(null,
+      public CreateNamespaceResponse call(int callTimeout) throws Exception {
+        return master.createNamespace(null,
           CreateNamespaceRequest.newBuilder()
             .setNamespaceDescriptor(ProtobufUtil
               .toProtoNamespaceDescriptor(descriptor)).build()
         );
-        return null;
       }
     });
+    return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) {
+      @Override
+      public String getOperationType() {
+        return "CREATE_NAMESPACE";
+      }
+    };
   }
 
   @Override
-  public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection()) {
+  public void modifyNamespace(final NamespaceDescriptor descriptor)
+  throws IOException {
+    get(modifyNamespaceAsync(descriptor), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor)
+  throws IOException {
+    ModifyNamespaceResponse response =
+        executeCallable(new MasterCallable<ModifyNamespaceResponse>(getConnection()) {
       @Override
-      public Void call(int callTimeout) throws Exception {
-        master.modifyNamespace(null, ModifyNamespaceRequest.newBuilder().
+      public ModifyNamespaceResponse call(int callTimeout) throws Exception {
+        return master.modifyNamespace(null, ModifyNamespaceRequest.newBuilder().
           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
-        return null;
       }
     });
+    return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) {
+      @Override
+      public String getOperationType() {
+        return "MODIFY_NAMESPACE";
+      }
+    };
   }
 
   @Override
-  public void deleteNamespace(final String name) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection()) {
+  public void deleteNamespace(final String name)
+  throws IOException {
+    get(deleteNamespaceAsync(name), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> deleteNamespaceAsync(final String name)
+  throws IOException {
+    DeleteNamespaceResponse response =
+        executeCallable(new MasterCallable<DeleteNamespaceResponse>(getConnection()) {
       @Override
-      public Void call(int callTimeout) throws Exception {
-        master.deleteNamespace(null, DeleteNamespaceRequest.newBuilder().
+      public DeleteNamespaceResponse call(int callTimeout) throws Exception {
+        return master.deleteNamespace(null, DeleteNamespaceRequest.newBuilder().
           setNamespaceName(name).build());
-        return null;
       }
     });
+    return new NamespaceFuture(this, name, response.getProcId()) {
+      @Override
+      public String getOperationType() {
+        return "DELETE_NAMESPACE";
+      }
+    };
   }
 
   @Override
@@ -3184,6 +3166,11 @@ public class HBaseAdmin implements Admin {
       this.tableName = tableName;
     }
 
+    @Override
+    public String toString() {
+      return getDescription();
+    }
+
     /**
      * @return the table name
      */
@@ -3222,7 +3209,7 @@ public class HBaseAdmin implements Admin {
       @Override
       public void throwTimeoutException(long elapsedTime) throws TimeoutException {
         throw new TimeoutException("The operation: " + getOperationType() + " on table: " +
-            tableName.getNameAsString() + " not completed after " + elapsedTime + "msec");
+            tableName.getNameAsString() + " has not completed after " + elapsedTime + "ms");
       }
     }
 
@@ -3344,6 +3331,34 @@ public class HBaseAdmin implements Admin {
     }
   }
 
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
+  protected static abstract class NamespaceFuture extends ProcedureFuture<Void> {
+    private final String namespaceName;
+
+    public NamespaceFuture(final HBaseAdmin admin, final String namespaceName, final Long procId) {
+      super(admin, procId);
+      this.namespaceName = namespaceName;
+    }
+
+    /**
+     * @return the namespace name
+     */
+    protected String getNamespaceName() {
+      return namespaceName;
+    }
+
+    /**
+     * @return the operation type like CREATE_NAMESPACE, DELETE_NAMESPACE, etc.
+     */
+    public abstract String getOperationType();
+
+    @Override
+    public String toString() {
+      return "Operation: " + getOperationType() + ", Namespace: " + getNamespaceName();
+    }
+  }
+
   @Override
   public List<SecurityCapability> getSecurityCapabilities() throws IOException {
     try {

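Editor's sketch (not part of the commit): the Future-unwrapping idiom that the HBaseAdmin refactor above centralizes into its private get(future, timeout, units) helper, shown standalone. The class and method names are invented, and plain IOException stands in for HBase's TimeoutIOException.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class FutureUnwrapSketch {
  // Wait on a Future and translate every failure mode into IOException,
  // mirroring what each synchronous Admin method used to do inline.
  static <T> T get(Future<T> future, long timeout, TimeUnit unit) throws IOException {
    try {
      return future.get(timeout, unit);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve the interrupt status
      throw new InterruptedIOException("Interrupted while waiting on " + future);
    } catch (TimeoutException e) {
      throw new IOException(e);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      throw cause instanceof IOException ? (IOException) cause : new IOException(cause);
    }
  }
}
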
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
new file mode 100644
index 0000000..3b01a9e
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
@@ -0,0 +1,44 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Thrown if a coprocessor rules that we should bypass an operation
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class BypassCoprocessorException extends CoprocessorException {
+  private static final long serialVersionUID = 5943889011582357043L;
+
+  /** Default Constructor */
+  public BypassCoprocessorException() {
+    super();
+  }
+
+  /**
+   * Constructs the exception and supplies a string as the message
+   * @param s - message
+   */
+  public BypassCoprocessorException(String s) {
+    super(s);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-common/src/main/java/org/apache/hadoop/hbase/Service.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Service.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Service.java
new file mode 100644
index 0000000..97d93cc
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Service.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Simple Service.
+ */
+// This is a WIP. We have Services throughout hbase. Either have all implement what is here or
+// just remove this as an experiment that did not work out.
+// TODO: Move on to guava Service after we update our guava version; later guava has nicer
+// Service implementation.
+// TODO: Move all Services on to this one Interface.
+@InterfaceAudience.Private
+public interface Service {
+  /**
+   * Initiates service startup (if necessary), returning once the service has finished starting.
+   * @throws IOException Throws exception if already running or if we fail to start successfully.
+   */
+  void startAndWait() throws IOException;
+
+  /**
+   * @return True if this Service is running.
+   */
+  boolean isRunning();
+
+  /**
+   * Initiates service shutdown (if necessary), returning once the service has finished stopping.
+   * @throws IOException Throws exception if not running or if we fail to stop successfully.
+   */
+  void stopAndWait() throws IOException;
+}

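Editor's sketch (not part of the commit): a minimal implementation of the new Service interface, with an invented class name. A real service would complete its startup and shutdown work inside startAndWait and stopAndWait before returning.

import java.io.IOException;

import org.apache.hadoop.hbase.Service;

public class CountingServiceSketch implements Service {
  private volatile boolean running;

  @Override
  public void startAndWait() throws IOException {
    if (running) throw new IOException("Already running");
    running = true; // startup work would happen here, before we return
  }

  @Override
  public boolean isRunning() {
    return running;
  }

  @Override
  public void stopAndWait() throws IOException {
    if (!running) throw new IOException("Not running");
    running = false;
  }
}
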
http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-common/src/main/java/org/apache/hadoop/hbase/ServiceNotRunningException.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServiceNotRunningException.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServiceNotRunningException.java
new file mode 100644
index 0000000..f6325ea
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServiceNotRunningException.java
@@ -0,0 +1,39 @@
+package org.apache.hadoop.hbase;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+public class ServiceNotRunningException extends HBaseIOException {
+  public ServiceNotRunningException() {
+  }
+
+  public ServiceNotRunningException(String message) {
+    super(message);
+  }
+
+  public ServiceNotRunningException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  public ServiceNotRunningException(Throwable cause) {
+    super(cause);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/46303dfd/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
index 539aea3..ee0380a 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
@@ -41,7 +41,7 @@ public class ResourceChecker {
 
   /**
    * Constructor
-   * @param tagLine - the tagLine is added to the logs. Must be be null.
+   * @param tagLine The tagLine is added to the logs. Must not be null.
    */
   public ResourceChecker(final String tagLine) {
     this.tagLine = tagLine;


[09/17] hbase git commit: HBASE-14468 addendum.

Posted by sy...@apache.org.
HBASE-14468 addendum.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e8fbc9b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e8fbc9b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e8fbc9b4

Branch: refs/heads/hbase-12439
Commit: e8fbc9b43a3742358e0bdfe441ff4ca9d14e127b
Parents: 72d32cc
Author: Lars Hofhansl <la...@apache.org>
Authored: Tue Jan 5 15:54:34 2016 -0800
Committer: Lars Hofhansl <la...@apache.org>
Committed: Tue Jan 5 15:54:34 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java    | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e8fbc9b4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4472b65..2e42acb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1607,21 +1607,8 @@ public class HMaster extends HRegionServer implements MasterServices {
             ExploringCompactionPolicy.class.getName());
     }
 
-    long majorCompactionPeriod = Long.MAX_VALUE;
-    String sv = htd.getConfigurationValue(HConstants.MAJOR_COMPACTION_PERIOD);
-    if (sv != null) {
-      majorCompactionPeriod = Long.parseLong(sv);
-    } else {
-      majorCompactionPeriod =
-          conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, majorCompactionPeriod);
-    }
-    String splitPolicyClassName = htd.getRegionSplitPolicyClassName();
-    if (splitPolicyClassName == null) {
-      splitPolicyClassName = conf.get(HConstants.HBASE_REGION_SPLIT_POLICY_KEY);
-    }
-
     int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
-    sv = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
+    String sv = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
     if (sv != null) {
       blockingFileCount = Integer.parseInt(sv);
     } else {
@@ -1634,7 +1621,7 @@ public class HMaster extends HRegionServer implements MasterServices {
       if (compactionPolicy == null) {
         compactionPolicy = className;
       }
-      if (className.equals(FIFOCompactionPolicy.class.getName()) == false) {
+      if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
         continue;
       }
       // FIFOCompaction

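Editor's sketch (not part of the commit): the table-descriptor-then-site-configuration fallback the hunk above keeps for the blocking store file count. The wrapper is invented and the literal key is assumed to match HStore.BLOCKING_STOREFILES_KEY.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;

public final class ConfigFallbackSketch {
  // Prefer the value set on the table descriptor; else fall back to hbase-site.
  static int blockingFileCount(HTableDescriptor htd, Configuration conf, int deflt) {
    String sv = htd.getConfigurationValue("hbase.hstore.blockingStoreFiles");
    return sv != null ? Integer.parseInt(sv)
        : conf.getInt("hbase.hstore.blockingStoreFiles", deflt);
  }
}
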

[02/17] hbase git commit: HBASE-14524 Short-circuit comparison of rows in CellComparator. (Lars Francke)

Posted by sy...@apache.org.
HBASE-14524 Short-circuit comparison of rows in CellComparator. (Lars Francke)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9997e4ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9997e4ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9997e4ee

Branch: refs/heads/hbase-12439
Commit: 9997e4ee52136a681faa825dea46bd1162d71eca
Parents: 998b937
Author: anoopsjohn <an...@gmail.com>
Authored: Tue Jan 5 08:47:23 2016 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Tue Jan 5 08:47:23 2016 +0530

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/CellComparator.java    | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9997e4ee/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index 7002762..b179963 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -315,6 +315,10 @@ public class CellComparator implements Comparator<Cell>, Serializable {
    * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
    */
   public int compareRows(final Cell left, final Cell right) {
+    // left and right can be exactly the same at the beginning of a row
+    if (left == right) {
+      return 0;
+    }
     if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) {
       return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getRowByteBuffer(),
           ((ByteBufferedCell) left).getRowPosition(), left.getRowLength(),

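Editor's sketch (not part of the commit): the reference-equality short circuit HBASE-14524 adds, shown on a plain unsigned lexicographic byte[] compare with invented names. When both arguments are the same object the result is 0 by definition, so the byte walk can be skipped.

public final class ShortCircuitCompareSketch {
  static int compare(byte[] left, byte[] right) {
    if (left == right) {
      return 0; // same reference: equal by definition, skip the comparison
    }
    int len = Math.min(left.length, right.length);
    for (int i = 0; i < len; i++) {
      int diff = (left[i] & 0xff) - (right[i] & 0xff);
      if (diff != 0) {
        return diff;
      }
    }
    return left.length - right.length;
  }
}
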

[16/17] hbase git commit: HBASE-15073 Finer grained control over normalization actions for RegionNormalizer

Posted by sy...@apache.org.
HBASE-15073 Finer grained control over normalization actions for RegionNormalizer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d65978fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d65978fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d65978fc

Branch: refs/heads/hbase-12439
Commit: d65978fceb85dd59e7fd66e2a93832a452e4c648
Parents: a9b671b
Author: tedyu <yu...@gmail.com>
Authored: Wed Jan 6 17:25:41 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Jan 6 17:25:41 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HTableDescriptor.java   | 60 +++++++++++++-------
 .../hbase/normalizer/NormalizationPlan.java     | 45 +++++++++++++++
 .../hadoop/hbase/master/AssignmentManager.java  |  2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 22 ++++---
 .../normalizer/EmptyNormalizationPlan.java      |  2 +-
 .../normalizer/MergeNormalizationPlan.java      |  2 +-
 .../master/normalizer/NormalizationPlan.java    | 45 ---------------
 .../master/normalizer/RegionNormalizer.java     |  9 ++-
 .../normalizer/SimpleRegionNormalizer.java      | 14 +++--
 .../normalizer/SplitNormalizationPlan.java      |  2 +-
 .../normalizer/TestSimpleRegionNormalizer.java  | 47 +++++++++++----
 .../TestSimpleRegionNormalizerOnCluster.java    |  6 +-
 hbase-shell/src/main/ruby/hbase/admin.rb        |  5 +-
 .../src/main/ruby/shell/commands/normalize.rb   |  2 +-
 .../ruby/shell/commands/normalizer_switch.rb    |  3 +-
 15 files changed, 164 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 1bd4e07..0fb0455 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
@@ -185,13 +187,14 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
 
   /**
    * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
-   * attribute which denotes if the table should be treated by region normalizer.
+   * attribute which denotes the allowed types of action (split/merge) when the table is treated
+   * by the region normalizer.
    *
-   * @see #isNormalizationEnabled()
+   * @see #getDesiredNormalizationTypes()
    */
-  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
-  private static final Bytes NORMALIZATION_ENABLED_KEY =
-    new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
+  public static final String NORMALIZATION_MODE = "NORMALIZATION_MODE";
+  private static final Bytes NORMALIZATION_MODE_KEY =
+    new Bytes(Bytes.toBytes(NORMALIZATION_MODE));
 
   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
@@ -220,11 +223,6 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
 
   /**
-   * Constant that denotes whether the table is normalized by default.
-   */
-  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
-
-  /**
    * Constant that denotes the maximum default size of the memstore after which
    * the contents are flushed to the store files
    */
@@ -249,7 +247,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
-    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
+    DEFAULT_VALUES.put(NORMALIZATION_MODE, "");
     for (String s : DEFAULT_VALUES.keySet()) {
       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
     }
@@ -640,22 +638,42 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
   }
 
   /**
-   * Check if normalization enable flag of the table is true. If flag is
-   * false then no region normalizer won't attempt to normalize this table.
+   * Check the normalization mode flag of the table. If the flag is
+   * empty then the region normalizer won't attempt to normalize this table.
    *
-   * @return true if region normalization is enabled for this table
+   * @return list of PlanType values if region normalization is enabled for this table;
+   *         null means region normalization is disabled
    */
-  public boolean isNormalizationEnabled() {
-    return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
+  public List<PlanType> getDesiredNormalizationTypes() {
+    byte [] value = getValue(NORMALIZATION_MODE_KEY);
+    if (value == null) {
+      return null;
+    }
+    String strValue = Bytes.toString(value);
+    if (strValue.isEmpty()) {
+      return null;
+    }
+    List<NormalizationPlan.PlanType> types = new ArrayList<>();
+    if (strValue.toUpperCase().contains("M")) {
+      types.add(PlanType.MERGE);
+    }
+    if (strValue.toUpperCase().contains("S")) {
+      types.add(PlanType.SPLIT);
+    }
+    return types;
   }
 
   /**
-   * Setting the table normalization enable flag.
+   * Set the types of action for the table normalization mode flag.
    *
-   * @param isEnable True if enable normalization.
-   */
-  public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
-    setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
+   * @param types String containing desired types of action:
+   *        "M" for region merge
+   *        "S" for region split
+   *        "MS" for region merge / split
+   */
+  public HTableDescriptor setNormalizationMode(final String types) {
+    setValue(NORMALIZATION_MODE_KEY, types == null || types.isEmpty() ? null :
+      new Bytes(Bytes.toBytes(types.toUpperCase())));
     return this;
   }
 

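Taken together, the new accessors treat the flag as a small character set: a stored value containing "M" allows merges, "S" allows splits, and an empty or absent value disables normalization for the table. A hedged usage sketch (the table name is illustrative; admin is an org.apache.hadoop.hbase.client.Admin):

  TableName tn = TableName.valueOf("example_table");
  HTableDescriptor htd = admin.getTableDescriptor(tn);
  htd.setNormalizationMode("MS");      // allow both merge and split
  admin.modifyTable(tn, htd);

  // getDesiredNormalizationTypes() now returns [MERGE, SPLIT];
  // "S" alone would yield [SPLIT], and "" or null would yield null.
  List<PlanType> types = htd.getDesiredNormalizationTypes();
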
http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-client/src/main/java/org/apache/hadoop/hbase/normalizer/NormalizationPlan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/normalizer/NormalizationPlan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/normalizer/NormalizationPlan.java
new file mode 100644
index 0000000..66481e6
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/normalizer/NormalizationPlan.java
@@ -0,0 +1,45 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.normalizer;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+
+/**
+ * Interface for normalization plan.
+ */
+@InterfaceAudience.Private
+public interface NormalizationPlan {
+  enum PlanType {
+    SPLIT,
+    MERGE,
+    NONE
+  }
+
+  /**
+   * Executes normalization plan on cluster (does actual splitting/merging work).
+   * @param admin instance of Admin
+   */
+  void execute(Admin admin);
+
+  /**
+   * @return the type of this plan
+   */
+  PlanType getType();
+}

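The relocated interface is intentionally small: a plan reports its PlanType and knows how to execute itself against an Admin. A hypothetical no-op implementation (not part of this commit; EmptyNormalizationPlan plays this role in hbase-server) could look like:

  @InterfaceAudience.Private
  public class NoOpNormalizationPlan implements NormalizationPlan {
    @Override
    public void execute(Admin admin) {
      // Deliberately empty: nothing to split or merge.
    }

    @Override
    public PlanType getType() {
      return PlanType.NONE;
    }
  }
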
http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 4feb2e7..c319bb1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -72,7 +72,7 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 2e42acb..4a9b792 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
@@ -114,6 +113,8 @@ import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@@ -1323,14 +1324,21 @@ public class HMaster extends HRegionServer implements MasterServices {
 
       for (TableName table : allEnabledTables) {
         TableDescriptor tblDesc = getTableDescriptors().getDescriptor(table);
-        if (table.isSystemTable() || (tblDesc != null &&
-            tblDesc.getHTableDescriptor() != null &&
-            !tblDesc.getHTableDescriptor().isNormalizationEnabled())) {
-          LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
-            + " table or doesn't have auto normalization turned on");
+        if (table.isSystemTable()) {
+          LOG.debug("Skipping normalization for table: " + table + ", as it's a system table");
           continue;
         }
-        NormalizationPlan plan = this.normalizer.computePlanForTable(table);
+        List<PlanType> types = null;
+        if (tblDesc != null &&
+            tblDesc.getHTableDescriptor() != null) {
+          types = tblDesc.getHTableDescriptor().getDesiredNormalizationTypes();
+          if (types == null) {
+            LOG.debug("Skipping normalization for table: " + table + ", as it"
+                + " doesn't have auto normalization turned on");
+            continue;
+          }
+        }
+        NormalizationPlan plan = this.normalizer.computePlanForTable(table, types);
         plan.execute(clusterConnection.getAdmin());
       }
     }

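The reworked chore loop now makes two separate decisions per table: system tables are always skipped, and user tables are skipped when their descriptor yields no desired plan types; note that a table with no descriptor at all falls through with types == null. A hedged restatement of that decision as a standalone helper (names are illustrative, not part of the commit):

  private static boolean shouldNormalize(TableName table, HTableDescriptor htd) {
    if (table.isSystemTable()) {
      return false;  // system tables are never normalized
    }
    if (htd != null && htd.getDesiredNormalizationTypes() == null) {
      return false;  // NORMALIZATION_MODE unset or empty
    }
    return true;     // a plan will be computed and executed
  }
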
http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/EmptyNormalizationPlan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/EmptyNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/EmptyNormalizationPlan.java
index 5aecc48..29cc0c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/EmptyNormalizationPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/EmptyNormalizationPlan.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master.normalizer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
 
 /**
  * Plan which signifies that no normalization is required,

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
index e2035bb..f3ce1d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
@@ -23,7 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java
deleted file mode 100644
index 9f866d3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.normalizer;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-
-/**
- * Interface for normalization plan.
- */
-@InterfaceAudience.Private
-public interface NormalizationPlan {
-  enum PlanType {
-    SPLIT,
-    MERGE,
-    NONE
-  }
-
-  /**
-   * Executes normalization plan on cluster (does actual splitting/merging work).
-   * @param admin instance of Admin
-   */
-  void execute(Admin admin);
-
-  /**
-   * @return the type of this plan
-   */
-  PlanType getType();
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
index d60474d..616098e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
@@ -18,12 +18,15 @@
  */
 package org.apache.hadoop.hbase.master.normalizer;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 
 /**
  * Performs "normalization" of regions on the cluster, making sure that suboptimal
@@ -47,9 +50,11 @@ public interface RegionNormalizer {
   /**
    * Computes next optimal normalization plan.
    * @param table table to normalize
+   * @param types desired types of NormalizationPlan
    * @return Next (perhaps most urgent) normalization action to perform
    */
-  NormalizationPlan computePlanForTable(TableName table) throws HBaseIOException;
+  NormalizationPlan computePlanForTable(TableName table, List<PlanType> types)
+      throws HBaseIOException;
 
   /**
    * Notification for the case where plan couldn't be executed due to constraint violation, such as

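With the added parameter, callers state up front which plan types they are willing to act on, and the normalizer only proposes actions from that list. A hedged call sketch mirroring the test usage later in this commit:

  List<PlanType> allowed = new ArrayList<>();
  allowed.add(PlanType.SPLIT);  // splits only; merge candidates are ignored
  NormalizationPlan plan = normalizer.computePlanForTable(table, allowed);
  plan.execute(admin);          // no-op when an EmptyNormalizationPlan is returned
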
http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index fe10bd1..a035647 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -27,7 +27,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 import org.apache.hadoop.hbase.util.Triple;
 
 import java.util.ArrayList;
@@ -60,7 +61,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
   private static final Log LOG = LogFactory.getLog(SimpleRegionNormalizer.class);
   private static final int MIN_REGION_COUNT = 3;
   private MasterServices masterServices;
-  private static long[] skippedCount = new long[NormalizationPlan.PlanType.values().length];
+  private static long[] skippedCount = new long[PlanType.values().length];
 
   /**
    * Set the master service.
@@ -102,10 +103,12 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
    * Action may be either a split, or a merge, or no action.
    *
    * @param table table to normalize
+   * @param types desired types of NormalizationPlan
    * @return normalization plan to execute
    */
   @Override
-  public NormalizationPlan computePlanForTable(TableName table) throws HBaseIOException {
+  public NormalizationPlan computePlanForTable(TableName table, List<PlanType> types)
+      throws HBaseIOException {
     if (table == null || table.isSystemTable()) {
       LOG.debug("Normalization of system table " + table + " isn't allowed");
       return EmptyNormalizationPlan.getInstance();
@@ -146,7 +149,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
 
     // now; if the largest region is >2 times large than average, we split it, split
     // is more high priority normalization action than merge.
-    if (largestRegion.getSecond() > 2 * avgRegionSize) {
+    if (types.contains(PlanType.SPLIT) && largestRegion.getSecond() > 2 * avgRegionSize) {
       LOG.debug("Table " + table + ", largest region "
         + largestRegion.getFirst().getRegionNameAsString() + " has size "
         + largestRegion.getSecond() + ", more than 2 times than avg size, splitting");
@@ -167,7 +170,8 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
     }
     Triple<HRegionInfo, Long, Integer> candidateRegion = regionsWithSize.get(candidateIdx);
     Triple<HRegionInfo, Long, Integer> candidateRegion2 = regionsWithSize.get(candidateIdx+1);
-    if (candidateRegion.getSecond() + candidateRegion2.getSecond() < avgRegionSize) {
+    if (types.contains(PlanType.MERGE) &&
+        candidateRegion.getSecond() + candidateRegion2.getSecond() < avgRegionSize) {
       LOG.debug("Table " + table + ", smallest region size: " + candidateRegion.getSecond()
         + " and its smallest neighbor size: " + candidateRegion2.getSecond()
         + ", less than the avg size, merging them");

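The thresholds themselves are unchanged; they are merely gated on the requested plan types: a split is proposed when the largest region is more than twice the average size, and a merge when the two smallest adjacent regions together stay under the average (the split branch is evaluated first, so it wins when both apply). A worked example under assumed sizes:

  // Assumed region sizes in MB: 10, 15, 30, 120  ->  average = 43.75
  // Split check: 120 > 2 * 43.75 (= 87.5)        ->  split, if SPLIT is allowed
  // Merge check: 10 + 15 = 25 < 43.75            ->  merge, if only MERGE is allowed
  //              (with both types allowed, the split branch returns first)
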
http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
index b95bfb7..76b7cc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
@@ -23,7 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
 
 import java.io.IOException;
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
index 970af43..4395aa3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,6 +55,18 @@ public class TestSimpleRegionNormalizer {
   private static final Log LOG = LogFactory.getLog(TestSimpleRegionNormalizer.class);
 
   private static RegionNormalizer normalizer;
+  private static List<PlanType> bothTypes;
+  static {
+    bothTypes = new ArrayList<>();
+    bothTypes.add(PlanType.SPLIT);
+    bothTypes.add(PlanType.MERGE);
+  }
+
+  private static List<PlanType> splitType;
+  static {
+    splitType = new ArrayList<>();
+    splitType.add(PlanType.SPLIT);
+  }
 
   // mocks
   private static MasterServices masterServices;
@@ -69,7 +83,7 @@ public class TestSimpleRegionNormalizer {
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable, bothTypes);
     assertTrue(plan instanceof EmptyNormalizationPlan);
   }
 
@@ -88,7 +102,7 @@ public class TestSimpleRegionNormalizer {
     regionSizes.put(hri2.getRegionName(), 15);
 
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable, bothTypes);
     assertTrue((plan instanceof EmptyNormalizationPlan));
   }
 
@@ -114,14 +128,18 @@ public class TestSimpleRegionNormalizer {
     hris.add(hri4);
     regionSizes.put(hri4.getRegionName(), 10);
 
-
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable, bothTypes);
     assertTrue(plan instanceof EmptyNormalizationPlan);
   }
 
   @Test
   public void testMergeOfSmallRegions() throws HBaseIOException {
+    testMergeOfSmallRegions(true);
+    testMergeOfSmallRegions(false);
+  }
+
+  public void testMergeOfSmallRegions(boolean mergeDesired) throws HBaseIOException {
     TableName testTable = TableName.valueOf("testMergeOfSmallRegions");
     List<HRegionInfo> hris = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
@@ -147,11 +165,16 @@ public class TestSimpleRegionNormalizer {
     regionSizes.put(hri5.getRegionName(), 16);
 
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
-
-    assertTrue(plan instanceof MergeNormalizationPlan);
-    assertEquals(hri2, ((MergeNormalizationPlan) plan).getFirstRegion());
-    assertEquals(hri3, ((MergeNormalizationPlan) plan).getSecondRegion());
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable,
+      mergeDesired ? bothTypes : splitType);
+
+    if (mergeDesired) {
+      assertTrue(plan instanceof MergeNormalizationPlan);
+      assertEquals(hri2, ((MergeNormalizationPlan) plan).getFirstRegion());
+      assertEquals(hri3, ((MergeNormalizationPlan) plan).getSecondRegion());
+    } else {
+      assertTrue(plan instanceof EmptyNormalizationPlan);
+    }
   }
 
   // Test for situation illustrated in HBASE-14867
@@ -186,7 +209,7 @@ public class TestSimpleRegionNormalizer {
     regionSizes.put(hri6.getRegionName(), 2700);
 
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable, bothTypes);
 
     assertTrue(plan instanceof MergeNormalizationPlan);
     assertEquals(hri5, ((MergeNormalizationPlan) plan).getFirstRegion());
@@ -220,7 +243,7 @@ public class TestSimpleRegionNormalizer {
     regionSizes.put(hri5.getRegionName(), 5);
 
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable, bothTypes);
 
     assertTrue(plan instanceof EmptyNormalizationPlan);
   }
@@ -248,7 +271,7 @@ public class TestSimpleRegionNormalizer {
     regionSizes.put(hri4.getRegionName(), 30);
 
     setupMocksForNormalizer(regionSizes, hris);
-    NormalizationPlan plan = normalizer.computePlanForTable(testTable);
+    NormalizationPlan plan = normalizer.computePlanForTable(testTable, bothTypes);
 
     assertTrue(plan instanceof SplitNormalizationPlan);
     assertEquals(hri4, ((SplitNormalizationPlan) plan).getRegionInfo());

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
index 4fe42ed..99fb268 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.TableNamespaceManager;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.normalizer.NormalizationPlan.PlanType;
 import org.apache.hadoop.hbase.namespace.TestNamespaceAuditor;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -137,7 +137,7 @@ public class TestSimpleRegionNormalizerOnCluster {
     }
 
     HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
-    htd.setNormalizationEnabled(true);
+    htd.setNormalizationMode("MS");
     admin.modifyTable(TABLENAME, htd);
 
     admin.flush(TABLENAME);
@@ -207,7 +207,7 @@ public class TestSimpleRegionNormalizerOnCluster {
     }
 
     HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
-    htd.setNormalizationEnabled(true);
+    htd.setNormalizationMode("MS");
     admin.modifyTable(TABLENAME, htd);
 
     admin.flush(TABLENAME);

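The cluster tests flip the flag via the typed setter and then drive normalization through Admin. A hedged end-to-end sketch; setNormalizerRunning() and normalize() are assumed to be the Java entry points behind the shell's 'normalizer_switch' and 'normalize' commands:

  HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
  htd.setNormalizationMode("MS");    // merges and splits allowed
  admin.modifyTable(TABLENAME, htd);
  admin.setNormalizerRunning(true);  // assumed: shell 'normalizer_switch true'
  admin.normalize();                 // assumed: shell 'normalize'
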
http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index c61b598..661783f 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -280,7 +280,10 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Parse arguments and update HTableDescriptor accordingly
     def parse_htd_args(htd, arg)
-      htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(NORMALIZATION_ENABLED))) if arg[NORMALIZATION_ENABLED]
+      if arg.has_key?(NORMALIZATION_MODE)
+        mode = arg.delete(NORMALIZATION_MODE)
+        htd.setValue(NORMALIZATION_MODE, mode)
+      end
     end
 
     #----------------------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-shell/src/main/ruby/shell/commands/normalize.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/normalize.rb b/hbase-shell/src/main/ruby/shell/commands/normalize.rb
index 7e6302c..e2b3d42 100644
--- a/hbase-shell/src/main/ruby/shell/commands/normalize.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/normalize.rb
@@ -22,7 +22,7 @@ module Shell
     class Normalize < Command
       def help
         return <<-EOF
-Trigger region normalizer for all tables which have NORMALIZATION_ENABLED flag set. Returns true
+Trigger region normalizer for all tables which have the NORMALIZATION_MODE flag set. Returns true
  if normalizer ran successfully, false otherwise. Note that this command has no effect
  if region normalizer is disabled (make sure it's turned on using 'normalizer_switch' command).
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d65978fc/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb b/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
index 6d959c4..ee9e2d1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
@@ -23,7 +23,8 @@ module Shell
       def help
         return <<-EOF
 Enable/Disable region normalizer. Returns previous normalizer state.
-When normalizer is enabled, it handles all tables with 'NORMALIZATION_ENABLED' => true.
+When the normalizer is enabled, it handles all tables whose 'NORMALIZATION_MODE' flag contains
+the desired types of normalization actions.
 Examples:
 
   hbase> normalizer_switch true