You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2013/08/08 08:08:31 UTC

svn commit: r1511591 [8/23] - in /hbase/branches/0.95: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java...

Modified: hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java Thu Aug  8 06:08:23 2013
@@ -11,9 +11,10 @@ public final class MasterMonitorProtos {
   public interface GetSchemaAlterStatusRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required bytes table_name = 1;
+    // required .TableName table_name = 1;
     boolean hasTableName();
-    com.google.protobuf.ByteString getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
   }
   public static final class GetSchemaAlterStatusRequest extends
       com.google.protobuf.GeneratedMessage
@@ -44,18 +45,21 @@ public final class MasterMonitorProtos {
     }
     
     private int bitField0_;
-    // required bytes table_name = 1;
+    // required .TableName table_name = 1;
     public static final int TABLE_NAME_FIELD_NUMBER = 1;
-    private com.google.protobuf.ByteString tableName_;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
     public boolean hasTableName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public com.google.protobuf.ByteString getTableName() {
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+      return tableName_;
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
       return tableName_;
     }
     
     private void initFields() {
-      tableName_ = com.google.protobuf.ByteString.EMPTY;
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -66,6 +70,10 @@ public final class MasterMonitorProtos {
         memoizedIsInitialized = 0;
         return false;
       }
+      if (!getTableName().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -74,7 +82,7 @@ public final class MasterMonitorProtos {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, tableName_);
+        output.writeMessage(1, tableName_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -87,7 +95,7 @@ public final class MasterMonitorProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, tableName_);
+          .computeMessageSize(1, tableName_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -238,6 +246,7 @@ public final class MasterMonitorProtos {
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
         }
       }
       private static Builder create() {
@@ -246,7 +255,11 @@ public final class MasterMonitorProtos {
       
       public Builder clear() {
         super.clear();
-        tableName_ = com.google.protobuf.ByteString.EMPTY;
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+        } else {
+          tableNameBuilder_.clear();
+        }
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -289,7 +302,11 @@ public final class MasterMonitorProtos {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.tableName_ = tableName_;
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -307,7 +324,7 @@ public final class MasterMonitorProtos {
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest.getDefaultInstance()) return this;
         if (other.hasTableName()) {
-          setTableName(other.getTableName());
+          mergeTableName(other.getTableName());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
@@ -318,6 +335,10 @@ public final class MasterMonitorProtos {
           
           return false;
         }
+        if (!getTableName().isInitialized()) {
+          
+          return false;
+        }
         return true;
       }
       
@@ -345,8 +366,12 @@ public final class MasterMonitorProtos {
               break;
             }
             case 10: {
-              bitField0_ |= 0x00000001;
-              tableName_ = input.readBytes();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
+              if (hasTableName()) {
+                subBuilder.mergeFrom(getTableName());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setTableName(subBuilder.buildPartial());
               break;
             }
           }
@@ -355,29 +380,95 @@ public final class MasterMonitorProtos {
       
       private int bitField0_;
       
-      // required bytes table_name = 1;
-      private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
+      // required .TableName table_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
       public boolean hasTableName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public com.google.protobuf.ByteString getTableName() {
-        return tableName_;
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
       }
-      public Builder setTableName(com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000001;
-        tableName_ = value;
-        onChanged();
+      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setTableName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
         return this;
       }
       public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
         bitField0_ = (bitField0_ & ~0x00000001);
-        tableName_ = getDefaultInstance().getTableName();
-        onChanged();
         return this;
       }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  tableName_,
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
       
       // @@protoc_insertion_point(builder_scope:GetSchemaAlterStatusRequest)
     }
@@ -830,10 +921,15 @@ public final class MasterMonitorProtos {
   public interface GetTableDescriptorsRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // repeated string table_names = 1;
-    java.util.List<String> getTableNamesList();
+    // repeated .TableName table_names = 1;
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> 
+        getTableNamesList();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index);
     int getTableNamesCount();
-    String getTableNames(int index);
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+        getTableNamesOrBuilderList();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
+        int index);
   }
   public static final class GetTableDescriptorsRequest extends
       com.google.protobuf.GeneratedMessage
@@ -863,28 +959,41 @@ public final class MasterMonitorProtos {
       return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableDescriptorsRequest_fieldAccessorTable;
     }
     
-    // repeated string table_names = 1;
+    // repeated .TableName table_names = 1;
     public static final int TABLE_NAMES_FIELD_NUMBER = 1;
-    private com.google.protobuf.LazyStringList tableNames_;
-    public java.util.List<String>
-        getTableNamesList() {
+    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableNames_;
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNamesList() {
+      return tableNames_;
+    }
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+        getTableNamesOrBuilderList() {
       return tableNames_;
     }
     public int getTableNamesCount() {
       return tableNames_.size();
     }
-    public String getTableNames(int index) {
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) {
+      return tableNames_.get(index);
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
+        int index) {
       return tableNames_.get(index);
     }
     
     private void initFields() {
-      tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      tableNames_ = java.util.Collections.emptyList();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
       
+      for (int i = 0; i < getTableNamesCount(); i++) {
+        if (!getTableNames(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -893,7 +1002,7 @@ public final class MasterMonitorProtos {
                         throws java.io.IOException {
       getSerializedSize();
       for (int i = 0; i < tableNames_.size(); i++) {
-        output.writeBytes(1, tableNames_.getByteString(i));
+        output.writeMessage(1, tableNames_.get(i));
       }
       getUnknownFields().writeTo(output);
     }
@@ -904,14 +1013,9 @@ public final class MasterMonitorProtos {
       if (size != -1) return size;
     
       size = 0;
-      {
-        int dataSize = 0;
-        for (int i = 0; i < tableNames_.size(); i++) {
-          dataSize += com.google.protobuf.CodedOutputStream
-            .computeBytesSizeNoTag(tableNames_.getByteString(i));
-        }
-        size += dataSize;
-        size += 1 * getTableNamesList().size();
+      for (int i = 0; i < tableNames_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, tableNames_.get(i));
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -1059,6 +1163,7 @@ public final class MasterMonitorProtos {
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNamesFieldBuilder();
         }
       }
       private static Builder create() {
@@ -1067,8 +1172,12 @@ public final class MasterMonitorProtos {
       
       public Builder clear() {
         super.clear();
-        tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
-        bitField0_ = (bitField0_ & ~0x00000001);
+        if (tableNamesBuilder_ == null) {
+          tableNames_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          tableNamesBuilder_.clear();
+        }
         return this;
       }
       
@@ -1106,12 +1215,15 @@ public final class MasterMonitorProtos {
       public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest buildPartial() {
         org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest(this);
         int from_bitField0_ = bitField0_;
-        if (((bitField0_ & 0x00000001) == 0x00000001)) {
-          tableNames_ = new com.google.protobuf.UnmodifiableLazyStringList(
-              tableNames_);
-          bitField0_ = (bitField0_ & ~0x00000001);
+        if (tableNamesBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            tableNames_ = java.util.Collections.unmodifiableList(tableNames_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.tableNames_ = tableNames_;
+        } else {
+          result.tableNames_ = tableNamesBuilder_.build();
         }
-        result.tableNames_ = tableNames_;
         onBuilt();
         return result;
       }
@@ -1127,21 +1239,43 @@ public final class MasterMonitorProtos {
       
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance()) return this;
-        if (!other.tableNames_.isEmpty()) {
-          if (tableNames_.isEmpty()) {
-            tableNames_ = other.tableNames_;
-            bitField0_ = (bitField0_ & ~0x00000001);
-          } else {
-            ensureTableNamesIsMutable();
-            tableNames_.addAll(other.tableNames_);
+        if (tableNamesBuilder_ == null) {
+          if (!other.tableNames_.isEmpty()) {
+            if (tableNames_.isEmpty()) {
+              tableNames_ = other.tableNames_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureTableNamesIsMutable();
+              tableNames_.addAll(other.tableNames_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.tableNames_.isEmpty()) {
+            if (tableNamesBuilder_.isEmpty()) {
+              tableNamesBuilder_.dispose();
+              tableNamesBuilder_ = null;
+              tableNames_ = other.tableNames_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              tableNamesBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getTableNamesFieldBuilder() : null;
+            } else {
+              tableNamesBuilder_.addAllMessages(other.tableNames_);
+            }
           }
-          onChanged();
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
       
       public final boolean isInitialized() {
+        for (int i = 0; i < getTableNamesCount(); i++) {
+          if (!getTableNames(i).isInitialized()) {
+            
+            return false;
+          }
+        }
         return true;
       }
       
@@ -1169,8 +1303,9 @@ public final class MasterMonitorProtos {
               break;
             }
             case 10: {
-              ensureTableNamesIsMutable();
-              tableNames_.add(input.readBytes());
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
+              input.readMessage(subBuilder, extensionRegistry);
+              addTableNames(subBuilder.buildPartial());
               break;
             }
           }
@@ -1179,60 +1314,190 @@ public final class MasterMonitorProtos {
       
       private int bitField0_;
       
-      // repeated string table_names = 1;
-      private com.google.protobuf.LazyStringList tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      // repeated .TableName table_names = 1;
+      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableNames_ =
+        java.util.Collections.emptyList();
       private void ensureTableNamesIsMutable() {
         if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-          tableNames_ = new com.google.protobuf.LazyStringArrayList(tableNames_);
+          tableNames_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableNames_);
           bitField0_ |= 0x00000001;
          }
       }
-      public java.util.List<String>
-          getTableNamesList() {
-        return java.util.Collections.unmodifiableList(tableNames_);
+      
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNamesBuilder_;
+      
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNamesList() {
+        if (tableNamesBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(tableNames_);
+        } else {
+          return tableNamesBuilder_.getMessageList();
+        }
       }
       public int getTableNamesCount() {
-        return tableNames_.size();
+        if (tableNamesBuilder_ == null) {
+          return tableNames_.size();
+        } else {
+          return tableNamesBuilder_.getCount();
+        }
       }
-      public String getTableNames(int index) {
-        return tableNames_.get(index);
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) {
+        if (tableNamesBuilder_ == null) {
+          return tableNames_.get(index);
+        } else {
+          return tableNamesBuilder_.getMessage(index);
+        }
       }
       public Builder setTableNames(
-          int index, String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  ensureTableNamesIsMutable();
-        tableNames_.set(index, value);
-        onChanged();
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNamesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTableNamesIsMutable();
+          tableNames_.set(index, value);
+          onChanged();
+        } else {
+          tableNamesBuilder_.setMessage(index, value);
+        }
         return this;
       }
-      public Builder addTableNames(String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  ensureTableNamesIsMutable();
-        tableNames_.add(value);
-        onChanged();
+      public Builder setTableNames(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNamesBuilder_ == null) {
+          ensureTableNamesIsMutable();
+          tableNames_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          tableNamesBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      public Builder addTableNames(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNamesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTableNamesIsMutable();
+          tableNames_.add(value);
+          onChanged();
+        } else {
+          tableNamesBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      public Builder addTableNames(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNamesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTableNamesIsMutable();
+          tableNames_.add(index, value);
+          onChanged();
+        } else {
+          tableNamesBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      public Builder addTableNames(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNamesBuilder_ == null) {
+          ensureTableNamesIsMutable();
+          tableNames_.add(builderForValue.build());
+          onChanged();
+        } else {
+          tableNamesBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      public Builder addTableNames(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNamesBuilder_ == null) {
+          ensureTableNamesIsMutable();
+          tableNames_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          tableNamesBuilder_.addMessage(index, builderForValue.build());
+        }
         return this;
       }
       public Builder addAllTableNames(
-          java.lang.Iterable<String> values) {
-        ensureTableNamesIsMutable();
-        super.addAll(values, tableNames_);
-        onChanged();
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
+        if (tableNamesBuilder_ == null) {
+          ensureTableNamesIsMutable();
+          super.addAll(values, tableNames_);
+          onChanged();
+        } else {
+          tableNamesBuilder_.addAllMessages(values);
+        }
         return this;
       }
       public Builder clearTableNames() {
-        tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        onChanged();
+        if (tableNamesBuilder_ == null) {
+          tableNames_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          tableNamesBuilder_.clear();
+        }
         return this;
       }
-      void addTableNames(com.google.protobuf.ByteString value) {
-        ensureTableNamesIsMutable();
-        tableNames_.add(value);
-        onChanged();
+      public Builder removeTableNames(int index) {
+        if (tableNamesBuilder_ == null) {
+          ensureTableNamesIsMutable();
+          tableNames_.remove(index);
+          onChanged();
+        } else {
+          tableNamesBuilder_.remove(index);
+        }
+        return this;
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNamesBuilder(
+          int index) {
+        return getTableNamesFieldBuilder().getBuilder(index);
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
+          int index) {
+        if (tableNamesBuilder_ == null) {
+          return tableNames_.get(index);  } else {
+          return tableNamesBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+           getTableNamesOrBuilderList() {
+        if (tableNamesBuilder_ != null) {
+          return tableNamesBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(tableNames_);
+        }
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder() {
+        return getTableNamesFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder(
+          int index) {
+        return getTableNamesFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+      }
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder> 
+           getTableNamesBuilderList() {
+        return getTableNamesFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+          getTableNamesFieldBuilder() {
+        if (tableNamesBuilder_ == null) {
+          tableNamesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  tableNames_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          tableNames_ = null;
+        }
+        return tableNamesBuilder_;
       }
       
       // @@protoc_insertion_point(builder_scope:GetTableDescriptorsRequest)
@@ -3068,26 +3333,27 @@ public final class MasterMonitorProtos {
   static {
     java.lang.String[] descriptorData = {
       "\n\023MasterMonitor.proto\032\014Master.proto\032\013hba" +
-      "se.proto\032\023ClusterStatus.proto\"1\n\033GetSche" +
-      "maAlterStatusRequest\022\022\n\ntable_name\030\001 \002(\014" +
-      "\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_" +
-      "to_update_regions\030\001 \001(\r\022\025\n\rtotal_regions" +
-      "\030\002 \001(\r\"1\n\032GetTableDescriptorsRequest\022\023\n\013" +
-      "table_names\030\001 \003(\t\"A\n\033GetTableDescriptors" +
-      "Response\022\"\n\014table_schema\030\001 \003(\0132\014.TableSc" +
-      "hema\"\031\n\027GetClusterStatusRequest\"B\n\030GetCl" +
-      "usterStatusResponse\022&\n\016cluster_status\030\001 ",
-      "\002(\0132\016.ClusterStatus2\314\002\n\024MasterMonitorSer" +
-      "vice\022S\n\024GetSchemaAlterStatus\022\034.GetSchema" +
-      "AlterStatusRequest\032\035.GetSchemaAlterStatu" +
-      "sResponse\022P\n\023GetTableDescriptors\022\033.GetTa" +
-      "bleDescriptorsRequest\032\034.GetTableDescript" +
-      "orsResponse\022G\n\020GetClusterStatus\022\030.GetClu" +
-      "sterStatusRequest\032\031.GetClusterStatusResp" +
-      "onse\022D\n\017IsMasterRunning\022\027.IsMasterRunnin" +
-      "gRequest\032\030.IsMasterRunningResponseBI\n*or" +
-      "g.apache.hadoop.hbase.protobuf.generated",
-      "B\023MasterMonitorProtosH\001\210\001\001\240\001\001"
+      "se.proto\032\023ClusterStatus.proto\"=\n\033GetSche" +
+      "maAlterStatusRequest\022\036\n\ntable_name\030\001 \002(\013" +
+      "2\n.TableName\"T\n\034GetSchemaAlterStatusResp" +
+      "onse\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rt" +
+      "otal_regions\030\002 \001(\r\"=\n\032GetTableDescriptor" +
+      "sRequest\022\037\n\013table_names\030\001 \003(\0132\n.TableNam" +
+      "e\"A\n\033GetTableDescriptorsResponse\022\"\n\014tabl" +
+      "e_schema\030\001 \003(\0132\014.TableSchema\"\031\n\027GetClust" +
+      "erStatusRequest\"B\n\030GetClusterStatusRespo",
+      "nse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterSta" +
+      "tus2\314\002\n\024MasterMonitorService\022S\n\024GetSchem" +
+      "aAlterStatus\022\034.GetSchemaAlterStatusReque" +
+      "st\032\035.GetSchemaAlterStatusResponse\022P\n\023Get" +
+      "TableDescriptors\022\033.GetTableDescriptorsRe" +
+      "quest\032\034.GetTableDescriptorsResponse\022G\n\020G" +
+      "etClusterStatus\022\030.GetClusterStatusReques" +
+      "t\032\031.GetClusterStatusResponse\022D\n\017IsMaster" +
+      "Running\022\027.IsMasterRunningRequest\032\030.IsMas" +
+      "terRunningResponseBI\n*org.apache.hadoop.",
+      "hbase.protobuf.generatedB\023MasterMonitorP" +
+      "rotosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

Modified: hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java Thu Aug  8 06:08:23 2013
@@ -1972,9 +1972,10 @@ public final class SecureBulkLoadProtos 
   public interface PrepareBulkLoadRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required bytes table_name = 1;
+    // required .TableName table_name = 1;
     boolean hasTableName();
-    com.google.protobuf.ByteString getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
   }
   public static final class PrepareBulkLoadRequest extends
       com.google.protobuf.GeneratedMessage
@@ -2005,18 +2006,21 @@ public final class SecureBulkLoadProtos 
     }
     
     private int bitField0_;
-    // required bytes table_name = 1;
+    // required .TableName table_name = 1;
     public static final int TABLE_NAME_FIELD_NUMBER = 1;
-    private com.google.protobuf.ByteString tableName_;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
     public boolean hasTableName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public com.google.protobuf.ByteString getTableName() {
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+      return tableName_;
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
       return tableName_;
     }
     
     private void initFields() {
-      tableName_ = com.google.protobuf.ByteString.EMPTY;
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2027,6 +2031,10 @@ public final class SecureBulkLoadProtos 
         memoizedIsInitialized = 0;
         return false;
       }
+      if (!getTableName().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -2035,7 +2043,7 @@ public final class SecureBulkLoadProtos 
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, tableName_);
+        output.writeMessage(1, tableName_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -2048,7 +2056,7 @@ public final class SecureBulkLoadProtos 
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, tableName_);
+          .computeMessageSize(1, tableName_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -2199,6 +2207,7 @@ public final class SecureBulkLoadProtos 
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
         }
       }
       private static Builder create() {
@@ -2207,7 +2216,11 @@ public final class SecureBulkLoadProtos 
       
       public Builder clear() {
         super.clear();
-        tableName_ = com.google.protobuf.ByteString.EMPTY;
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+        } else {
+          tableNameBuilder_.clear();
+        }
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -2250,7 +2263,11 @@ public final class SecureBulkLoadProtos 
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.tableName_ = tableName_;
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -2268,7 +2285,7 @@ public final class SecureBulkLoadProtos 
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest.getDefaultInstance()) return this;
         if (other.hasTableName()) {
-          setTableName(other.getTableName());
+          mergeTableName(other.getTableName());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
@@ -2279,6 +2296,10 @@ public final class SecureBulkLoadProtos 
           
           return false;
         }
+        if (!getTableName().isInitialized()) {
+          
+          return false;
+        }
         return true;
       }
       
@@ -2306,8 +2327,12 @@ public final class SecureBulkLoadProtos 
               break;
             }
             case 10: {
-              bitField0_ |= 0x00000001;
-              tableName_ = input.readBytes();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
+              if (hasTableName()) {
+                subBuilder.mergeFrom(getTableName());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setTableName(subBuilder.buildPartial());
               break;
             }
           }
@@ -2316,29 +2341,95 @@ public final class SecureBulkLoadProtos 
       
       private int bitField0_;
       
-      // required bytes table_name = 1;
-      private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
+      // required .TableName table_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
       public boolean hasTableName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public com.google.protobuf.ByteString getTableName() {
-        return tableName_;
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
       }
-      public Builder setTableName(com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000001;
-        tableName_ = value;
-        onChanged();
+      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setTableName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
         return this;
       }
       public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
         bitField0_ = (bitField0_ & ~0x00000001);
-        tableName_ = getDefaultInstance().getTableName();
-        onChanged();
         return this;
       }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  tableName_,
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
       
       // @@protoc_insertion_point(builder_scope:PrepareBulkLoadRequest)
     }
@@ -3887,19 +3978,19 @@ public final class SecureBulkLoadProtos 
       "FilesResponse\022\016\n\006loaded\030\001 \002(\010\"[\n\024Delegat" +
       "ionTokenProto\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010pas" +
       "sword\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001" +
-      "(\t\",\n\026PrepareBulkLoadRequest\022\022\n\ntable_na",
-      "me\030\001 \002(\014\"-\n\027PrepareBulkLoadResponse\022\022\n\nb" +
-      "ulk_token\030\001 \002(\t\",\n\026CleanupBulkLoadReques" +
-      "t\022\022\n\nbulk_token\030\001 \002(\t\"\031\n\027CleanupBulkLoad" +
-      "Response2\370\001\n\025SecureBulkLoadService\022D\n\017Pr" +
-      "epareBulkLoad\022\027.PrepareBulkLoadRequest\032\030" +
-      ".PrepareBulkLoadResponse\022S\n\024SecureBulkLo" +
-      "adHFiles\022\034.SecureBulkLoadHFilesRequest\032\035" +
-      ".SecureBulkLoadHFilesResponse\022D\n\017Cleanup" +
-      "BulkLoad\022\027.CleanupBulkLoadRequest\032\030.Clea" +
-      "nupBulkLoadResponseBJ\n*org.apache.hadoop",
-      ".hbase.protobuf.generatedB\024SecureBulkLoa" +
-      "dProtosH\001\210\001\001\240\001\001"
+      "(\t\"8\n\026PrepareBulkLoadRequest\022\036\n\ntable_na",
+      "me\030\001 \002(\0132\n.TableName\"-\n\027PrepareBulkLoadR" +
+      "esponse\022\022\n\nbulk_token\030\001 \002(\t\",\n\026CleanupBu" +
+      "lkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t\"\031\n\027Cle" +
+      "anupBulkLoadResponse2\370\001\n\025SecureBulkLoadS" +
+      "ervice\022D\n\017PrepareBulkLoad\022\027.PrepareBulkL" +
+      "oadRequest\032\030.PrepareBulkLoadResponse\022S\n\024" +
+      "SecureBulkLoadHFiles\022\034.SecureBulkLoadHFi" +
+      "lesRequest\032\035.SecureBulkLoadHFilesRespons" +
+      "e\022D\n\017CleanupBulkLoad\022\027.CleanupBulkLoadRe" +
+      "quest\032\030.CleanupBulkLoadResponseBJ\n*org.a",
+      "pache.hadoop.hbase.protobuf.generatedB\024S" +
+      "ecureBulkLoadProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

Modified: hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java Thu Aug  8 06:08:23 2013
@@ -5037,9 +5037,10 @@ public final class ZooKeeperProtos {
   public interface TableLockOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // optional bytes table_name = 1;
+    // optional .TableName table_name = 1;
     boolean hasTableName();
-    com.google.protobuf.ByteString getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
     
     // optional .ServerName lock_owner = 2;
     boolean hasLockOwner();
@@ -5091,13 +5092,16 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // optional bytes table_name = 1;
+    // optional .TableName table_name = 1;
     public static final int TABLE_NAME_FIELD_NUMBER = 1;
-    private com.google.protobuf.ByteString tableName_;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
     public boolean hasTableName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public com.google.protobuf.ByteString getTableName() {
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+      return tableName_;
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
       return tableName_;
     }
     
@@ -5177,7 +5181,7 @@ public final class ZooKeeperProtos {
     }
     
     private void initFields() {
-      tableName_ = com.google.protobuf.ByteString.EMPTY;
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
       lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
       threadId_ = 0L;
       isShared_ = false;
@@ -5189,6 +5193,12 @@ public final class ZooKeeperProtos {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
       
+      if (hasTableName()) {
+        if (!getTableName().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
       if (hasLockOwner()) {
         if (!getLockOwner().isInitialized()) {
           memoizedIsInitialized = 0;
@@ -5203,7 +5213,7 @@ public final class ZooKeeperProtos {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, tableName_);
+        output.writeMessage(1, tableName_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeMessage(2, lockOwner_);
@@ -5231,7 +5241,7 @@ public final class ZooKeeperProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, tableName_);
+          .computeMessageSize(1, tableName_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
@@ -5447,6 +5457,7 @@ public final class ZooKeeperProtos {
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
           getLockOwnerFieldBuilder();
         }
       }
@@ -5456,7 +5467,11 @@ public final class ZooKeeperProtos {
       
       public Builder clear() {
         super.clear();
-        tableName_ = com.google.protobuf.ByteString.EMPTY;
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+        } else {
+          tableNameBuilder_.clear();
+        }
         bitField0_ = (bitField0_ & ~0x00000001);
         if (lockOwnerBuilder_ == null) {
           lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
@@ -5513,7 +5528,11 @@ public final class ZooKeeperProtos {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.tableName_ = tableName_;
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
@@ -5555,7 +5574,7 @@ public final class ZooKeeperProtos {
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this;
         if (other.hasTableName()) {
-          setTableName(other.getTableName());
+          mergeTableName(other.getTableName());
         }
         if (other.hasLockOwner()) {
           mergeLockOwner(other.getLockOwner());
@@ -5577,6 +5596,12 @@ public final class ZooKeeperProtos {
       }
       
       public final boolean isInitialized() {
+        if (hasTableName()) {
+          if (!getTableName().isInitialized()) {
+            
+            return false;
+          }
+        }
         if (hasLockOwner()) {
           if (!getLockOwner().isInitialized()) {
             
@@ -5610,8 +5635,12 @@ public final class ZooKeeperProtos {
               break;
             }
             case 10: {
-              bitField0_ |= 0x00000001;
-              tableName_ = input.readBytes();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
+              if (hasTableName()) {
+                subBuilder.mergeFrom(getTableName());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setTableName(subBuilder.buildPartial());
               break;
             }
             case 18: {
@@ -5649,29 +5678,95 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // optional bytes table_name = 1;
-      private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
+      // optional .TableName table_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
       public boolean hasTableName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public com.google.protobuf.ByteString getTableName() {
-        return tableName_;
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
       }
-      public Builder setTableName(com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000001;
-        tableName_ = value;
-        onChanged();
+      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setTableName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
         return this;
       }
       public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
         bitField0_ = (bitField0_ & ~0x00000001);
-        tableName_ = getDefaultInstance().getTableName();
-        onChanged();
         return this;
       }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  tableName_,
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
       
       // optional .ServerName lock_owner = 2;
       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
@@ -7093,16 +7188,17 @@ public final class ZooKeeperProtos {
       "licationState.State\"\"\n\005State\022\013\n\007ENABLED\020" +
       "\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPositi" +
       "on\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock\022",
-      "\022\n\nlock_owner\030\001 \002(\t\"\214\001\n\tTableLock\022\022\n\ntab" +
-      "le_name\030\001 \001(\014\022\037\n\nlock_owner\030\002 \001(\0132\013.Serv" +
-      "erName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004" +
-      " \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001" +
-      "(\003\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002" +
-      "(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSe" +
-      "quenceIds\022 \n\030last_flushed_sequence_id\030\001 " +
-      "\002(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSe" +
-      "quenceIdBE\n*org.apache.hadoop.hbase.prot" +
-      "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\ntab" +
+      "le_name\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner\030" +
+      "\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n" +
+      "\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013cre" +
+      "ate_time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013fam" +
+      "ily_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026R" +
+      "egionStoreSequenceIds\022 \n\030last_flushed_se" +
+      "quence_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 \003" +
+      "(\0132\020.StoreSequenceIdBE\n*org.apache.hadoo" +
+      "p.hbase.protobuf.generatedB\017ZooKeeperPro",
+      "tosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/AccessControl.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/AccessControl.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/AccessControl.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/AccessControl.proto Thu Aug  8 06:08:23 2013
@@ -22,6 +22,8 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
+import "hbase.proto";
+
 message Permission {
     enum Action {
         READ = 0;
@@ -31,7 +33,7 @@ message Permission {
         ADMIN = 4;
     }
     repeated Action action = 1;
-    optional bytes table = 2;
+    optional TableName tableName = 2;
     optional bytes family = 3;
     optional bytes qualifier = 4;
 }
@@ -70,7 +72,7 @@ message RevokeResponse {
 
 
 message UserPermissionsRequest {
-    optional bytes table = 1;
+    optional TableName tableName = 1;
 }
 
 message UserPermissionsResponse {

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterAdmin.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterAdmin.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterAdmin.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterAdmin.proto Thu Aug  8 06:08:23 2013
@@ -32,7 +32,7 @@ import "Client.proto";
 /* Column-level protobufs */
 
 message AddColumnRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
   required ColumnFamilySchema column_families = 2;
 }
 
@@ -40,7 +40,7 @@ message AddColumnResponse {
 }
 
 message DeleteColumnRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
   required bytes column_name = 2;
 }
 
@@ -48,7 +48,7 @@ message DeleteColumnResponse {
 }
 
 message ModifyColumnRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
   required ColumnFamilySchema column_families = 2;
 }
 
@@ -110,34 +110,81 @@ message CreateTableResponse {
 }
 
 message DeleteTableRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
 }
 
 message DeleteTableResponse {
 }
 
 message EnableTableRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
 }
 
 message EnableTableResponse {
 }
 
 message DisableTableRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
 }
 
 message DisableTableResponse {
 }
 
 message ModifyTableRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
   required TableSchema table_schema = 2;
 }
 
 message ModifyTableResponse {
 }
 
+/* Namespace-level protobufs */
+
+message CreateNamespaceRequest {
+  required NamespaceDescriptor namespaceDescriptor = 1;
+}
+
+message CreateNamespaceResponse {
+}
+
+message DeleteNamespaceRequest {
+  required string namespaceName = 1;
+}
+
+message DeleteNamespaceResponse {
+}
+
+message ModifyNamespaceRequest {
+  required NamespaceDescriptor namespaceDescriptor = 1;
+}
+
+message GetNamespaceDescriptorRequest {
+  required string namespaceName = 1;
+}
+
+message GetNamespaceDescriptorResponse {
+  required NamespaceDescriptor namespaceDescriptor = 1;
+}
+
+message ModifyNamespaceResponse {
+}
+
+message ListNamespaceDescriptorsRequest {
+}
+
+message ListNamespaceDescriptorsResponse {
+  repeated NamespaceDescriptor namespaceDescriptor = 1;
+}
+
+message GetTableDescriptorsByNamespaceRequest {
+  required string namespaceName = 1;
+}
+
+message GetTableDescriptorsByNamespaceResponse {
+  repeated TableSchema tableSchema = 1;
+}
+
+
 /* Cluster-level protobufs */
 
 
@@ -382,6 +429,31 @@ service MasterAdminService {
    */
   rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse);
 
+
   /** return true if master is available */
   rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
+
+  /** Modify a namespace's metadata */
+  rpc ModifyNamespace(ModifyNamespaceRequest)
+    returns(ModifyNamespaceResponse);
+
+  /** Creates a new namespace synchronously */
+  rpc CreateNamespace(CreateNamespaceRequest)
+    returns(CreateNamespaceResponse);
+
+  /** Delete's namespace synchronously */
+  rpc DeleteNamespace(DeleteNamespaceRequest)
+    returns(DeleteNamespaceResponse);
+
+  /** Get a namespace descriptor by name */
+  rpc GetNamespaceDescriptor(GetNamespaceDescriptorRequest)
+    returns(GetNamespaceDescriptorResponse);
+
+  /** returns a list of namespaces */
+  rpc ListNamespaceDescriptors(ListNamespaceDescriptorsRequest)
+    returns(ListNamespaceDescriptorsResponse);
+
+  /** returns a list of tables for a given namespace*/
+  rpc GetTableDescriptorsByNamespace(GetTableDescriptorsByNamespaceRequest)
+    returns(GetTableDescriptorsByNamespaceResponse);
 }

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterMonitor.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterMonitor.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterMonitor.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/MasterMonitor.proto Thu Aug  8 06:08:23 2013
@@ -29,7 +29,7 @@ import "hbase.proto";
 import "ClusterStatus.proto";
 
 message GetSchemaAlterStatusRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
 }
 
 message GetSchemaAlterStatusResponse {
@@ -38,7 +38,7 @@ message GetSchemaAlterStatusResponse {
 }
 
 message GetTableDescriptorsRequest {
-  repeated string table_names = 1;
+  repeated TableName table_names = 1;
 }
 
 message GetTableDescriptorsResponse {

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto Thu Aug  8 06:08:23 2013
@@ -44,7 +44,7 @@ message DelegationTokenProto {
 }
 
 message PrepareBulkLoadRequest {
-  required bytes table_name = 1;
+  required TableName table_name = 1;
 }
 
 message PrepareBulkLoadResponse {

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/WAL.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/WAL.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/WAL.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/WAL.proto Thu Aug  8 06:08:23 2013
@@ -34,7 +34,7 @@ message WALKey {
   required uint64 log_sequence_number = 3;
   required uint64 write_time = 4;
   optional UUID cluster_id = 5;
-  
+
   repeated FamilyScope scopes = 6;
   optional uint32 following_kv_count = 7;
 /*

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/ZooKeeper.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/ZooKeeper.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/ZooKeeper.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/ZooKeeper.proto Thu Aug  8 06:08:23 2013
@@ -144,7 +144,7 @@ message ReplicationLock {
  * Metadata associated with a table lock in zookeeper
  */
 message TableLock {
-  optional bytes table_name = 1;
+  optional TableName table_name = 1;
   optional ServerName lock_owner = 2;
   optional int64 thread_id = 3;
   optional bool is_shared = 4;

Modified: hbase/branches/0.95/hbase-protocol/src/main/protobuf/hbase.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/protobuf/hbase.proto?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/protobuf/hbase.proto (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/protobuf/hbase.proto Thu Aug  8 06:08:23 2013
@@ -26,11 +26,19 @@ option optimize_for = SPEED;
 import "Cell.proto";
 
 /**
+ * Table Name
+ */
+message TableName {
+  required bytes namespace = 1;
+  required bytes qualifier = 2;
+}
+
+/**
  * Table Schema
  * Inspired by the rest TableSchema
  */
 message TableSchema {
-  optional bytes name = 1;
+  optional TableName table_name = 1;
   repeated BytesBytesPair attributes = 2;
   repeated ColumnFamilySchema column_families = 3;
   repeated NameStringPair configuration = 4;
@@ -51,7 +59,7 @@ message ColumnFamilySchema {
  */
 message RegionInfo {
   required uint64 region_id = 1;
-  required bytes table_name = 2;
+  required TableName table_name = 2;
   optional bytes start_key = 3;
   optional bytes end_key = 4;
   optional bool offline = 5;
@@ -172,3 +180,8 @@ message UUID {
   required uint64 least_sig_bits = 1;
   required uint64 most_sig_bits = 2;
 }
+
+message NamespaceDescriptor {
+  required bytes name = 1;
+  repeated NameStringPair configuration = 2;
+}

Modified: hbase/branches/0.95/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon (original)
+++ hbase/branches/0.95/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon Thu Aug  8 06:08:23 2013
@@ -39,14 +39,17 @@ org.apache.hadoop.hbase.master.HMaster;
 org.apache.hadoop.hbase.master.AssignmentManager;
 org.apache.hadoop.hbase.master.ServerManager;
 org.apache.hadoop.hbase.HConstants;
+org.apache.hadoop.hbase.NamespaceDescriptor;
 org.apache.hadoop.hbase.ServerLoad;
 org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.client.HBaseAdmin;
 org.apache.hadoop.hbase.client.HConnectionManager;
 org.apache.hadoop.hbase.HTableDescriptor;
 org.apache.hadoop.hbase.HBaseConfiguration;
+org.apache.hadoop.hbase.TableName;
 org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 org.apache.hadoop.hbase.master.DeadServer;
+org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 </%import>
 <%if format.equals("json") %>
   <& ../common/TaskMonitorTmpl; filter = filter; format = "json" &>
@@ -293,27 +296,39 @@ AssignmentManager assignmentManager = ma
   </body>
 </html>
 
-
 <%def catalogTables>
+<%java>
+ HTableDescriptor[] sysTables = admin.getTableDescriptorsByNamespace(NamespaceDescriptor
+ .SYSTEM_NAMESPACE_NAME_STR);
+</%java>
 
 <table class="table table-striped">
 <tr>
+    <th>System Tables</th>
     <th>Table Name</th>
     <%if (frags != null) %>
         <th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th>
     </%if>
     <th>Description</th>
 </tr>
-  <%if (metaLocation != null) %>
+<%for HTableDescriptor systemTable : sysTables%>
 <tr>
-    <td><a href="table.jsp?name=<% Bytes.toString(HConstants.META_TABLE_NAME) %>"><% Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
+<%java>TableName tableName = systemTable.getTableName();</%java>
+    <td><a href="table.jsp?name=<% tableName %>"><% tableName %></a></td>
     <%if (frags != null)%>
-        <td align="center"><% frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
+        <td align="center"><% frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString())
+        .intValue() + "%" : "n/a" %></td>
     </%if>
-    <td>The .META. table holds references to all User Table regions</td>
+    <%java>String description = null;
+        if (tableName.equals(TableName.META_TABLE_NAME)){
+            description = "The .META. table holds references to all User Table regions";
+        } else {
+            description = "The .NAMESPACE. table holds information about namespaces.";
+        }
+    </%java>
+    <td><% description %></td>
 </tr>
-
-  </%if>
+</%for>
 </table>
 </%def>
 
@@ -333,11 +348,12 @@ AssignmentManager assignmentManager = ma
     </tr>
     <%for HTableDescriptor htDesc : tables%>
     <tr>
-        <td><a href=table.jsp?name=<% htDesc.getNameAsString() %>><% htDesc.getNameAsString() %></a> </td>
+        <td><a href=table.jsp?name=<% htDesc.getTableName().getNameAsString() %>><% htDesc.getTableName().getNameAsString() %></a> </td>
         <%if (frags != null) %>
-            <td align="center"><% frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
+            <td align="center"><% frags.get(htDesc.getTableName().getNameAsString()) != null ? frags.get(htDesc.getTableName().getNameAsString()).intValue() + "%" : "n/a" %></td>
         </%if>
-        <td><% master.getAssignmentManager().getRegionStates().getRegionsOfTable(htDesc.getName()).size() %>
+        <td><% master.getAssignmentManager().getRegionStates().getRegionsOfTable(htDesc
+        .getTableName()).size() %>
         <td><% htDesc.toStringCustomizedValues() %></td>
     </tr>
     </%for>
@@ -358,9 +374,13 @@ AssignmentManager assignmentManager = ma
         <th>Creation Time</th>
     </tr>
     <%for SnapshotDescription snapshotDesc : snapshots%>
+    <%java>
+        TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable());
+    </%java>
     <tr>
         <td><a href="snapshot.jsp?name=<% snapshotDesc.getName() %>"><% snapshotDesc.getName() %></a> </td>
-        <td><a href="table.jsp?name=<% snapshotDesc.getTable() %>"><% snapshotDesc.getTable() %></a></td>
+        <td><a href="table.jsp?name=<% snapshotTable.getNameAsString() %>"><% snapshotTable.getNameAsString() %></a>
+        </td>
         <td><% new Date(snapshotDesc.getCreationTime()) %></td>
     </tr>
     </%for>

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java Thu Aug  8 06:08:23 2013
@@ -450,7 +450,7 @@ public class LocalHBaseCluster {
     cluster.startup();
     HBaseAdmin admin = new HBaseAdmin(conf);
     HTableDescriptor htd =
-      new HTableDescriptor(Bytes.toBytes(cluster.getClass().getName()));
+      new HTableDescriptor(TableName.valueOf(cluster.getClass().getName()));
     admin.createTable(htd);
     cluster.shutdown();
   }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java Thu Aug  8 06:08:23 2013
@@ -31,19 +31,19 @@ import org.apache.hadoop.classification.
 @InterfaceStability.Evolving
 public interface TableDescriptors {
   /**
-   * @param tablename
+   * @param tableName
    * @return HTableDescriptor for tablename
    * @throws IOException
    */
-  HTableDescriptor get(final String tablename)
+  HTableDescriptor get(final TableName tableName)
   throws IOException;
 
   /**
-   * @param tablename
-   * @return HTableDescriptor for tablename
+   * Get Map of all HTableDescriptors for a given namespace.
+   * @return Map of all descriptors.
    * @throws IOException
    */
-  HTableDescriptor get(final byte[] tablename)
+  Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException;
 
   /**
@@ -68,6 +68,6 @@ public interface TableDescriptors {
    * @return Instance of table descriptor or null if none found.
    * @throws IOException
    */
-  HTableDescriptor remove(final String tablename)
+  HTableDescriptor remove(final TableName tablename)
   throws IOException;
 }

Added: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java?rev=1511591&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java (added)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java Thu Aug  8 06:08:23 2013
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+
+/**
+ * Class serves two purposes:
+ *
+ * 1. Broadcast NamespaceDescriptor information via ZK
+ * (Done by the Master)
+ * 2. Consume broadcasted NamespaceDescriptor changes
+ * (Done by the RegionServers)
+ *
+ */
+@InterfaceAudience.Private
+public class ZKNamespaceManager extends ZooKeeperListener {
+  private static Log LOG = LogFactory.getLog(ZKNamespaceManager.class);
+  private final String nsZNode;
+  private volatile NavigableMap<String,NamespaceDescriptor> cache;
+
+  public ZKNamespaceManager(ZooKeeperWatcher zkw) throws IOException {
+    super(zkw);
+    nsZNode = ZooKeeperWatcher.namespaceZNode;
+    cache = new ConcurrentSkipListMap<String, NamespaceDescriptor>();
+  }
+
+  public void start() throws IOException {
+    watcher.registerListener(this);
+    try {
+      if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) {
+        List<ZKUtil.NodeAndData> existing =
+            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
+        if (existing != null) {
+          refreshNodes(existing);
+        }
+      } else {
+        ZKUtil.createWithParents(watcher, nsZNode);
+      }
+    } catch (KeeperException e) {
+      throw new IOException("Failed to initialize ZKNamespaceManager", e);
+    }
+  }
+
+  public NamespaceDescriptor get(String name) {
+    return cache.get(name);
+  }
+
+  public void update(NamespaceDescriptor ns) throws IOException {
+    writeNamespace(ns);
+    cache.put(ns.getName(), ns);
+  }
+
+  public void remove(String name) throws IOException {
+    deleteNamespace(name);
+    cache.remove(name);
+  }
+
+  public NavigableSet<NamespaceDescriptor> list() throws IOException {
+    NavigableSet<NamespaceDescriptor> ret =
+        Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
+    for(NamespaceDescriptor ns: cache.values()) {
+      ret.add(ns);
+    }
+    return ret;
+  }
+
+  @Override
+  public void nodeCreated(String path) {
+    if (nsZNode.equals(path)) {
+      try {
+        List<ZKUtil.NodeAndData> nodes =
+            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
+        refreshNodes(nodes);
+      } catch (KeeperException ke) {
+        String msg = "Error reading data from zookeeper";
+        LOG.error(msg, ke);
+        watcher.abort(msg, ke);
+      } catch (IOException e) {
+        String msg = "Error parsing data from zookeeper";
+        LOG.error(msg, e);
+        watcher.abort(msg, e);
+      }
+    }
+  }
+
+  @Override
+  public void nodeDeleted(String path) {
+    if (nsZNode.equals(ZKUtil.getParent(path))) {
+      String nsName = ZKUtil.getNodeName(path);
+      cache.remove(nsName);
+    }
+  }
+
+  @Override
+  public void nodeDataChanged(String path) {
+    if (nsZNode.equals(ZKUtil.getParent(path))) {
+      try {
+        byte[] data = ZKUtil.getDataAndWatch(watcher, path);
+        NamespaceDescriptor ns =
+            ProtobufUtil.toNamespaceDescriptor(
+                HBaseProtos.NamespaceDescriptor.parseFrom(data));
+        cache.put(ns.getName(), ns);
+      } catch (KeeperException ke) {
+        String msg = "Error reading data from zookeeper for node "+path;
+        LOG.error(msg, ke);
+        // only option is to abort
+        watcher.abort(msg, ke);
+      } catch (IOException ioe) {
+        String msg = "Error deserializing namespace: "+path;
+        LOG.error(msg, ioe);
+        watcher.abort(msg, ioe);
+      }
+    }
+  }
+
+  @Override
+  public void nodeChildrenChanged(String path) {
+    if (nsZNode.equals(path)) {
+      try {
+        List<ZKUtil.NodeAndData> nodes =
+            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
+        refreshNodes(nodes);
+      } catch (KeeperException ke) {
+        LOG.error("Error reading data from zookeeper for path "+path, ke);
+        watcher.abort("Zookeeper error get node children for path "+path, ke);
+      } catch (IOException e) {
+        LOG.error("Error deserializing namespace child from: "+path, e);
+        watcher.abort("Error deserializing namespace child from: " + path, e);
+      }
+    }
+  }
+
+  private void deleteNamespace(String name) throws IOException {
+    String zNode = ZKUtil.joinZNode(nsZNode, name);
+    try {
+      ZKUtil.deleteNode(watcher, zNode);
+    } catch (KeeperException e) {
+      LOG.error("Failed updating permissions for namespace "+name, e);
+      throw new IOException("Failed updating permissions for namespace "+name, e);
+    }
+  }
+
+  private void writeNamespace(NamespaceDescriptor ns) throws IOException {
+    String zNode = ZKUtil.joinZNode(nsZNode, ns.getName());
+    try {
+      ZKUtil.createWithParents(watcher, zNode);
+      ZKUtil.updateExistingNodeData(watcher, zNode,
+          ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1);
+    } catch (KeeperException e) {
+      LOG.error("Failed updating permissions for namespace "+ns.getName(), e);
+      throw new IOException("Failed updating permissions for namespace "+ns.getName(), e);
+    }
+  }
+
+  private void refreshNodes(List<ZKUtil.NodeAndData> nodes) throws IOException {
+    for (ZKUtil.NodeAndData n : nodes) {
+      if (n.isEmpty()) continue;
+      String path = n.getNode();
+      String namespace = ZKUtil.getNodeName(path);
+      byte[] nodeData = n.getData();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Updating namespace cache from node "+namespace+" with data: "+
+            Bytes.toStringBinary(nodeData));
+      }
+      NamespaceDescriptor ns =
+          ProtobufUtil.toNamespaceDescriptor(
+              HBaseProtos.NamespaceDescriptor.parseFrom(nodeData));
+      cache.put(ns.getName(), ns);
+    }
+  }
+}

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java Thu Aug  8 06:08:23 2013
@@ -31,8 +31,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -73,7 +73,7 @@ public class HFileArchiver {
   public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
       throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
-    archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
+    archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTableName()),
       HRegion.getRegionDir(rootDir, info));
   }
 
@@ -107,7 +107,9 @@ public class HFileArchiver {
 
     // make sure the regiondir lives under the tabledir
     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir);
+    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
+        FSUtils.getTableName(tableDir),
+        regionDir.getName());
 
     FileStatusConverter getAsFile = new FileStatusConverter(fs);
     // otherwise, we attempt to archive the store files

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java Thu Aug  8 06:08:23 2013
@@ -75,7 +75,7 @@ public class ConstraintProcessor extends
 
     if (LOG.isInfoEnabled()) {
       LOG.info("Finished loading " + constraints.size()
-          + " user Constraints on table: " + new String(desc.getName()));
+          + " user Constraints on table: " + desc.getTableName());
     }
 
   }