You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2009/07/04 04:16:18 UTC

svn commit: r791050 [1/2] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ conf/ src/contrib/stargate/ src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ src/contrib/starga...

Author: apurtell
Date: Sat Jul  4 02:16:16 2009
New Revision: 791050

URL: http://svn.apache.org/viewvc?rev=791050&view=rev
Log:
HBASE-1597,HBASE-1607,HBASE-1218,HBASE-1606

Added:
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd
Removed:
    hadoop/hbase/trunk_on_hadoop-0.18.3/conf/zoo.cfg
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/IProtobufWrapper.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/zoo.cfg
Modified:
    hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
    hadoop/hbase/trunk_on_hadoop-0.18.3/build.xml
    hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/build.xml
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLog.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/RegionHistorian.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/ClassSize.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/hbase-site.xml
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniZooKeeperCluster.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/zookeeper/HQuorumPeerTest.java

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt Sat Jul  4 02:16:16 2009
@@ -433,6 +433,12 @@
                (Lars George via Stack)
    HBASE-1596  Remove WatcherWrapper and have all users of Zookeeper provide a
                Watcher
+   HBASE-1597  Prevent unnecessary caching of blocks during compactions
+               (Jon Gray via Stack)
+   HBASE-1607  Redo MemStore heap sizing to be accurate, testable, and more
+               like new LruBlockCache (Jon Gray via Stack)
+   HBASE-1218  Implement in-memory column (Jon Gray via Stack)
+   HBASE-1606  Remove zoo.cfg, put config options into hbase-site.xml
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/build.xml?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/build.xml (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/build.xml Sat Jul  4 02:16:16 2009
@@ -185,7 +185,7 @@
       <fileset dir="${conf.dir}" >
         <include name="hbase-default.xml" />
       </fileset>
-      <zipfileset dir="conf" prefix="conf" includes="zoo.cfg,hbase-default.xml" />
+      <zipfileset dir="conf" prefix="conf" includes="hbase-default.xml" />
       <zipfileset dir="${build.webapps}" prefix="webapps"/>
    		<manifest>
             <attribute name="Main-Class" value="org/apache/hadoop/hbase/mapreduce/Driver" />

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/conf/hbase-default.xml Sat Jul  4 02:16:16 2009
@@ -396,7 +396,7 @@
     <value>/hbase</value>
     <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
       files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file patsh are configured with a
+      By default, all of HBase's ZooKeeper file paths are configured with a
       relative path, so they will all go under this directory unless changed.
     </description>
   </property>
@@ -419,4 +419,95 @@
       mode flag is stored at /hbase/safe-mode.
     </description>
   </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.peerport</name>
+    <value>2888</value>
+    <description>Port used by ZooKeeper peers to talk to each other.
+    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+    for more information.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.leaderport</name>
+    <value>3888</value>
+    <description>Port used by ZooKeeper for leader election.
+    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+    for more information.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <!--
+  Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
+  All properties with an "hbase.zookeeper.property." prefix are converted for
+  ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
+  e.g.  "initLimit=10" you would append the following to your configuration:
+    <property>
+      <name>hbase.zookeeper.property.initLimit</name>
+      <value>10</value>
+    </property>
+  -->
+  <property>
+    <name>hbase.zookeeper.property.tickTime</name>
+    <value>2000</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The number of milliseconds of each tick.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.initLimit</name>
+    <value>10</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The number of ticks that the initial synchronization phase can take.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.syncLimit</name>
+    <value>5</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The number of ticks that can pass between sending a request and getting an
+    acknowledgment.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>${hbase.tmp.dir}/zookeeper</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The directory where the snapshot is stored.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.maxClientCnxns</name>
+    <value>30</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    Limit on number of concurrent connections (at the socket level) that a
+    single client, identified by IP address, may make to a single member of
+    the ZooKeeper ensemble. Set high to avoid zk connection issues running
+    standalone and pseudo-distributed.
+    </description>
+  </property>
+  <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
 </configuration>

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/build.xml?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/build.xml (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/build.xml Sat Jul  4 02:16:16 2009
@@ -13,7 +13,13 @@
   
   <property name="javac.debug" value="on"/>
   <property name="javac.source" value="1.6"/>
-  
+
+  <!-- the unit test classpath -->
+  <path id="test.classpath.stargate">
+    <pathelement location="${lib.dir}" />
+    <path refid="test.classpath"/>
+  </path>
+
   <target name="init-contrib">
     <tstamp/>
     <mkdir dir="${build.war.classes}"/>
@@ -44,7 +50,7 @@
   
   <target name="compile-test" depends="compile-jar,compile-war">
     <javac srcdir="${src.test}" includes="**/*.java" destdir="${build.test}" debug="${javac.debug}" source="1.6">
-      <classpath refid="test.classpath"/>
+      <classpath refid="test.classpath.stargate"/>
     </javac>
   </target>
   
@@ -54,7 +60,7 @@
       <sysproperty key="build.test" value="${build.test}"/>
       <sysproperty key="user.dir" value="${build.test}/data"/>
       <sysproperty key="test.log.dir" value="${hadoop.log.dir}"/>
-      <classpath refid="test.classpath"/>
+      <classpath refid="test.classpath.stargate"/>
       <formatter type="${test.junit.output.format}"/>
       <batchtest todir="${build.test}" unless="testcase">
         <fileset dir="${src.test}" includes="**/Test*.java"/>
@@ -73,11 +79,6 @@
       <fileset dir="${lib.dir}"/>
       <mapper type="flatten"/>
     </copy>
-    <copy todir="${build.war.classes}" overwrite="true">
-      <fileset dir="${conf.dir}">
-        <include name="zoo.cfg"/>
-      </fileset>
-    </copy>
     
     <war destfile="${build.dir}/${war.file}" webxml="${web.xml.file}">
       <lib dir="${build.dir}/lib"/>

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java?rev=791050&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java Sat Jul  4 02:16:16 2009
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+/**
+ * Common interface for models capable of supporting protobuf marshalling
+ * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and
+ * ProtobufMessageBodyProducer adapters. 
+ */
+public abstract interface ProtobufMessageHandler {
+  /**
+   * @return the protobuf representation of the model
+   */
+  public byte[] createProtobufOutput();
+
+  /**
+   * Initialize the model from a protobuf representation.
+   * @param message the raw bytes of the protobuf message
+   * @return reference to self for convenience
+   * @throws IOException
+   */
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+    throws IOException;
+}

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java Sat Jul  4 02:16:16 2009
@@ -29,6 +29,7 @@
 import javax.xml.bind.annotation.XmlValue;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
 
 import com.google.protobuf.ByteString;
@@ -37,10 +38,24 @@
 * Representation of a cell. A cell is a single value associated with a column and
  * optional qualifier, and either the timestamp when it was stored or the user-
  * provided timestamp if one was explicitly supplied.
+ *
+ * <pre>
+ * &lt;complexType name="Cell"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="value" maxOccurs="1" minOccurs="1"&gt;
+ *       &lt;simpleType&gt;
+ *         &lt;restriction base="base64Binary"/&gt;
+ *       &lt;/simpleType&gt;
+ *     &lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="column" type="base64Binary" /&gt;
+ *   &lt;attribute name="timestamp" type="int" /&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="Cell")
 @XmlType(propOrder={"column","timestamp"})
-public class CellModel implements IProtobufWrapper, Serializable {
+public class CellModel implements ProtobufMessageHandler, Serializable {
   private static final long serialVersionUID = 1L;
   
   private long timestamp;
@@ -138,7 +153,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     Cell.Builder builder = Cell.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java Sat Jul  4 02:16:16 2009
@@ -29,6 +29,7 @@
 import javax.xml.bind.annotation.XmlElement;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;
 
@@ -37,9 +38,38 @@
 /**
  * Representation of a grouping of cells. May contain cells from more than
  * one row. Encapsulates RowModel and CellModel models.
+ * 
+ * <pre>
+ * &lt;complexType name="CellSet"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="row" type="tns:Row" maxOccurs="unbounded" 
+ *       minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ * 
+ * &lt;complexType name="Row"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="key" type="base64Binary"&gt;&lt;/element&gt;
+ *     &lt;element name="cell" type="tns:Cell" 
+ *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ *
+ * &lt;complexType name="Cell"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="value" maxOccurs="1" minOccurs="1"&gt;
+ *       &lt;simpleType&gt;
+ *         &lt;restriction base="base64Binary"/&gt;
+ *       &lt;/simpleType&gt;
+ *     &lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="column" type="base64Binary" /&gt;
+ *   &lt;attribute name="timestamp" type="int" /&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="CellSet")
-public class CellSetModel implements Serializable, IProtobufWrapper {
+public class CellSetModel implements Serializable, ProtobufMessageHandler {
 
   private static final long serialVersionUID = 1L;
   
@@ -97,7 +127,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     CellSet.Builder builder = CellSet.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java Sat Jul  4 02:16:16 2009
@@ -35,6 +35,13 @@
 
 /**
  * Representation of a column family schema.
+ * 
+ * <pre>
+ * &lt;complexType name="ColumnSchema"&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ *   &lt;anyAttribute&gt;&lt;/anyAttribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="ColumnSchema")
 @XmlType(propOrder = {"name"})

Added: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd?rev=791050&view=auto
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd (added)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd Sat Jul  4 02:16:16 2009
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<schema targetNamespace="ModelSchema" elementFormDefault="qualified" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="ModelSchema">
+
+    <element name="CellSet" type="tns:CellSet"></element>
+    
+    <complexType name="CellSet">
+    	<sequence>
+    		<element name="row" type="tns:Row" maxOccurs="unbounded" minOccurs="1"></element>
+    	</sequence>
+    </complexType>
+
+    <complexType name="Row">
+    	<sequence>
+    		<element name="key" type="base64Binary"></element>
+    		<element name="cell" type="tns:Cell" maxOccurs="unbounded" minOccurs="1"></element>
+    	</sequence>
+    </complexType>
+
+    <complexType name="Cell">
+    	<sequence>
+    		<element name="value" maxOccurs="1" minOccurs="1"><simpleType><restriction base="base64Binary"></restriction></simpleType></element>
+    	</sequence>
+    	<attribute name="column" type="base64Binary" />
+    	<attribute name="timestamp" type="int" />
+    </complexType>
+
+    <element name="Version" type="tns:Version"></element>
+    
+    <complexType name="Version">
+      <attribute name="Stargate" type="string"></attribute>
+      <attribute name="JVM" type="string"></attribute>
+      <attribute name="OS" type="string"></attribute>
+      <attribute name="Server" type="string"></attribute>
+      <attribute name="Jersey" type="string"></attribute>
+    </complexType>
+
+
+    <element name="TableList" type="tns:TableList"></element>
+    
+    <complexType name="TableList">
+    	<sequence>
+    		<element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element>
+    	</sequence>
+    </complexType>
+
+    <complexType name="Table">
+    	<sequence>
+    		<element name="name" type="string"></element>
+    	</sequence>
+    </complexType>
+
+    <element name="TableInfo" type="tns:TableInfo"></element>
+    
+    <complexType name="TableInfo">
+    	<sequence>
+    		<element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element>
+    	</sequence>
+    	<attribute name="name" type="string"></attribute>
+    </complexType>
+
+    <complexType name="TableRegion">
+    	<attribute name="name" type="string"></attribute>
+    	<attribute name="id" type="int"></attribute>
+    	<attribute name="startKey" type="base64Binary"></attribute>
+    	<attribute name="endKey" type="base64Binary"></attribute>
+    	<attribute name="location" type="string"></attribute>
+    </complexType>
+
+    <element name="TableSchema" type="tns:TableSchema"></element>
+    
+    <complexType name="TableSchema">
+    	<sequence>
+    		<element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element>
+    	</sequence>
+    	<attribute name="name" type="string"></attribute>
+    	<anyAttribute></anyAttribute>
+    </complexType>
+
+    <complexType name="ColumnSchema">
+    	<attribute name="name" type="string"></attribute>
+    	<anyAttribute></anyAttribute>
+    </complexType>
+
+    <element name="Scanner" type="tns:Scanner"></element>
+    
+    <complexType name="Scanner">
+    	<attribute name="startRow" type="base64Binary"></attribute>
+    	<attribute name="endRow" type="base64Binary"></attribute>
+    	<attribute name="columns" type="base64Binary"></attribute>
+    	<attribute name="batch" type="int"></attribute>
+    	<attribute name="startTime" type="int"></attribute>
+    	<attribute name="endTime" type="int"></attribute>
+    </complexType>
+
+    <element name="StorageClusterVersion"
+    	type="tns:StorageClusterVersion">
+    </element>
+    
+    <complexType name="StorageClusterVersion">
+    	<attribute name="version" type="string"></attribute>
+    </complexType>
+
+    <element name="StorageClusterStatus"
+    	type="tns:StorageClusterStatus">
+    </element>
+    
+    <complexType name="StorageClusterStatus">
+    	<sequence>
+    		<element name="liveNode" type="tns:Node"
+    			maxOccurs="unbounded" minOccurs="0">
+    		</element>
+    		<element name="deadNode" type="string" maxOccurs="unbounded"
+    			minOccurs="0">
+    		</element>
+    	</sequence>
+    	<attribute name="regions" type="int"></attribute>
+    	<attribute name="requests" type="int"></attribute>
+    	<attribute name="averageLoad" type="float"></attribute>
+    </complexType>
+
+    <complexType name="Node">
+    	<sequence>
+    		<element name="region" type="tns:Region" maxOccurs="unbounded" minOccurs="0"></element>
+    	</sequence>
+    	<attribute name="name" type="string"></attribute>
+    	<attribute name="startCode" type="int"></attribute>
+    	<attribute name="requests" type="int"></attribute>
+    </complexType>
+
+    <complexType name="Region">
+    	<attribute name="name" type="base64Binary"></attribute>
+    </complexType>
+</schema>
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java Sat Jul  4 02:16:16 2009
@@ -29,13 +29,25 @@
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+
 /**
  * Representation of a row. A row is a related set of cells, grouped by common
  * row key. RowModels do not appear in results by themselves. They are always
  * encapsulated within CellSetModels.
+ * 
+ * <pre>
+ * &lt;complexType name="Row"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="key" type="base64Binary"&gt;&lt;/element&gt;
+ *     &lt;element name="cell" type="tns:Cell" 
+ *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="Row")
-public class RowModel implements IProtobufWrapper, Serializable {
+public class RowModel implements ProtobufMessageHandler, Serializable {
   private static final long serialVersionUID = 1L;
 
   private byte[] key;
@@ -121,7 +133,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     // there is no standalone row protobuf message
     throw new UnsupportedOperationException(

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java Sat Jul  4 02:16:16 2009
@@ -29,6 +29,7 @@
 import javax.xml.bind.annotation.XmlRootElement;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -36,9 +37,20 @@
 
 /**
  * A representation of Scanner parameters.
+ * 
+ * <pre>
+ * &lt;complexType name="Scanner"&gt;
+ *   &lt;attribute name="startRow" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="endRow" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="columns" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="batch" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="startTime" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="endTime" type="int"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="Scanner")
-public class ScannerModel implements IProtobufWrapper, Serializable {
+public class ScannerModel implements ProtobufMessageHandler, Serializable {
   private static final long serialVersionUID = 1L;
 
   private byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -223,7 +235,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     Scanner.Builder builder = Scanner.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java Sat Jul  4 02:16:16 2009
@@ -30,6 +30,7 @@
 import javax.xml.bind.annotation.XmlElementWrapper;
 import javax.xml.bind.annotation.XmlRootElement;
 
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -46,10 +47,40 @@
  * <li>liveNodes: detailed status of the live region servers</li>
  * <li>deadNodes: the names of region servers declared dead</li>
  * </ul>
+ * 
+ * <pre>
+ * &lt;complexType name="StorageClusterStatus"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="liveNode" type="tns:Node"
+ *       maxOccurs="unbounded" minOccurs="0"&gt;
+ *     &lt;/element&gt;
+ *     &lt;element name="deadNode" type="string" maxOccurs="unbounded"
+ *       minOccurs="0"&gt;
+ *     &lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="regions" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="requests" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="averageLoad" type="float"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ *
+ * &lt;complexType name="Node"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="region" type="tns:Region" 
+ *       maxOccurs="unbounded" minOccurs="0"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="startCode" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="requests" type="int"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ *
+ * &lt;complexType name="Region"&gt;
+ *   &lt;attribute name="name" type="base64Binary"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="ClusterStatus")
 public class StorageClusterStatusModel 
-    implements Serializable, IProtobufWrapper {
+    implements Serializable, ProtobufMessageHandler {
 	private static final long serialVersionUID = 1L;
 
 	/**
@@ -382,7 +413,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java Sat Jul  4 02:16:16 2009
@@ -26,7 +26,13 @@
 import javax.xml.bind.annotation.XmlValue;
 
 /**
- * Simple representation of the version of the storage cluster (HBase)
+ * Simple representation of the version of the storage cluster
+ * 
+ * <pre>
+ * &lt;complexType name="StorageClusterVersion"&gt;
+ *   &lt;attribute name="version" type="string"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="ClusterVersion")
 public class StorageClusterVersionModel implements Serializable {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java Sat Jul  4 02:16:16 2009
@@ -30,16 +30,27 @@
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlType;
 
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo;
 
 import com.google.protobuf.ByteString;
 
 /**
  * Representation of a list of table regions. 
+ * 
+ * <pre>
+ * &lt;complexType name="TableInfo"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="region" type="tns:TableRegion" 
+ *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="TableInfo")
 @XmlType(propOrder = {"name","regions"})
-public class TableInfoModel implements Serializable, IProtobufWrapper {
+public class TableInfoModel implements Serializable, ProtobufMessageHandler {
   private static final long serialVersionUID = 1L;
 
   private String name;
@@ -134,7 +145,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message) 
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message) 
       throws IOException {
     TableInfo.Builder builder = TableInfo.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java Sat Jul  4 02:16:16 2009
@@ -28,13 +28,14 @@
 import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList;
 
 /**
  * Simple representation of a list of table names.
  */
 @XmlRootElement(name="TableList")
-public class TableListModel implements Serializable, IProtobufWrapper {
+public class TableListModel implements Serializable, ProtobufMessageHandler {
 
 	private static final long serialVersionUID = 1L;
 
@@ -99,7 +100,7 @@
 	}
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     TableList.Builder builder = TableList.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java Sat Jul  4 02:16:16 2009
@@ -27,6 +27,14 @@
 
 /**
  * Simple representation of a table name.
+ * 
+ * <pre>
+ * &lt;complexType name="Table"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="name" type="string"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="table")
 public class TableModel implements Serializable {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java Sat Jul  4 02:16:16 2009
@@ -31,6 +31,16 @@
 /**
  * Representation of a region of a table and its current location on the
  * storage cluster.
+ * 
+ * <pre>
+ * &lt;complexType name="TableRegion"&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="id" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="startKey" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="endKey" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="location" type="string"&gt;&lt;/attribute&gt;
+ *  &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="Region")
 @XmlType(propOrder = {"name","id","startKey","endKey","location"})

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java Sat Jul  4 02:16:16 2009
@@ -38,17 +38,28 @@
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;
 
 /**
  * A representation of HBase table descriptors.
+ * 
+ * <pre>
+ * &lt;complexType name="TableSchema"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="column" type="tns:ColumnSchema" 
+ *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ *   &lt;anyAttribute&gt;&lt;/anyAttribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
  */
 @XmlRootElement(name="TableSchema")
 @XmlType(propOrder = {"name","columns"})
-public class TableSchemaModel implements Serializable, IProtobufWrapper {
+public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
   private static final long serialVersionUID = 1L;
-  private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
   private static final QName IS_META = new QName(HTableDescriptor.IS_META);
   private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
   private static final QName READONLY = new QName(HTableDescriptor.READONLY);
@@ -177,15 +188,6 @@
   // confuse JAXB
 
   /**
-   * @return true if IN_MEMORY attribute exists and is true
-   */
-  public boolean __getInMemory() {
-    Object o = attrs.get(IN_MEMORY);
-    return o != null ? 
-      Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_IN_MEMORY;
-  }
-
-  /**
   * @return true if IS_META attribute exists and is true
    */
   public boolean __getIsMeta() {
@@ -211,13 +213,6 @@
   }
 
   /**
-   * @param value desired value of IN_MEMORY attribute
-   */
-  public void __setInMemory(boolean value) {
-    attrs.put(IN_MEMORY, Boolean.toString(value));
-  }
-
-  /**
    * @param value desired value of IS_META attribute
    */
   public void __setIsMeta(boolean value) {
@@ -273,10 +268,6 @@
       }
       builder.addColumns(familyBuilder);
     }
-    if (attrs.containsKey(IN_MEMORY)) {
-      builder.setInMemory(
-        Boolean.valueOf(attrs.get(IN_MEMORY).toString()));
-    }
     if (attrs.containsKey(READONLY)) {
       builder.setReadOnly(
         Boolean.valueOf(attrs.get(READONLY).toString()));
@@ -285,7 +276,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message) 
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message) 
       throws IOException {
     TableSchema.Builder builder = TableSchema.newBuilder();
     builder.mergeFrom(message);
@@ -293,9 +284,6 @@
     for (TableSchema.Attribute attr: builder.getAttrsList()) {
       this.addAttribute(attr.getName(), attr.getValue());
     }
-    if (builder.hasInMemory()) {
-      this.addAttribute(HConstants.IN_MEMORY, builder.getInMemory());
-    }
     if (builder.hasReadOnly()) {
       this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
     }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java Sat Jul  4 02:16:16 2009
@@ -27,6 +27,7 @@
 import javax.xml.bind.annotation.XmlAttribute;
 import javax.xml.bind.annotation.XmlRootElement;
 
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.RESTServlet;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;
 
@@ -44,7 +45,7 @@
  * </ul>
  */
 @XmlRootElement(name="Version")
-public class VersionModel implements Serializable, IProtobufWrapper {
+public class VersionModel implements Serializable, ProtobufMessageHandler {
 
 	private static final long serialVersionUID = 1L;
 
@@ -183,7 +184,7 @@
   }
 
   @Override
-  public IProtobufWrapper getObjectFromMessage(byte[] message)
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
       throws IOException {
     Version.Builder builder = Version.newBuilder();
     builder.mergeFrom(message);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java Sat Jul  4 02:16:16 2009
@@ -83,7 +83,6 @@
 
 	@Override
 	public JAXBContext getContext(Class<?> objectType) {
-		System.out.println("Executed getContext");
 		return (types.contains(objectType)) ? context : null;
   }
 }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java Sat Jul  4 02:16:16 2009
@@ -36,31 +36,31 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.stargate.Constants;
-import org.apache.hadoop.hbase.stargate.model.IProtobufWrapper;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 
 /**
  * Adapter for hooking up Jersey content processing dispatch to
- * IProtobufWrapper interface capable handlers for decoding protobuf input.
+ * ProtobufMessageHandler interface capable handlers for decoding protobuf input.
  */
 @Provider
 @Consumes(Constants.MIMETYPE_PROTOBUF)
 public class ProtobufMessageBodyConsumer 
-    implements MessageBodyReader<IProtobufWrapper> {
+    implements MessageBodyReader<ProtobufMessageHandler> {
   private static final Log LOG =
     LogFactory.getLog(ProtobufMessageBodyConsumer.class);
 
   @Override
   public boolean isReadable(Class<?> type, Type genericType,
       Annotation[] annotations, MediaType mediaType) {
-    return IProtobufWrapper.class.isAssignableFrom(type);
+    return ProtobufMessageHandler.class.isAssignableFrom(type);
   }
 
   @Override
-  public IProtobufWrapper readFrom(Class<IProtobufWrapper> type, Type genericType,
+  public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
       Annotation[] annotations, MediaType mediaType,
       MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
       throws IOException, WebApplicationException {
-    IProtobufWrapper obj = null;
+    ProtobufMessageHandler obj = null;
     try {
       obj = type.newInstance();
       ByteArrayOutputStream baos = new ByteArrayOutputStream();

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java Sat Jul  4 02:16:16 2009
@@ -36,10 +36,10 @@
 import javax.ws.rs.ext.Provider;
 
 import org.apache.hadoop.hbase.stargate.Constants;
-import org.apache.hadoop.hbase.stargate.model.IProtobufWrapper;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 
 /**
- * An adapter between Jersey and IProtobufWrapper implementors. Hooks up
+ * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up
  * protobuf output producing methods to the Jersey content handling framework.
  * Jersey will first call getSize() to learn the number of bytes that will be
  * sent, then writeTo to perform the actual I/O.
@@ -47,18 +47,18 @@
 @Provider
 @Produces(Constants.MIMETYPE_PROTOBUF)
 public class ProtobufMessageBodyProducer
-  implements MessageBodyWriter<IProtobufWrapper> {
+  implements MessageBodyWriter<ProtobufMessageHandler> {
 
   private Map<Object, byte[]> buffer = new WeakHashMap<Object, byte[]>();
 
 	@Override
 	public boolean isWriteable(Class<?> type, Type genericType, 
 	  Annotation[] annotations, MediaType mediaType) {
-      return IProtobufWrapper.class.isAssignableFrom(type);
+      return ProtobufMessageHandler.class.isAssignableFrom(type);
   }
 
 	@Override
-	public long getSize(IProtobufWrapper m, Class<?> type, Type genericType,
+	public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
 	    Annotation[] annotations, MediaType mediaType) {
 	  ByteArrayOutputStream baos = new ByteArrayOutputStream();
 	  try {
@@ -71,7 +71,7 @@
 	  return bytes.length;
 	}
 
-	public void writeTo(IProtobufWrapper m, Class<?> type, Type genericType,
+	public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
 	    Annotation[] annotations, MediaType mediaType, 
 	    MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream) 
 	    throws IOException, WebApplicationException {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java Sat Jul  4 02:16:16 2009
@@ -25,6 +25,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -34,7 +35,6 @@
 import org.apache.hadoop.hbase.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java Sat Jul  4 02:16:16 2009
@@ -35,7 +35,6 @@
 public class TestTableSchemaModel extends TestCase {
 
   public static final String TABLE_NAME = "testTable";
-  private static final boolean IN_MEMORY = false;
   private static final boolean IS_META = false;
   private static final boolean IS_ROOT = false;
   private static final boolean READONLY = false;
@@ -44,8 +43,7 @@
     "<TableSchema name=\"testTable\"" +
       " IS_META=\"false\"" +
       " IS_ROOT=\"false\"" +
-      " READONLY=\"false\"" +
-      " IN_MEMORY=\"false\">" +
+      " READONLY=\"false\">" +
       TestColumnSchemaModel.AS_XML + 
     "</TableSchema>";
 
@@ -72,7 +70,6 @@
   public static TableSchemaModel buildTestModel(String name) {
     TableSchemaModel model = new TableSchemaModel();
     model.setName(name);
-    model.__setInMemory(IN_MEMORY);
     model.__setIsMeta(IS_META);
     model.__setIsRoot(IS_ROOT);
     model.__setReadOnly(READONLY);
@@ -108,7 +105,6 @@
 
   public static void checkModel(TableSchemaModel model, String tableName) {
     assertEquals(model.getName(), tableName);
-    assertEquals(model.__getInMemory(), IN_MEMORY);
     assertEquals(model.__getIsMeta(), IS_META);
     assertEquals(model.__getIsRoot(), IS_ROOT);
     assertEquals(model.__getReadOnly(), READONLY);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLog.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLog.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLog.java Sat Jul  4 02:16:16 2009
@@ -23,6 +23,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
@@ -31,7 +32,6 @@
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 /** JUnit test case for HLog */
 public class TestTHLog extends HBaseTestCase implements

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java Sat Jul  4 02:16:16 2009
@@ -75,6 +75,9 @@
   /** default port for master web api */
   static final int DEFAULT_MASTER_INFOPORT = 60010;
 
+  /** Name of ZooKeeper quorum configuration parameter. */
+  static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";
+
   /** Name of ZooKeeper config file in conf/ directory. */
   static final String ZOOKEEPER_CONFIG_NAME = "zoo.cfg";
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Sat Jul  4 02:16:16 2009
@@ -90,8 +90,6 @@
   private static final ImmutableBytesWritable TRUE =
     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
 
-  public static final boolean DEFAULT_IN_MEMORY = false;
-
   public static final boolean DEFAULT_READONLY = false;
 
   public static final int DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64;
@@ -353,25 +351,6 @@
   }
 
   /**
-   * @return true if all columns in the table should be kept in the 
-   * HRegionServer cache only
-   */
-  public boolean isInMemory() {
-    String value = getValue(HConstants.IN_MEMORY);
-    if (value != null)
-      return Boolean.valueOf(value).booleanValue();
-    return DEFAULT_IN_MEMORY;
-  }
-
-  /**
-   * @param inMemory True if all of the columns in the table should be kept in
-   * the HRegionServer cache only.
-   */
-  public void setInMemory(boolean inMemory) {
-    setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
-  }
-
-  /**
    * @return true if all columns in the table should be read only
    */
   public boolean isReadOnly() {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java Sat Jul  4 02:16:16 2009
@@ -639,9 +639,9 @@
     int familylength = b[columnoffset - 1];
     int columnlength = l - ((columnoffset - o) + TIMESTAMP_TYPE_SIZE);
     String family = familylength == 0? "":
-      Bytes.toString(b, columnoffset, familylength);
+      Bytes.toStringBinary(b, columnoffset, familylength);
     String qualifier = columnlength == 0? "":
-      Bytes.toString(b, columnoffset + familylength,
+      Bytes.toStringBinary(b, columnoffset + familylength,
       columnlength - familylength);
     long timestamp = Bytes.toLong(b, o + (l - TIMESTAMP_TYPE_SIZE));
     byte type = b[o + l - 1];

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/RegionHistorian.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/RegionHistorian.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/RegionHistorian.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/RegionHistorian.java Sat Jul  4 02:16:16 2009
@@ -33,8 +33,8 @@
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 /**
  * The Region Historian task is to keep track of every modification a region
@@ -49,8 +49,6 @@
   
   private HTable metaTable;
 
-
-
   /** Singleton reference */
   private static RegionHistorian historian;
 
@@ -333,4 +331,7 @@
       LOG.debug("Offlined");
     }
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + ClassSize.REFERENCE);
 }
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java Sat Jul  4 02:16:16 2009
@@ -74,15 +74,7 @@
   public HColumnDescriptor removeFamily(final byte [] column) {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setInMemory(boolean)
-   */
-  @Override
-  public void setInMemory(boolean inMemory) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
+  
   /**
    * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
    */

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java Sat Jul  4 02:16:16 2009
@@ -63,7 +63,7 @@
   public HalfHFileReader(final FileSystem fs, final Path p, final BlockCache c,
     final Reference r)
   throws IOException {
-    super(fs, p, c);
+    super(fs, p, c, false);
     // This is not actual midkey for this half-file; its just border
     // around which we split top and bottom.  Have to look in files to find
     // actual last and first keys for bottom and top halves.  Half-files don't

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Sat Jul  4 02:16:16 2009
@@ -513,8 +513,7 @@
       }
     }
 
-    private void checkValue(final byte [] value,
-        @SuppressWarnings("unused") final int offset,
+    private void checkValue(final byte [] value, final int offset,
         final int length) throws IOException {
       if (value == null) {
         throw new IOException("Value cannot be null");
@@ -658,6 +657,9 @@
     private final BlockCache cache;
     public int cacheHits = 0;
     public int blockLoads = 0;
+    
+    // Whether file is from in-memory store
+    private boolean inMemory = false;
 
     // Name for this object used when logging or in toString.  Is either
     // the result of a toString on the stream or else is toString of passed
@@ -669,7 +671,7 @@
      */
     @SuppressWarnings("unused")
     private Reader() throws IOException {
-      this(null, null, null);
+      this(null, null, null, false);
     }
 
     /** 
@@ -681,9 +683,9 @@
      * @param cache block cache. Pass null if none.
      * @throws IOException
      */
-    public Reader(FileSystem fs, Path path, BlockCache cache)
+    public Reader(FileSystem fs, Path path, BlockCache cache, boolean inMemory)
     throws IOException {
-      this(fs.open(path), fs.getFileStatus(path).getLen(), cache);
+      this(fs.open(path), fs.getFileStatus(path).getLen(), cache, inMemory);
       this.closeIStream = true;
       this.name = path.toString();
     }
@@ -699,13 +701,13 @@
      * @throws IOException
      */
     public Reader(final FSDataInputStream fsdis, final long size,
-        final BlockCache cache)
-    throws IOException {
+        final BlockCache cache, final boolean inMemory) {
       this.cache = cache;
       this.fileSize = size;
       this.istream = fsdis;
       this.closeIStream = false;
       this.name = this.istream.toString();
+      this.inMemory = inMemory;
     }
 
     @Override
@@ -713,6 +715,7 @@
       return "reader=" + this.name +
           (!isFileInfoLoaded()? "":
             ", compression=" + this.compressAlgo.getName() +
+            ", inMemory=" + this.inMemory +
             ", firstKey=" + toStringFirstKey() +
             ", lastKey=" + toStringLastKey()) +
             ", avgKeyLen=" + this.avgKeyLen +
@@ -722,17 +725,21 @@
     }
 
     protected String toStringFirstKey() {
-      return Bytes.toStringBinary(getFirstKey());
+      return KeyValue.keyToString(getFirstKey());
     }
 
     protected String toStringLastKey() {
-      return Bytes.toStringBinary(getFirstKey());
+      return KeyValue.keyToString(getFirstKey());
     }
 
     public long length() {
       return this.fileSize;
     }
-
+    
+    public boolean inMemory() {
+      return this.inMemory;
+    }
+       
     /**
      * Read in the index and file info.
      * @return A map of fileinfo data.
@@ -918,15 +925,26 @@
         buf.limit(buf.limit() - DATABLOCKMAGIC.length);
         buf.rewind();
 
-        // Cache a copy, not the one we are sending back, so the position doesnt
-        // get messed.
-        if (cache != null) {
-          cache.cacheBlock(name + block, buf.duplicate());
-        }
+        // Cache the block
+        cacheBlock(name + block, buf.duplicate());
 
         return buf;
       }
     }
+    
+    /**
+     * Cache this block if there is a block cache available.<p>
+     * 
+     * Makes a copy of the ByteBuffer, not the one we are sending back, so the 
+     * position does not get messed up.
+     * @param blockName
+     * @param buf
+     */
+    void cacheBlock(String blockName, ByteBuffer buf) {
+      if (cache != null) {
+        cache.cacheBlock(blockName, buf.duplicate(), inMemory);
+      }
+    }
 
     /*
      * Decompress <code>compressedSize</code> bytes off the backing
@@ -1241,6 +1259,36 @@
       return trailer.toString();
     }
   }
+  
+
+  /**
+   * HFile Reader that does not cache blocks that were not already cached.<p>
+   * 
+   * Used for compactions.
+   */
+  public static class CompactionReader extends Reader {
+    public CompactionReader(Reader reader) {
+      super(reader.istream, reader.fileSize, reader.cache, reader.inMemory);
+      super.blockIndex = reader.blockIndex;
+      super.trailer = reader.trailer;
+      super.lastkey = reader.lastkey;
+      super.avgKeyLen = reader.avgKeyLen;
+      super.avgValueLen = reader.avgValueLen;
+      super.comparator = reader.comparator;
+      super.metaIndex = reader.metaIndex;
+      super.fileInfoLoaded = reader.fileInfoLoaded;
+      super.compressAlgo = reader.compressAlgo;
+    }
+    
+    /**
+     * Do not cache this block when doing a compaction.
+     */
+    @Override
+    void cacheBlock(String blockName, ByteBuffer buf) {
+      return;
+    }
+  }
+  
   /*
    * The RFile has a fixed trailer which contains offsets to other variable
    * parts of the file.  Also includes basic metadata on this file.
@@ -1586,7 +1634,7 @@
       return;
     }
 
-    HFile.Reader reader = new HFile.Reader(fs, path, null);
+    HFile.Reader reader = new HFile.Reader(fs, path, null, false);
     Map<byte[],byte[]> fileInfo = reader.loadFileInfo();
 
     // scan thru and count the # of unique rows.

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Sat Jul  4 02:16:16 2009
@@ -372,8 +372,16 @@
         remainingBuckets--;
       }
       
+      float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024));
+      float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024));
+      float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024));
+      
       LOG.debug("Block cache LRU eviction completed. " + 
-          "Freed " + bytesFreed + " bytes");
+          "Freed " + bytesFreed + " bytes.  " +
+          "Priority Sizes: " +
+          "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " +
+          "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," +
+          "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")");
       
     } finally {
       stats.evict();
@@ -424,6 +432,10 @@
       return totalSize - bucketSize;
     }
     
+    public long totalSize() {
+      return totalSize;
+    }
+    
     public int compareTo(BlockBucket that) {
       if(this.overflow() == that.overflow()) return 0;
       return this.overflow() > that.overflow() ? 1 : -1;
@@ -539,19 +551,15 @@
     LruBlockCache.LOG.debug("Cache Stats: Sizes: " + 
         "Total=" + sizeMB + "MB (" + totalSize + "), " +
         "Free=" + freeMB + "MB (" + freeSize + "), " +
-        "Max=" + maxMB + "MB (" + maxSize +")");
-    
-    // Log hit/miss and eviction counts
-    LruBlockCache.LOG.debug("Cache Stats: Counts: " +
+        "Max=" + maxMB + "MB (" + maxSize +")" +
+      ", Counts: " +
         "Blocks=" + size() +", " +
         "Access=" + stats.getRequestCount() + ", " +
         "Hit=" + stats.getHitCount() + ", " +
         "Miss=" + stats.getMissCount() + ", " +
         "Evictions=" + stats.getEvictionCount() + ", " +
-        "Evicted=" + stats.getEvictedCount());
-    
-    // Log hit/miss and eviction ratios
-    LruBlockCache.LOG.debug("Cache Stats: Ratios: " +
+        "Evicted=" + stats.getEvictedCount() +
+      ", Ratios: " +
         "Hit Ratio=" + stats.getHitRatio()*100 + "%, " +
         "Miss Ratio=" + stats.getMissRatio()*100 + "%, " +
         "Evicted/Run=" + stats.evictedPerEviction());

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Sat Jul  4 02:16:16 2009
@@ -55,6 +55,7 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.io.SequenceFile;
@@ -111,7 +112,7 @@
   private final int flushlogentries;
   private final AtomicInteger unflushedEntries = new AtomicInteger(0);
   private volatile long lastLogFlushTime;
-
+  
   /*
    * Current log file.
    */
@@ -1117,4 +1118,9 @@
       }
     }
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
+      ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
+  
 }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Jul  4 02:16:16 2009
@@ -54,9 +54,11 @@
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.util.Progressable;
@@ -98,7 +100,7 @@
  * regionName is a unique identifier for this HRegion. (startKey, endKey]
  * defines the keyspace for this HRegion.
  */
-public class HRegion implements HConstants { // , Writable{
+public class HRegion implements HConstants, HeapSize { // , Writable{
   static final Log LOG = LogFactory.getLog(HRegion.class);
   static final String SPLITDIR = "splits";
   static final String MERGEDIR = "merges";
@@ -2322,45 +2324,28 @@
             + " in table " + regionInfo.getTableDesc());
     }
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      (3 * Bytes.SIZEOF_LONG) + (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN +
+      (21 * ClassSize.REFERENCE) + ClassSize.OBJECT);
+  
+  public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
+      ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) + 
+      ClassSize.ATOMIC_LONG + ClassSize.ATOMIC_INTEGER +
+      ClassSize.CONCURRENT_HASHMAP + 
+      (16 * ClassSize.CONCURRENT_HASHMAP_ENTRY) + 
+      (16 * ClassSize.CONCURRENT_HASHMAP_SEGMENT) +
+      ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY +
+      RegionHistorian.FIXED_OVERHEAD + HLog.FIXED_OVERHEAD +
+      ClassSize.align(ClassSize.OBJECT + (5 * Bytes.SIZEOF_BOOLEAN)) +
+      (3 * ClassSize.REENTRANT_LOCK));
   
-  
-//  //HBaseAdmin Debugging 
-//  /**
-//   * @return number of stores in the region
-//   */
-//  public int getNumStores() {
-//    return this.numStores;
-//  }
-//  /**
-//   * @return the name of the region
-//   */
-//  public byte [] getRegionsName() {
-//    return this.name;
-//  }
-//  /**
-//   * @return the number of files in every store
-//   */
-//  public int [] getStoresSize() {
-//    return this.storeSize;
-//  }
-//  
-//  //Writable, used for debugging purposes only
-//  public void readFields(final DataInput in)
-//  throws IOException {
-//    this.name = Bytes.readByteArray(in);
-//    this.numStores = in.readInt();
-//    this.storeSize = new int [numStores];
-//    for(int i=0; i<this.numStores; i++) {
-//      this.storeSize[i] = in.readInt();
-//    }
-//  }
-//
-//  public void write(final DataOutput out)
-//  throws IOException {
-//    Bytes.writeByteArray(out, this.regionInfo.getRegionName());
-//    out.writeInt(this.stores.size());
-//    for(Store store : this.stores.values()) {
-//      out.writeInt(store.getNumberOfstorefiles());
-//    }
-//  }
+  @Override
+  public long heapSize() {
+    long heapSize = DEEP_OVERHEAD;
+    for(Store store : this.stores.values()) {
+      heapSize += store.heapSize();
+    }
+    return heapSize;
+  }
 }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java Sat Jul  4 02:16:16 2009
@@ -30,14 +30,17 @@
 import java.util.NavigableSet;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.regionserver.DeleteCompare.DeleteCode;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 /**
  * The MemStore holds in-memory modifications to the Store.  Modifications
@@ -50,7 +53,7 @@
  * TODO: With new KVSLS, need to make sure we update HeapSize with difference
  * in KV size.
  */
-class MemStore {
+public class MemStore implements HeapSize {
   private static final Log LOG = LogFactory.getLog(MemStore.class);
 
   private final long ttl;
@@ -75,8 +78,8 @@
   // Used comparing versions -- same r/c and type but different timestamp.
   final KeyValue.KVComparator comparatorIgnoreTimestamp;
 
-  // TODO: Fix this guess by studying jprofiler
-  private final static int ESTIMATED_KV_HEAP_TAX = 60;
+  // Used to track own heapSize
+  final AtomicLong size;
 
   /**
    * Default constructor. Used for tests.
@@ -98,6 +101,7 @@
     this.comparatorIgnoreType = this.comparator.getComparatorIgnoringType();
     this.kvset = new KeyValueSkipListSet(c);
     this.snapshot = new KeyValueSkipListSet(c);
+    this.size = new AtomicLong(DEEP_OVERHEAD);
   }
 
   void dump() {
@@ -129,6 +133,8 @@
         if (!this.kvset.isEmpty()) {
           this.snapshot = this.kvset;
           this.kvset = new KeyValueSkipListSet(this.comparator);
+          // Reset heap to not include any keys
+          this.size.set(DEEP_OVERHEAD);
         }
       }
     } finally {
@@ -181,7 +187,8 @@
     long size = -1;
     this.lock.readLock().lock();
     try {
-      size = heapSize(kv, this.kvset.add(kv));
+      size = heapSizeChange(kv, this.kvset.add(kv));
+      this.size.addAndGet(size);
     } finally {
       this.lock.readLock().unlock();
     }
@@ -254,33 +261,19 @@
       //Delete all the entries effected by the last added delete
       for (KeyValue kv : deletes) {
         notpresent = this.kvset.remove(kv);
-        size -= heapSize(kv, notpresent);
+        size -= heapSizeChange(kv, notpresent);
       }
       
       // Adding the delete to memstore. Add any value, as long as
       // same instance each time.
-      size += heapSize(delete, this.kvset.add(delete));
+      size += heapSizeChange(delete, this.kvset.add(delete));
     } finally {
       this.lock.readLock().unlock();
     }
+    this.size.addAndGet(size);
     return size;
   }
   
-  /*
-   * Calculate how the memstore size has changed, approximately.  Be careful.
-   * If class changes, be sure to change the size calculation.
-   * Add in tax of Map.Entry.
-   * @param kv
-   * @param notpresent True if the kv was NOT present in the set.
-   * @return Size
-   */
-  long heapSize(final KeyValue kv, final boolean notpresent) {
-    return notpresent?
-      // Add overhead for value byte array and for Map.Entry -- 57 bytes
-      // on x64 according to jprofiler.
-      ESTIMATED_KV_HEAP_TAX + 57 + kv.getLength(): 0; // Guess no change in size.
-  }
-
   /**
    * @param kv Find the row that comes after this one.  If null, we return the
    * first.
@@ -694,6 +687,42 @@
       }
     }
   }
+  
+  public final static long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + Bytes.SIZEOF_LONG + (7 * ClassSize.REFERENCE));
+  
+  public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
+      ClassSize.REENTRANT_LOCK + ClassSize.ATOMIC_LONG +
+      (2 * ClassSize.CONCURRENT_SKIPLISTMAP));
+
+  /*
+   * Calculate how the MemStore size has changed.  Includes overhead of the
+   * backing Map.
+   * @param kv
+   * @param notpresent True if the kv was NOT present in the set.
+   * @return Size
+   */
+  long heapSizeChange(final KeyValue kv, final boolean notpresent) {
+    return notpresent ? 
+        ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()):
+        0;
+  }
+  
+  /**
+   * Get the entire heap usage for this MemStore not including keys in the
+   * snapshot.
+   */
+  @Override
+  public long heapSize() {
+    return size.get();
+  }
+  
+  /**
+   * Get the heap usage of KVs in this MemStore.
+   */
+  public long keySize() {
+    return heapSize() - DEEP_OVERHEAD;
+  }
 
   /**
    * Code to help figure if our approximation of object heap sizes is close