Posted to commits@hbase.apache.org by ec...@apache.org on 2013/02/25 23:50:29 UTC

svn commit: r1449950 [28/35] - in /hbase/trunk: ./ hbase-client/ hbase-client/src/ hbase-client/src/main/ hbase-client/src/main/java/ hbase-client/src/main/java/org/ hbase-client/src/main/java/org/apache/ hbase-client/src/main/java/org/apache/hadoop/ h...

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,462 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Acts as the single ZooKeeper Watcher.  One instance of this is instantiated
+ * for each Master, RegionServer, and client process.
+ *
+ * <p>This is the only class that implements {@link Watcher}.  Other internal
+ * classes which need to be notified of ZooKeeper events must register with
+ * the local instance of this watcher via {@link #registerListener}.
+ *
+ * <p>This class also holds and manages the connection to ZooKeeper.
+ * Connection-related events and exceptions are handled here.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
+  private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class);
+
+  // Identifier for this watcher (for logging only).  It is made of the prefix
+  // passed on construction and the zookeeper sessionid.
+  private String identifier;
+
+  // zookeeper quorum
+  private String quorum;
+
+  // zookeeper connection
+  private RecoverableZooKeeper recoverableZooKeeper;
+
+  // abortable in case of zk failure
+  protected Abortable abortable;
+
+  // listeners to be notified
+  private final List<ZooKeeperListener> listeners =
+    new CopyOnWriteArrayList<ZooKeeperListener>();
+
+  // Used by ZKUtil#waitForZKConnectionIfAuthenticating to wait for SASL
+  // negotiation to complete
+  public CountDownLatch saslLatch = new CountDownLatch(1);
+
+  // node names
+
+  // base znode for this cluster
+  public String baseZNode;
+  // znode containing location of server hosting root region
+  public String rootServerZNode;
+  // znode containing ephemeral nodes of the regionservers
+  public String rsZNode;
+  // znode containing ephemeral nodes of the draining regionservers
+  public String drainingZNode;
+  // znode of currently active master
+  private String masterAddressZNode;
+  // znode of this master in backup master directory, if not the active master
+  public String backupMasterAddressesZNode;
+  // znode containing the current cluster state
+  public String clusterStateZNode;
+  // znode used for region transitioning and assignment
+  public String assignmentZNode;
+  // znode used for table disabling/enabling
+  public String tableZNode;
+  // znode containing the unique cluster ID
+  public String clusterIdZNode;
+  // znode used for log splitting work assignment
+  public String splitLogZNode;
+  // znode containing the state of the load balancer
+  public String balancerZNode;
+  // znode containing the lock for the tables
+  public String tableLockZNode;
+
+  // Certain ZooKeeper nodes need to be world-readable
+  public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
+    new ArrayList<ACL>() { {
+      add(new ACL(ZooDefs.Perms.READ,ZooDefs.Ids.ANYONE_ID_UNSAFE));
+      add(new ACL(ZooDefs.Perms.ALL,ZooDefs.Ids.AUTH_IDS));
+    }};
+
+  private final Configuration conf;
+
+  private final Exception constructorCaller;
+
+  /**
+   * Instantiate a ZooKeeper connection and watcher.
+   * @param identifier string that is passed to RecoverableZookeeper to be used as
+   * identifier for this instance. Use null for default.
+   * @throws IOException
+   * @throws ZooKeeperConnectionException
+   */
+  public ZooKeeperWatcher(Configuration conf, String identifier,
+      Abortable abortable) throws ZooKeeperConnectionException, IOException {
+    this(conf, identifier, abortable, false);
+  }
+  /**
+   * Instantiate a ZooKeeper connection and watcher.
+   * @param identifier string that is passed to RecoverableZookeeper to be used as
+   * identifier for this instance. Use null for default.
+   * @throws IOException
+   * @throws ZooKeeperConnectionException
+   */
+  public ZooKeeperWatcher(Configuration conf, String identifier,
+      Abortable abortable, boolean canCreateBaseZNode)
+  throws IOException, ZooKeeperConnectionException {
+    this.conf = conf;
+    // Capture a stack trace now.  Will print it out later if there is a
+    // problem, so we can distinguish amongst the myriad ZKWs.
+    try {
+      throw new Exception("ZKW CONSTRUCTOR STACK TRACE FOR DEBUGGING");
+    } catch (Exception e) {
+      this.constructorCaller = e;
+    }
+    this.quorum = ZKConfig.getZKQuorumServersString(conf);
+    // The identifier will get the session id appended later, below, when we
+    // handle the SyncConnected event.
+    this.identifier = identifier;
+    this.abortable = abortable;
+    setNodeNames(conf);
+    this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, this, identifier);
+    if (canCreateBaseZNode) {
+      createBaseZNodes();
+    }
+  }
+
+  private void createBaseZNodes() throws ZooKeeperConnectionException {
+    try {
+      // Create all the necessary "directories" of znodes
+      ZKUtil.createWithParents(this, baseZNode);
+      ZKUtil.createAndFailSilent(this, assignmentZNode);
+      ZKUtil.createAndFailSilent(this, rsZNode);
+      ZKUtil.createAndFailSilent(this, drainingZNode);
+      ZKUtil.createAndFailSilent(this, tableZNode);
+      ZKUtil.createAndFailSilent(this, splitLogZNode);
+      ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode);
+      ZKUtil.createAndFailSilent(this, tableLockZNode);
+    } catch (KeeperException e) {
+      throw new ZooKeeperConnectionException(
+          prefix("Unexpected KeeperException creating base node"), e);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return this.identifier;
+  }
+
+  /**
+   * Adds this instance's identifier as a prefix to the passed <code>str</code>.
+   * @param str String to amend.
+   * @return A new string with this instance's identifier as prefix: e.g.
+   * if passed 'hello world', the returned string could be
+   * '&lt;identifier&gt; hello world'.
+   */
+  public String prefix(final String str) {
+    return this.toString() + " " + str;
+  }
+
+  /**
+   * Set the local variable node names using the specified configuration.
+   */
+  private void setNodeNames(Configuration conf) {
+    baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
+        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
+    rootServerZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.rootserver", "root-region-server"));
+    rsZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.rs", "rs"));
+    drainingZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.draining.rs", "draining"));
+    masterAddressZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.master", "master"));
+    backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.backup.masters", "backup-masters"));
+    clusterStateZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.state", "running"));
+    assignmentZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.unassigned", "region-in-transition"));
+    tableZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.tableEnableDisable", "table"));
+    clusterIdZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.clusterId", "hbaseid"));
+    splitLogZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME));
+    balancerZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.balancer", "balancer"));
+    tableLockZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.tableLock", "table-lock"));
+  }
+
+  /**
+   * Register the specified listener to receive ZooKeeper events.
+   * @param listener
+   */
+  public void registerListener(ZooKeeperListener listener) {
+    listeners.add(listener);
+  }
+
+  /**
+   * Register the specified listener to receive ZooKeeper events and add it as
+   * the first in the list of current listeners.
+   * @param listener
+   */
+  public void registerListenerFirst(ZooKeeperListener listener) {
+    listeners.add(0, listener);
+  }
+
+  public void unregisterListener(ZooKeeperListener listener) {
+    listeners.remove(listener);
+  }
+
+  /**
+   * Get the connection to ZooKeeper.
+   * @return connection reference to zookeeper
+   */
+  public RecoverableZooKeeper getRecoverableZooKeeper() {
+    return recoverableZooKeeper;
+  }
+
+  public void reconnectAfterExpiration() throws IOException, InterruptedException {
+    recoverableZooKeeper.reconnectAfterExpiration();
+  }
+
+  /**
+   * Get the quorum address of this instance.
+   * @return quorum string of this zookeeper connection instance
+   */
+  public String getQuorum() {
+    return quorum;
+  }
+
+  /**
+   * Method called from ZooKeeper for events and connection status.
+   * <p>
+   * Valid events are passed along to listeners.  Connection status changes
+   * are dealt with locally.
+   */
+  @Override
+  public void process(WatchedEvent event) {
+    LOG.debug(prefix("Received ZooKeeper Event, " +
+        "type=" + event.getType() + ", " +
+        "state=" + event.getState() + ", " +
+        "path=" + event.getPath()));
+
+    switch(event.getType()) {
+
+      // If event type is NONE, this is a connection status change
+      case None: {
+        connectionEvent(event);
+        break;
+      }
+
+      // Otherwise pass along to the listeners
+
+      case NodeCreated: {
+        for(ZooKeeperListener listener : listeners) {
+          listener.nodeCreated(event.getPath());
+        }
+        break;
+      }
+
+      case NodeDeleted: {
+        for(ZooKeeperListener listener : listeners) {
+          listener.nodeDeleted(event.getPath());
+        }
+        break;
+      }
+
+      case NodeDataChanged: {
+        for(ZooKeeperListener listener : listeners) {
+          listener.nodeDataChanged(event.getPath());
+        }
+        break;
+      }
+
+      case NodeChildrenChanged: {
+        for(ZooKeeperListener listener : listeners) {
+          listener.nodeChildrenChanged(event.getPath());
+        }
+        break;
+      }
+    }
+  }
+
+  // Connection management
+
+  /**
+   * Called when there is a connection-related event via the Watcher callback.
+   * <p>
+   * If Disconnected or Expired, this should shut down the cluster. But, since
+   * we send a KeeperException.SessionExpiredException along with the abort
+   * call, it's possible for the Abortable to catch it and try to create a new
+   * session with ZooKeeper. This is what the client does in HCM.
+   * <p>
+   * @param event
+   */
+  private void connectionEvent(WatchedEvent event) {
+    switch(event.getState()) {
+      case SyncConnected:
+        // This callback can be invoked before this.recoverableZooKeeper is
+        // set.  Wait a little while.
+        long finished = System.currentTimeMillis() +
+          this.conf.getLong("hbase.zookeeper.watcher.sync.connected.wait", 2000);
+        while (System.currentTimeMillis() < finished) {
+          Threads.sleep(1);
+          if (this.recoverableZooKeeper != null) break;
+        }
+        if (this.recoverableZooKeeper == null) {
+          LOG.error("ZK is null on connection event -- see stack trace " +
+            "for the stack trace when constructor was called on this zkw",
+            this.constructorCaller);
+          throw new NullPointerException("ZK is null");
+        }
+        // Update our identifier with the session id.  Otherwise ignore.
+        this.identifier = this.identifier + "-0x" +
+          Long.toHexString(this.recoverableZooKeeper.getSessionId());
+        LOG.debug(this.identifier + " connected");
+        break;
+
+      // Disconnected is transient and only logged; Expired aborts the server
+      case Disconnected:
+        LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring"));
+        break;
+
+      case Expired:
+        String msg = prefix("Received Expired from ZooKeeper, aborting");
+        // TODO: One thought is to add call to ZooKeeperListener so say,
+        // ZooKeeperNodeTracker can zero out its data values.
+        if (this.abortable != null) this.abortable.abort(msg,
+            new KeeperException.SessionExpiredException());
+        break;
+
+      case ConnectedReadOnly:
+        break;
+
+      default:
+        throw new IllegalStateException("Received event is not valid.");
+    }
+  }
+
+  /**
+   * Forces a synchronization of this ZooKeeper client connection.
+   * <p>
+   * Executing this method before running other methods will ensure that the
+   * subsequent operations are up-to-date and consistent as of the time that
+   * the sync is complete.
+   * <p>
+   * This is used for compareAndSwap type operations where we need to read the
+   * data of an existing node and delete or transition that node, utilizing the
+   * previously read version and data.  We want to ensure that the version read
+   * is up-to-date from when we begin the operation.
+   */
+  public void sync(String path) {
+    this.recoverableZooKeeper.sync(path, null, null);
+  }
+
+  /**
+   * Handles KeeperExceptions in client calls.
+   * <p>
+   * This may be temporary but for now this gives one place to deal with these.
+   * <p>
+   * TODO: Currently this method rethrows the exception to let the caller handle it.
+   * <p>
+   * @param ke
+   * @throws KeeperException
+   */
+  public void keeperException(KeeperException ke)
+  throws KeeperException {
+    LOG.error(prefix("Received unexpected KeeperException, re-throwing exception"), ke);
+    throw ke;
+  }
+
+  /**
+   * Handles InterruptedExceptions in client calls.
+   * <p>
+   * This may be temporary but for now this gives one place to deal with these.
+   * <p>
+   * TODO: Currently, this method does nothing.
+   *       Is this ever expected to happen?  Do we abort or can we let it run?
+   *       Maybe this should be logged as WARN?  It shouldn't happen?
+   * <p>
+   * @param ie
+   */
+  public void interruptedException(InterruptedException ie) {
+    LOG.debug(prefix("Received InterruptedException, doing nothing here"), ie);
+    // At least preserve the interrupt status.
+    Thread.currentThread().interrupt();
+  }
+
+  /**
+   * Close the connection to ZooKeeper.  Any InterruptedException is caught
+   * and the interrupt status restored.
+   */
+  public void close() {
+    try {
+      if (recoverableZooKeeper != null) {
+        recoverableZooKeeper.close();
+      }
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  public Configuration getConfiguration() {
+    return conf;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+    this.abortable.abort(why, e);
+  }
+
+  @Override
+  public boolean isAborted() {
+    return this.abortable.isAborted();
+  }
+
+  /**
+   * @return Path to the currently active master.
+   */
+  public String getMasterAddressZNode() {
+    return this.masterAddressZNode;
+  }
+
+}
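
A short usage sketch of the watcher/listener pattern the class above establishes: components subclass ZooKeeperListener (also part of this commit) and register with the process-wide watcher. The ZNodeLogger class and the "znode-logger" identifier below are hypothetical, and passing a null Abortable is assumed to be acceptable only for a short-lived tool, since an Expired session would then go unhandled.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    // Hypothetical listener that logs znode creations it is notified of.
    public class ZNodeLogger extends ZooKeeperListener {
      public ZNodeLogger(ZooKeeperWatcher watcher) {
        super(watcher);
      }

      @Override
      public void nodeCreated(String path) {
        System.out.println("created: " + path);
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The watcher connects on construction; znode names are derived from
        // zookeeper.znode.parent plus the per-node keys in setNodeNames().
        ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "znode-logger", null);
        try {
          zkw.registerListener(new ZNodeLogger(zkw));
          Thread.sleep(60000); // watch for a minute
        } finally {
          zkw.close();
        }
      }
    }

Note that every znode path hangs off the single configurable parent, so overriding zookeeper.znode.parent relocates the whole tree without touching the per-node keys.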

Added: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java (added)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,168 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestAttributes {
+  @Test
+  public void testPutAttributes() {
+    Put put = new Put(new byte [] {});
+    Assert.assertTrue(put.getAttributesMap().isEmpty());
+    Assert.assertNull(put.getAttribute("absent"));
+
+    put.setAttribute("absent", null);
+    Assert.assertTrue(put.getAttributesMap().isEmpty());
+    Assert.assertNull(put.getAttribute("absent"));
+
+    // adding attribute
+    put.setAttribute("attribute1", Bytes.toBytes("value1"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1")));
+    Assert.assertEquals(1, put.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1")));
+
+    // overriding attribute value
+    put.setAttribute("attribute1", Bytes.toBytes("value12"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1")));
+    Assert.assertEquals(1, put.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1")));
+
+    // adding another attribute
+    put.setAttribute("attribute2", Bytes.toBytes("value2"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2")));
+    Assert.assertEquals(2, put.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2")));
+
+    // removing attribute
+    put.setAttribute("attribute2", null);
+    Assert.assertNull(put.getAttribute("attribute2"));
+    Assert.assertEquals(1, put.getAttributesMap().size());
+    Assert.assertNull(put.getAttributesMap().get("attribute2"));
+
+    // removing a non-existent attribute
+    put.setAttribute("attribute2", null);
+    Assert.assertNull(put.getAttribute("attribute2"));
+    Assert.assertEquals(1, put.getAttributesMap().size());
+    Assert.assertNull(put.getAttributesMap().get("attribute2"));
+
+    // removing another attribute
+    put.setAttribute("attribute1", null);
+    Assert.assertNull(put.getAttribute("attribute1"));
+    Assert.assertTrue(put.getAttributesMap().isEmpty());
+    Assert.assertNull(put.getAttributesMap().get("attribute1"));
+  }
+
+
+  @Test
+  public void testDeleteAttributes() {
+    Delete del = new Delete(new byte [] {});
+    Assert.assertTrue(del.getAttributesMap().isEmpty());
+    Assert.assertNull(del.getAttribute("absent"));
+
+    del.setAttribute("absent", null);
+    Assert.assertTrue(del.getAttributesMap().isEmpty());
+    Assert.assertNull(del.getAttribute("absent"));
+
+    // adding attribute
+    del.setAttribute("attribute1", Bytes.toBytes("value1"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1")));
+    Assert.assertEquals(1, del.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1")));
+
+    // overriding attribute value
+    del.setAttribute("attribute1", Bytes.toBytes("value12"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1")));
+    Assert.assertEquals(1, del.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1")));
+
+    // adding another attribute
+    del.setAttribute("attribute2", Bytes.toBytes("value2"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2")));
+    Assert.assertEquals(2, del.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2")));
+
+    // removing attribute
+    del.setAttribute("attribute2", null);
+    Assert.assertNull(del.getAttribute("attribute2"));
+    Assert.assertEquals(1, del.getAttributesMap().size());
+    Assert.assertNull(del.getAttributesMap().get("attribute2"));
+
+    // removing a non-existent attribute
+    del.setAttribute("attribute2", null);
+    Assert.assertNull(del.getAttribute("attribute2"));
+    Assert.assertEquals(1, del.getAttributesMap().size());
+    Assert.assertNull(del.getAttributesMap().get("attribute2"));
+
+    // removing another attribute
+    del.setAttribute("attribute1", null);
+    Assert.assertNull(del.getAttribute("attribute1"));
+    Assert.assertTrue(del.getAttributesMap().isEmpty());
+    Assert.assertNull(del.getAttributesMap().get("attribute1"));
+  }
+
+  @Test
+  public void testGetId() {
+    Get get = new Get(null);
+    Assert.assertNull("Make sure id is null if unset", get.toMap().get("id"));
+    get.setId("myId");
+    Assert.assertEquals("myId", get.toMap().get("id"));
+  }
+
+  @Test
+  public void testAppendId() {
+    Append append = new Append(Bytes.toBytes("testRow"));
+    Assert.assertNull("Make sure id is null if unset", append.toMap().get("id"));
+    append.setId("myId");
+    Assert.assertEquals("myId", append.toMap().get("id"));
+  }
+
+  @Test
+  public void testDeleteId() {
+    Delete delete = new Delete(new byte [] {});
+    Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id"));
+    delete.setId("myId");
+    Assert.assertEquals("myId", delete.toMap().get("id"));
+  }
+
+  @Test
+  public void testPutId() {
+    Put put = new Put(new byte [] {});
+    Assert.assertNull("Make sure id is null if unset", put.toMap().get("id"));
+    put.setId("myId");
+    Assert.assertEquals("myId", put.toMap().get("id"));
+  }
+
+  @Test
+  public void testScanId() {
+    Scan scan = new Scan();
+    Assert.assertNull("Make sure id is null if unset", scan.toMap().get("id"));
+    scan.setId("myId");
+    Assert.assertEquals("myId", scan.toMap().get("id"));
+  }
+
+}
+
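
The tests above pin down the attribute contract on the client operations: the map starts empty, setAttribute(name, null) removes the entry, and removing an absent entry is a no-op. A minimal sketch of that contract, assuming a plain lazily-created HashMap (the AttributeStore class is hypothetical, not the HBase implementation):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch of the attribute contract exercised above.
    public class AttributeStore {
      private Map<String, byte[]> attributes; // created lazily

      public void setAttribute(String name, byte[] value) {
        if (value == null) {
          if (attributes != null) {
            attributes.remove(name); // a null value means "remove"
          }
          return;
        }
        if (attributes == null) {
          attributes = new HashMap<String, byte[]>();
        }
        attributes.put(name, value);
      }

      public byte[] getAttribute(String name) {
        return attributes == null ? null : attributes.get(name);
      }

      public Map<String, byte[]> getAttributesMap() {
        return attributes == null
            ? Collections.<String, byte[]>emptyMap()
            : Collections.unmodifiableMap(attributes);
      }
    }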

Added: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java (added)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,110 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+// TODO: cover more test cases
+@Category(SmallTests.class)
+public class TestGet {
+  @Test
+  public void testAttributesSerialization() throws IOException {
+    Get get = new Get(Bytes.toBytes("row"));
+    get.setAttribute("attribute1", Bytes.toBytes("value1"));
+    get.setAttribute("attribute2", Bytes.toBytes("value2"));
+    get.setAttribute("attribute3", Bytes.toBytes("value3"));
+
+    ClientProtos.Get getProto = ProtobufUtil.toGet(get);
+
+    Get get2 = ProtobufUtil.toGet(getProto);
+    Assert.assertNull(get2.getAttribute("absent"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get2.getAttribute("attribute1")));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get2.getAttribute("attribute2")));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), get2.getAttribute("attribute3")));
+    Assert.assertEquals(3, get2.getAttributesMap().size());
+  }
+
+  @Test
+  public void testGetAttributes() {
+    Get get = new Get(null);
+    Assert.assertTrue(get.getAttributesMap().isEmpty());
+    Assert.assertNull(get.getAttribute("absent"));
+
+    get.setAttribute("absent", null);
+    Assert.assertTrue(get.getAttributesMap().isEmpty());
+    Assert.assertNull(get.getAttribute("absent"));
+
+    // adding attribute
+    get.setAttribute("attribute1", Bytes.toBytes("value1"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1")));
+    Assert.assertEquals(1, get.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1")));
+
+    // overriding attribute value
+    get.setAttribute("attribute1", Bytes.toBytes("value12"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1")));
+    Assert.assertEquals(1, get.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1")));
+
+    // adding another attribute
+    get.setAttribute("attribute2", Bytes.toBytes("value2"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2")));
+    Assert.assertEquals(2, get.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2")));
+
+    // removing attribute
+    get.setAttribute("attribute2", null);
+    Assert.assertNull(get.getAttribute("attribute2"));
+    Assert.assertEquals(1, get.getAttributesMap().size());
+    Assert.assertNull(get.getAttributesMap().get("attribute2"));
+
+    // removing a non-existent attribute
+    get.setAttribute("attribute2", null);
+    Assert.assertNull(get.getAttribute("attribute2"));
+    Assert.assertEquals(1, get.getAttributesMap().size());
+    Assert.assertNull(get.getAttributesMap().get("attribute2"));
+
+    // removing another attribute
+    get.setAttribute("attribute1", null);
+    Assert.assertNull(get.getAttribute("attribute1"));
+    Assert.assertTrue(get.getAttributesMap().isEmpty());
+    Assert.assertNull(get.getAttributesMap().get("attribute1"));
+  }
+
+  @Test
+  public void testNullQualifier() {
+    Get get = new Get(null);
+    byte[] family = Bytes.toBytes("family");
+    get.addColumn(family, null);
+    Set<byte[]> qualifiers = get.getFamilyMap().get(family);
+    Assert.assertEquals(1, qualifiers.size());
+  }
+}
+
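
testAttributesSerialization above is the round-trip guard for the protobuf-based client introduced by this commit: a Get converted to ClientProtos.Get and back must preserve its attributes. The same check can be run standalone; a sketch using the ProtobufUtil conversions the test itself exercises (the "trace-id" attribute is illustrative):

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetRoundTrip {
      public static void main(String[] args) throws Exception {
        Get get = new Get(Bytes.toBytes("row"));
        get.setAttribute("trace-id", Bytes.toBytes("abc123"));

        // Convert to the wire form and back, as the RPC layer would.
        ClientProtos.Get proto = ProtobufUtil.toGet(get);
        Get copy = ProtobufUtil.toGet(proto);

        System.out.println(Bytes.toString(copy.getAttribute("trace-id"))); // abc123
      }
    }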

Added: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java (added)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,372 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
+import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
+import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.DependentColumnFilter;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
+import org.apache.hadoop.hbase.filter.SkipFilter;
+import org.apache.hadoop.hbase.filter.TimestampsFilter;
+import org.apache.hadoop.hbase.filter.ValueFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Run tests that use the functionality of the Operation superclass for
+ * Puts, Gets, Deletes, Scans, and MultiPuts.
+ */
+@Category(SmallTests.class)
+public class TestOperation {
+  private static byte [] ROW = Bytes.toBytes("testRow");
+  private static byte [] FAMILY = Bytes.toBytes("testFamily");
+  private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static byte [] VALUE = Bytes.toBytes("testValue");
+
+  private static ObjectMapper mapper = new ObjectMapper();
+
+  private static List<Long> TS_LIST = Arrays.asList(2L, 3L, 5L);
+  private static TimestampsFilter TS_FILTER = new TimestampsFilter(TS_LIST);
+  private static String STR_TS_FILTER =
+      TS_FILTER.getClass().getSimpleName() + " (3/3): [2, 3, 5]";
+
+  private static List<Long> L_TS_LIST =
+      Arrays.asList(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L);
+  private static TimestampsFilter L_TS_FILTER =
+      new TimestampsFilter(L_TS_LIST);
+  private static String STR_L_TS_FILTER =
+      L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]";
+
+  private static String COL_NAME_1 = "col1";
+  private static ColumnPrefixFilter COL_PRE_FILTER =
+      new ColumnPrefixFilter(COL_NAME_1.getBytes());
+  private static String STR_COL_PRE_FILTER =
+      COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1;
+
+  private static String COL_NAME_2 = "col2";
+  private static ColumnRangeFilter CR_FILTER = new ColumnRangeFilter(
+      COL_NAME_1.getBytes(), true, COL_NAME_2.getBytes(), false);
+  private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName()
+      + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")";
+
+  private static int COL_COUNT = 9;
+  private static ColumnCountGetFilter CCG_FILTER =
+      new ColumnCountGetFilter(COL_COUNT);
+  private static String STR_CCG_FILTER =
+      CCG_FILTER.getClass().getSimpleName() + " " + COL_COUNT;
+
+  private static int LIMIT = 3;
+  private static int OFFSET = 4;
+  private static ColumnPaginationFilter CP_FILTER =
+      new ColumnPaginationFilter(LIMIT, OFFSET);
+  private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName()
+      + " (" + LIMIT + ", " + OFFSET + ")";
+
+  private static String STOP_ROW_KEY = "stop";
+  private static InclusiveStopFilter IS_FILTER =
+      new InclusiveStopFilter(STOP_ROW_KEY.getBytes());
+  private static String STR_IS_FILTER =
+      IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY;
+
+  private static String PREFIX = "prefix";
+  private static PrefixFilter PREFIX_FILTER =
+      new PrefixFilter(PREFIX.getBytes());
+  private static String STR_PREFIX_FILTER = "PrefixFilter " + PREFIX;
+
+  private static byte[][] PREFIXES = {
+      "0".getBytes(), "1".getBytes(), "2".getBytes()};
+  private static MultipleColumnPrefixFilter MCP_FILTER =
+      new MultipleColumnPrefixFilter(PREFIXES);
+  private static String STR_MCP_FILTER =
+      MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]";
+
+  private static byte[][] L_PREFIXES = {
+    "0".getBytes(), "1".getBytes(), "2".getBytes(), "3".getBytes(),
+    "4".getBytes(), "5".getBytes(), "6".getBytes(), "7".getBytes()};
+  private static MultipleColumnPrefixFilter L_MCP_FILTER =
+      new MultipleColumnPrefixFilter(L_PREFIXES);
+  private static String STR_L_MCP_FILTER =
+      L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]";
+
+  private static int PAGE_SIZE = 9;
+  private static PageFilter PAGE_FILTER = new PageFilter(PAGE_SIZE);
+  private static String STR_PAGE_FILTER =
+      PAGE_FILTER.getClass().getSimpleName() + " " + PAGE_SIZE;
+
+  private static SkipFilter SKIP_FILTER = new SkipFilter(L_TS_FILTER);
+  private static String STR_SKIP_FILTER =
+      SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER;
+
+  private static WhileMatchFilter WHILE_FILTER =
+      new WhileMatchFilter(L_TS_FILTER);
+  private static String STR_WHILE_FILTER =
+      WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER;
+
+  private static KeyOnlyFilter KEY_ONLY_FILTER = new KeyOnlyFilter();
+  private static String STR_KEY_ONLY_FILTER =
+      KEY_ONLY_FILTER.getClass().getSimpleName();
+
+  private static FirstKeyOnlyFilter FIRST_KEY_ONLY_FILTER =
+      new FirstKeyOnlyFilter();
+  private static String STR_FIRST_KEY_ONLY_FILTER =
+      FIRST_KEY_ONLY_FILTER.getClass().getSimpleName();
+
+  private static CompareOp CMP_OP = CompareOp.EQUAL;
+  private static byte[] CMP_VALUE = "value".getBytes();
+  private static BinaryComparator BC = new BinaryComparator(CMP_VALUE);
+  private static DependentColumnFilter DC_FILTER =
+      new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC);
+  private static String STR_DC_FILTER = String.format(
+      "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(),
+      Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true,
+      CMP_OP.name(), Bytes.toStringBinary(BC.getValue()));
+
+  private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC);
+  private static String STR_FAMILY_FILTER =
+      FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)";
+
+  private static QualifierFilter QUALIFIER_FILTER =
+      new QualifierFilter(CMP_OP, BC);
+  private static String STR_QUALIFIER_FILTER =
+      QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)";
+
+  private static RowFilter ROW_FILTER = new RowFilter(CMP_OP, BC);
+  private static String STR_ROW_FILTER =
+      ROW_FILTER.getClass().getSimpleName() + " (EQUAL, value)";
+
+  private static ValueFilter VALUE_FILTER = new ValueFilter(CMP_OP, BC);
+  private static String STR_VALUE_FILTER =
+      VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)";
+
+  private static SingleColumnValueFilter SCV_FILTER =
+      new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE);
+  private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)",
+      SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY),
+      Bytes.toStringBinary(QUALIFIER), CMP_OP.name(),
+      Bytes.toStringBinary(CMP_VALUE));
+
+  private static SingleColumnValueExcludeFilter SCVE_FILTER =
+      new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE);
+  private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)",
+      SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY),
+      Bytes.toStringBinary(QUALIFIER), CMP_OP.name(),
+      Bytes.toStringBinary(CMP_VALUE));
+
+  private static FilterList AND_FILTER_LIST = new FilterList(
+      Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER,
+          CR_FILTER));
+  private static String STR_AND_FILTER_LIST = String.format(
+      "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(),
+      STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER);
+
+  private static FilterList OR_FILTER_LIST = new FilterList(
+      Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER,
+          CR_FILTER));
+  private static String STR_OR_FILTER_LIST = String.format(
+      "%s OR (3/3): [%s, %s, %s]", OR_FILTER_LIST.getClass().getSimpleName(),
+      STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER);
+
+  private static FilterList L_FILTER_LIST = new FilterList(
+      Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER,
+          CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER));
+  private static String STR_L_FILTER_LIST = String.format(
+      "%s AND (5/8): [%s, %s, %s, %s, %s]",
+      L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER,
+      STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER);
+
+  private static Filter[] FILTERS = {
+    TS_FILTER,             // TimestampsFilter
+    L_TS_FILTER,           // TimestampsFilter
+    COL_PRE_FILTER,        // ColumnPrefixFilter
+    CP_FILTER,             // ColumnPaginationFilter
+    CR_FILTER,             // ColumnRangeFilter
+    CCG_FILTER,            // ColumnCountGetFilter
+    IS_FILTER,             // InclusiveStopFilter
+    PREFIX_FILTER,         // PrefixFilter
+    PAGE_FILTER,           // PageFilter
+    SKIP_FILTER,           // SkipFilter
+    WHILE_FILTER,          // WhileMatchFilter
+    KEY_ONLY_FILTER,       // KeyOnlyFilter
+    FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter
+    MCP_FILTER,            // MultipleColumnPrefixFilter
+    L_MCP_FILTER,          // MultipleColumnPrefixFilter
+    DC_FILTER,             // DependentColumnFilter
+    FAMILY_FILTER,         // FamilyFilter
+    QUALIFIER_FILTER,      // QualifierFilter
+    ROW_FILTER,            // RowFilter
+    VALUE_FILTER,          // ValueFilter
+    SCV_FILTER,            // SingleColumnValueFilter
+    SCVE_FILTER,           // SingleColumnValueExcludeFilter
+    AND_FILTER_LIST,       // FilterList
+    OR_FILTER_LIST,        // FilterList
+    L_FILTER_LIST,         // FilterList
+  };
+
+  private static String[] FILTERS_INFO = {
+    STR_TS_FILTER,             // TimestampsFilter
+    STR_L_TS_FILTER,           // TimestampsFilter
+    STR_COL_PRE_FILTER,        // ColumnPrefixFilter
+    STR_CP_FILTER,             // ColumnPaginationFilter
+    STR_CR_FILTER,             // ColumnRangeFilter
+    STR_CCG_FILTER,            // ColumnCountGetFilter
+    STR_IS_FILTER,             // InclusiveStopFilter
+    STR_PREFIX_FILTER,         // PrefixFilter
+    STR_PAGE_FILTER,           // PageFilter
+    STR_SKIP_FILTER,           // SkipFilter
+    STR_WHILE_FILTER,          // WhileMatchFilter
+    STR_KEY_ONLY_FILTER,       // KeyOnlyFilter
+    STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter
+    STR_MCP_FILTER,            // MultipleColumnPrefixFilter
+    STR_L_MCP_FILTER,          // MultipleColumnPrefixFilter
+    STR_DC_FILTER,             // DependentColumnFilter
+    STR_FAMILY_FILTER,         // FamilyFilter
+    STR_QUALIFIER_FILTER,      // QualifierFilter
+    STR_ROW_FILTER,            // RowFilter
+    STR_VALUE_FILTER,          // ValueFilter
+    STR_SCV_FILTER,            // SingleColumnValueFilter
+    STR_SCVE_FILTER,           // SingleColumnValueExcludeFilter
+    STR_AND_FILTER_LIST,       // FilterList
+    STR_OR_FILTER_LIST,        // FilterList
+    STR_L_FILTER_LIST,         // FilterList
+  };
+
+  static {
+    assertEquals(String.format("The sizes of static arrays do not match: "
+        + "[FILTERS: %d <=> FILTERS_INFO: %d]",
+        FILTERS.length, FILTERS_INFO.length),
+        FILTERS.length, FILTERS_INFO.length);
+  }
+
+  /**
+   * Test the client Operations' JSON encoding to ensure that produced JSON is 
+   * parseable and that the details are present and not corrupted.
+   * @throws IOException
+   */
+  @Test
+  public void testOperationJSON()
+      throws IOException {
+    // produce a Scan Operation
+    Scan scan = new Scan(ROW);
+    scan.addColumn(FAMILY, QUALIFIER);
+    // get its JSON representation, and parse it
+    String json = scan.toJSON();
+    Map<String, Object> parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("startRow incorrect in Scan.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("startRow"));
+    // check for the family and the qualifier.
+    List familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Scan.toJSON()", familyInfo);
+    assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size());
+    assertEquals("Qualifier incorrect in Scan.toJSON()",
+        Bytes.toStringBinary(QUALIFIER),
+        familyInfo.get(0));
+
+    // produce a Get Operation
+    Get get = new Get(ROW);
+    get.addColumn(FAMILY, QUALIFIER);
+    // get its JSON representation, and parse it
+    json = get.toJSON();
+    parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("row incorrect in Get.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("row"));
+    // check for the family and the qualifier.
+    familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Get.toJSON()", familyInfo);
+    assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size());
+    assertEquals("Qualifier incorrect in Get.toJSON()",
+        Bytes.toStringBinary(QUALIFIER),
+        familyInfo.get(0));
+
+    // produce a Put operation
+    Put put = new Put(ROW);
+    put.add(FAMILY, QUALIFIER, VALUE);
+    // get its JSON representation, and parse it
+    json = put.toJSON();
+    parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("row absent in Put.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("row"));
+    // check for the family and the qualifier.
+    familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Put.toJSON()", familyInfo);
+    assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size());
+    Map kvMap = (Map) familyInfo.get(0);
+    assertEquals("Qualifier incorrect in Put.toJSON()",
+        Bytes.toStringBinary(QUALIFIER),
+        kvMap.get("qualifier"));
+    assertEquals("Value length incorrect in Put.toJSON()", 
+        VALUE.length, kvMap.get("vlen"));
+
+    // produce a Delete operation
+    Delete delete = new Delete(ROW);
+    delete.deleteColumn(FAMILY, QUALIFIER);
+    // get its JSON representation, and parse it
+    json = delete.toJSON();
+    parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("row absent in Delete.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("row"));
+    // check for the family and the qualifier.
+    familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Delete.toJSON()", familyInfo);
+    assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size());
+    kvMap = (Map) familyInfo.get(0);
+    assertEquals("Qualifier incorrect in Delete.toJSON()", 
+        Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier"));
+  }
+
+}
+
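
testOperationJSON above validates Operation.toJSON() by parsing the output back with Jackson rather than comparing raw strings, which keeps the assertions robust to key ordering. The same pattern works for ad hoc inspection; a sketch using the org.codehaus.jackson mapper the test already depends on:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.codehaus.jackson.map.ObjectMapper;

    public class DumpScanJson {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan(Bytes.toBytes("startRow"));
        scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));

        // Parse the JSON form instead of matching the raw string.
        ObjectMapper mapper = new ObjectMapper();
        Map<String, Object> parsed =
            mapper.readValue(scan.toJSON(), HashMap.class);
        System.out.println(parsed.get("startRow")); // startRow
      }
    }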

Added: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java (added)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Addresses HBASE-6047.
+ * We test the put.has() call with all of its polymorphic magic.
+ */
+@Category(SmallTests.class)
+public class TestPutDotHas {
+
+  public static final byte[] ROW_01 = Bytes.toBytes("row-01");
+  public static final byte[] QUALIFIER_01 = Bytes.toBytes("qualifier-01");
+  public static final byte[] VALUE_01 = Bytes.toBytes("value-01");
+  public static final byte[] FAMILY_01 = Bytes.toBytes("family-01");
+  public static final long TS = 1234567L;
+  public Put put = new Put(ROW_01);
+
+  @Before
+  public void setUp() {
+    put.add(FAMILY_01, QUALIFIER_01, TS, VALUE_01);
+  }
+
+  @Test
+  public void testHasIgnoreValueIgnoreTS() {
+    Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01));
+    Assert.assertFalse(put.has(QUALIFIER_01, FAMILY_01));
+  }
+
+  @Test
+  public void testHasIgnoreValue() {
+    Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, TS));
+    Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS + 1));
+  }
+
+  @Test
+  public void testHasIgnoreTS() {
+    Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, VALUE_01));
+    Assert.assertFalse(put.has(FAMILY_01, VALUE_01, QUALIFIER_01));
+  }
+
+  @Test
+  public void testHas() {
+    Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, TS, VALUE_01));
+    // Bad TS
+    Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS + 1, VALUE_01));
+    // Bad Value
+    Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS, QUALIFIER_01));
+    // Bad Family
+    Assert.assertFalse(put.has(QUALIFIER_01, QUALIFIER_01, TS, VALUE_01));
+    // Bad Qual
+    Assert.assertFalse(put.has(FAMILY_01, FAMILY_01, TS, VALUE_01));
+  }
+}
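
Because family, qualifier, and value are all byte[], the put.has() overloads above resolve purely on arity, and swapping arguments silently changes the question being asked (which is exactly what the negative assertions cover). A sketch of the four overloads against a single cell:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutHasDemo {
      public static void main(String[] args) {
        byte[] fam = Bytes.toBytes("f");
        byte[] qual = Bytes.toBytes("q");
        byte[] val = Bytes.toBytes("v");
        long ts = 42L;

        Put put = new Put(Bytes.toBytes("r"));
        put.add(fam, qual, ts, val);

        // Each overload tightens the match; argument order matters.
        System.out.println(put.has(fam, qual));          // true
        System.out.println(put.has(fam, qual, ts));      // true
        System.out.println(put.has(fam, qual, val));     // true
        System.out.println(put.has(fam, qual, ts, val)); // true
        System.out.println(put.has(fam, qual, ts + 1));  // false: wrong timestamp
      }
    }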

Added: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java (added)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,111 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+// TODO: cover more test cases
+@Category(SmallTests.class)
+public class TestScan {
+  @Test
+  public void testAttributesSerialization() throws IOException {
+    Scan scan = new Scan();
+    scan.setAttribute("attribute1", Bytes.toBytes("value1"));
+    scan.setAttribute("attribute2", Bytes.toBytes("value2"));
+    scan.setAttribute("attribute3", Bytes.toBytes("value3"));
+
+    ClientProtos.Scan scanProto = ProtobufUtil.toScan(scan);
+
+    Scan scan2 = ProtobufUtil.toScan(scanProto);
+
+    Assert.assertNull(scan2.getAttribute("absent"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan2.getAttribute("attribute1")));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan2.getAttribute("attribute2")));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), scan2.getAttribute("attribute3")));
+    Assert.assertEquals(3, scan2.getAttributesMap().size());
+  }
+
+  @Test
+  public void testScanAttributes() {
+    Scan scan = new Scan();
+    Assert.assertTrue(scan.getAttributesMap().isEmpty());
+    Assert.assertNull(scan.getAttribute("absent"));
+
+    scan.setAttribute("absent", null);
+    Assert.assertTrue(scan.getAttributesMap().isEmpty());
+    Assert.assertNull(scan.getAttribute("absent"));
+
+    // adding attribute
+    scan.setAttribute("attribute1", Bytes.toBytes("value1"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1")));
+    Assert.assertEquals(1, scan.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1")));
+
+    // overriding attribute value
+    scan.setAttribute("attribute1", Bytes.toBytes("value12"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1")));
+    Assert.assertEquals(1, scan.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1")));
+
+    // adding another attribute
+    scan.setAttribute("attribute2", Bytes.toBytes("value2"));
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2")));
+    Assert.assertEquals(2, scan.getAttributesMap().size());
+    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2")));
+
+    // removing attribute
+    scan.setAttribute("attribute2", null);
+    Assert.assertNull(scan.getAttribute("attribute2"));
+    Assert.assertEquals(1, scan.getAttributesMap().size());
+    Assert.assertNull(scan.getAttributesMap().get("attribute2"));
+
+    // removing a non-existent attribute
+    scan.setAttribute("attribute2", null);
+    Assert.assertNull(scan.getAttribute("attribute2"));
+    Assert.assertEquals(1, scan.getAttributesMap().size());
+    Assert.assertNull(scan.getAttributesMap().get("attribute2"));
+
+    // removing another attribute
+    scan.setAttribute("attribute1", null);
+    Assert.assertNull(scan.getAttribute("attribute1"));
+    Assert.assertTrue(scan.getAttributesMap().isEmpty());
+    Assert.assertNull(scan.getAttributesMap().get("attribute1"));
+  }
+
+  @Test
+  public void testNullQualifier() {
+    Scan scan = new Scan();
+    byte[] family = Bytes.toBytes("family");
+    scan.addColumn(family, null);
+    Set<byte[]> qualifiers = scan.getFamilyMap().get(family);
+    Assert.assertEquals(1, qualifiers.size());
+  }
+}
+
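
testNullQualifier above fixes the semantics of addColumn(family, null): it records an explicit single-entry qualifier set rather than collapsing to a whole-family request. A sketch of the distinction, assuming addFamily (the standard whole-family call) maps the family to a null qualifier set:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class NullQualifierDemo {
      public static void main(String[] args) {
        byte[] family = Bytes.toBytes("f");

        // Whole family: the family maps to a null qualifier set.
        Scan wholeFamily = new Scan();
        wholeFamily.addFamily(family);
        System.out.println(wholeFamily.getFamilyMap().get(family)); // null

        // Explicit null qualifier: a set containing one (null) entry.
        Scan nullQualifier = new Scan();
        nullQualifier.addColumn(family, null);
        System.out.println(nullQualifier.getFamilyMap().get(family).size()); // 1
      }
    }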

Added: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java (added)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.protobuf.RpcController;
+
+/**
+ * Test snapshot logic from the client
+ */
+@Category(SmallTests.class)
+public class TestSnapshotFromAdmin {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotFromAdmin.class);
+
+  /**
+   * Test that the back-off logic (exponential increase, capped by the max wait time passed
+   * back from the server) yields the correct overall wait for the snapshot to finish.
+   * @throws Exception
+   */
+  @Test(timeout = 10000)
+  public void testBackoffLogic() throws Exception {
+    final int maxWaitTime = 7500;
+    final int numRetries = 10;
+    final int pauseTime = 500;
+    // calculate the wait time, if we just do straight backoff (ignoring the expected time from
+    // master)
+    long ignoreExpectedTime = 0;
+    for (int i = 0; i < 6; i++) {
+      ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
+    }
+    // the correct wait time, capping at the maxTime/tries + fudge room
+    final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
+    assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time "
+        + "- further testing won't prove anything.", time < ignoreExpectedTime);
+
+    // setup the mocks
+    HConnectionManager.HConnectionImplementation mockConnection = Mockito
+        .mock(HConnectionManager.HConnectionImplementation.class);
+    Configuration conf = HBaseConfiguration.create();
+    // setup the conf to match the expected properties
+    conf.setInt("hbase.client.retries.number", numRetries);
+    conf.setLong("hbase.client.pause", pauseTime);
+    // mock the master admin to our mock
+    MasterAdminKeepAliveConnection mockMaster = Mockito.mock(MasterAdminKeepAliveConnection.class);
+    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+    Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(mockMaster);
+    // set the max wait time for the snapshot to complete
+    TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder()
+        .setExpectedTimeout(maxWaitTime)
+        .build();
+    Mockito
+        .when(
+          mockMaster.snapshot((RpcController) Mockito.isNull(),
+            Mockito.any(TakeSnapshotRequest.class))).thenReturn(response);
+    // setup the response
+    IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
+    builder.setDone(false);
+    // first five times, we return false, last we get success
+    Mockito.when(
+      mockMaster.isSnapshotDone((RpcController) Mockito.isNull(),
+        Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(builder.build(), builder.build(),
+      builder.build(), builder.build(), builder.build(), builder.setDone(true).build());
+
+    // setup the admin and run the test
+    HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    String snapshot = "snapshot";
+    String table = "table";
+    // get start time
+    long start = System.currentTimeMillis();
+    admin.snapshot(snapshot, table);
+    long finish = System.currentTimeMillis();
+    long elapsed = (finish - start);
+    assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
+    admin.close();
+  }
+
+  /**
+   * Make sure that we validate the snapshot name and the table name before we pass anything across
+   * the wire
+   * @throws Exception on failure
+   */
+  @Test
+  public void testValidateSnapshotName() throws Exception {
+    HConnectionManager.HConnectionImplementation mockConnection = Mockito
+        .mock(HConnectionManager.HConnectionImplementation.class);
+    Configuration conf = HBaseConfiguration.create();
+    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+    HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+    // check that invalid snapshot names fail
+    failSnapshotStart(admin, builder.setName(".snapshot").build());
+    failSnapshotStart(admin, builder.setName("-snapshot").build());
+    failSnapshotStart(admin, builder.setName("snapshot fails").build());
+    failSnapshotStart(admin, builder.setName("snap$hot").build());
+    // check that the table name also gets verified
+    failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
+
+    // mock the master connection
+    MasterAdminKeepAliveConnection master = Mockito.mock(MasterAdminKeepAliveConnection.class);
+    Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(master);
+    TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder().setExpectedTimeout(0).build();
+    Mockito.when(
+      master.snapshot((RpcController) Mockito.isNull(), Mockito.any(TakeSnapshotRequest.class)))
+        .thenReturn(response);
+    IsSnapshotDoneResponse doneResponse = IsSnapshotDoneResponse.newBuilder().setDone(true).build();
+    Mockito.when(
+      master.isSnapshotDone((RpcController) Mockito.isNull(),
+        Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse);
+
+    // make sure that we can use valid names
+    admin.snapshot(builder.setName("snapshot").setTable("table").build());
+  }
+
+  private void failSnapshotStart(HBaseAdmin admin, SnapshotDescription snapshot) throws IOException {
+    try {
+      admin.snapshot(snapshot);
+      fail("Snapshot should not have succeeded with name:" + snapshot.getName());
+    } catch (IllegalArgumentException e) {
+      LOG.debug("Correctly failed to start snapshot:" + e.getMessage());
+    }
+  }
+}
\ No newline at end of file

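The assertion at the top of testBackoffLogic rests on simple arithmetic. A
back-of-the-envelope check, assuming HConstants.RETRY_BACKOFF begins
{1, 2, 3, 5, 10, 20} (the multipliers are an assumption here, not quoted from
this commit):

    public class BackoffMath {
      public static void main(String[] args) {
        final int maxWaitTime = 7500, numRetries = 10, pauseTime = 500;
        final int[] retryBackoff = {1, 2, 3, 5, 10, 20};  // assumed multipliers

        // uncapped: straight backoff over the first six retries
        long uncapped = 0;
        for (int m : retryBackoff) {
          uncapped += m * pauseTime;                      // 41 * 500 = 20500 ms
        }

        // capped: three plain pauses + three server-capped waits + fudge room
        long capped = pauseTime * 3 + (maxWaitTime / numRetries) * 3 + 300;
        System.out.println(uncapped + " vs " + capped);   // 20500 vs 4050
      }
    }

With those numbers the capped wait (4050 ms) sits well below the uncapped
backoff (20500 ms), so the elapsed-time assertion actually proves something.
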
Modified: hbase/trunk/hbase-common/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/pom.xml?rev=1449950&r1=1449949&r2=1449950&view=diff
==============================================================================
--- hbase/trunk/hbase-common/pom.xml (original)
+++ hbase/trunk/hbase-common/pom.xml Mon Feb 25 22:50:17 2013
@@ -31,32 +31,82 @@
   <description>Common functionality for HBase</description>
 
   <build>
+    <resources>
+      <resource>
+          <directory>src/main/resources/</directory>
+          <includes>
+              <include>hbase-default.xml</include>
+          </includes>
+          <filtering>true</filtering>
+      </resource>
+    </resources>
     <plugins>
-      <plugin>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <properties>
-            <property>
-              <name>listener</name>
-              <value>org.apache.hadoop.hbase.ResourceCheckerJUnitListener</value>
-            </property>
-          </properties>
-        </configuration>
-        <!-- Always skip the second part executions, since we only run
-        simple unit tests in this module -->
-        <executions>
-          <execution>
-            <id>secondPartTestsExecution</id>
-            <phase>test</phase>
-            <goals>
-              <goal>test</goal>
-            </goals>
+        <plugin>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+                <!-- Generate web app sources -->
+                <execution>
+                    <id>generate</id>
+                    <phase>generate-sources</phase>
+                    <configuration>
+                        <target>
+                            <property name="generated.sources" location="${project.build.directory}/generated-sources"/>
+
+                            <exec executable="sh">
+                                <arg line="${basedir}/src/saveVersion.sh ${project.version} ${generated.sources}/java"/>
+                            </exec>
+                        </target>
+                    </configuration>
+                    <goals>
+                        <goal>run</goal>
+                    </goals>
+                </execution>
+            </executions>
+        </plugin>
+        <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <executions>
+                <!-- Add the generated sources -->
+                <execution>
+                    <id>versionInfo-source</id>
+                    <phase>generate-sources</phase>
+                    <goals>
+                        <goal>add-source</goal>
+                    </goals>
+                    <configuration>
+                        <sources>
+                            <source>${project.build.directory}/generated-sources/java</source>
+                        </sources>
+                    </configuration>
+                </execution>
+            </executions>
+        </plugin>
+        <plugin>
+            <artifactId>maven-surefire-plugin</artifactId>
             <configuration>
-              <skip>true</skip>
+              <properties>
+                <property>
+                  <name>listener</name>
+                  <value>org.apache.hadoop.hbase.ResourceCheckerJUnitListener</value>
+                </property>
+              </properties>
             </configuration>
-          </execution>
-        </executions>
-      </plugin>
+            <!-- Always skip the second part executions, since we only run
+            simple unit tests in this module -->
+            <executions>
+              <execution>
+                <id>secondPartTestsExecution</id>
+                <phase>test</phase>
+                <goals>
+                  <goal>test</goal>
+                </goals>
+                <configuration>
+                  <skip>true</skip>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
     </plugins>
   </build>
 

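The antrun execution above shells out to src/saveVersion.sh at the
generate-sources phase, and the build-helper execution then puts
${project.build.directory}/generated-sources/java on the compile path. The
script itself is not part of this diff; a hypothetical sketch of the kind of
file such version scripts emit (annotation name and field values are
illustrative only):

    // generated-sources/java/org/apache/hadoop/hbase/package-info.java (hypothetical)
    @VersionAnnotation(version = "0.95-SNAPSHOT", revision = "1449950",
        user = "builduser", date = "Mon Feb 25 22:50:17 2013")
    package org.apache.hadoop.hbase;
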
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1449950&r1=1449949&r2=1449950&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Mon Feb 25 22:50:17 2013
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Collections;
@@ -27,6 +28,9 @@ import java.util.regex.Pattern;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import static org.apache.hadoop.hbase.io.hfile.BlockType.MAGIC_LENGTH;
 
 /**
  * HConstants holds a bunch of HBase-related constants
@@ -39,6 +43,36 @@ public final class HConstants {
 
   /** When we encode strings, we always specify UTF8 encoding */
   public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
+  /**
+   * Default block size for an HFile.
+   */
+  public final static int DEFAULT_BLOCKSIZE = 64 * 1024;
+  /**
+   * Name of the directory that holds recovered edits written by the WAL
+   * splitting code, one per region.
+   */
+  public static final String RECOVERED_EDITS_DIR = "recovered.edits";
+  /**
+   * The first four bytes of Hadoop RPC connections
+   */
+  public static final ByteBuffer RPC_HEADER = ByteBuffer.wrap("hrpc".getBytes());
+  public static final byte CURRENT_VERSION = 5;
+
+  // HFileBlock constants.
+
+  /** The size of a version 2 HFile block header when the minor version is 0 (no checksums). */
+  public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT
+      + Bytes.SIZEOF_LONG;
+  /** The size of a version 2 HFile block header, minor version 1.
+   * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum,
+   * followed by another 4 byte value to store sizeofDataOnDisk.
+   */
+  public static final int HFILEBLOCK_HEADER_SIZE = HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
+    Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT;
+  /** Just an array of bytes of the right size. */
+  public static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HFILEBLOCK_HEADER_SIZE];
+
+  // End HFileBlock constants.
 
   private static byte[] toBytes(String target) {
     return target.getBytes(UTF8_CHARSET);

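The two header-size constants above are plain sums. A worked check, assuming
BlockType.MAGIC_LENGTH is 8 (the length of the 8-byte block magic; the field
interpretations in the comments follow the javadoc above):

    public class HeaderSizeMath {
      public static void main(String[] args) {
        final int MAGIC_LENGTH = 8;             // assumed BlockType.MAGIC_LENGTH
        final int INT = 4, LONG = 8, BYTE = 1;  // the Bytes.SIZEOF_* values

        // magic, two int size fields, and a long offset field
        int noChecksum = MAGIC_LENGTH + 2 * INT + LONG;         // 8 + 8 + 8 = 24

        // plus checksum type (1), bytesPerChecksum (4), sizeofDataOnDisk (4)
        int withChecksum = noChecksum + BYTE + 2 * INT;         // 24 + 9 = 33

        System.out.println(noChecksum + " / " + withChecksum);  // 24 / 33
      }
    }
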
Added: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java (added)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,183 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Represents an interval of version timestamps.
+ * <p>
+ * Evaluated according to minStamp <= timestamp < maxStamp
+ * or [minStamp,maxStamp) in interval notation.
+ * <p>
+ * Only used internally; should not be accessed directly by clients.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class TimeRange {
+  private long minStamp = 0L;
+  private long maxStamp = Long.MAX_VALUE;
+  private boolean allTime = false;
+
+  /**
+   * Default constructor.
+   * Represents interval [0, Long.MAX_VALUE) (allTime)
+   */
+  public TimeRange() {
+    allTime = true;
+  }
+
+  /**
+   * Represents interval [minStamp, Long.MAX_VALUE)
+   * @param minStamp the minimum timestamp value, inclusive
+   */
+  public TimeRange(long minStamp) {
+    this.minStamp = minStamp;
+  }
+
+  /**
+   * Represents interval [minStamp, Long.MAX_VALUE)
+   * @param minStamp the minimum timestamp value, inclusive
+   */
+  public TimeRange(byte [] minStamp) {
+    this.minStamp = Bytes.toLong(minStamp);
+  }
+
+  /**
+   * Represents interval [minStamp, maxStamp)
+   * @param minStamp the minimum timestamp, inclusive
+   * @param maxStamp the maximum timestamp, exclusive
+   * @throws IOException if maxStamp is smaller than minStamp
+   */
+  public TimeRange(long minStamp, long maxStamp)
+  throws IOException {
+    if (maxStamp < minStamp) {
+      throw new IOException("maxStamp is smaller than minStamp");
+    }
+    this.minStamp = minStamp;
+    this.maxStamp = maxStamp;
+  }
+
+  /**
+   * Represents interval [minStamp, maxStamp)
+   * @param minStamp the minimum timestamp, inclusive
+   * @param maxStamp the maximum timestamp, exclusive
+   * @throws IOException if maxStamp is smaller than minStamp
+   */
+  public TimeRange(byte [] minStamp, byte [] maxStamp)
+  throws IOException {
+    this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp));
+  }
+
+  /**
+   * @return the smallest timestamp that should be considered
+   */
+  public long getMin() {
+    return minStamp;
+  }
+
+  /**
+   * @return the biggest timestamp that should be considered
+   */
+  public long getMax() {
+    return maxStamp;
+  }
+
+  /**
+   * Check if it is for all time
+   * @return true if it is for all time
+   */
+  public boolean isAllTime() {
+    return allTime;
+  }
+
+  /**
+   * Check if the specified timestamp is within this TimeRange.
+   * <p>
+   * Returns true if within interval [minStamp, maxStamp), false
+   * if not.
+   * @param bytes timestamp to check
+   * @param offset offset into the bytes
+   * @return true if within TimeRange, false if not
+   */
+  public boolean withinTimeRange(byte [] bytes, int offset) {
+    if (allTime) return true;
+    return withinTimeRange(Bytes.toLong(bytes, offset));
+  }
+
+  /**
+   * Check if the specified timestamp is within this TimeRange.
+   * <p>
+   * Returns true if within interval [minStamp, maxStamp), false
+   * if not.
+   * @param timestamp timestamp to check
+   * @return true if within TimeRange, false if not
+   */
+  public boolean withinTimeRange(long timestamp) {
+    if (allTime) return true;
+    // check the interval [minStamp, maxStamp)
+    return (minStamp <= timestamp && timestamp < maxStamp);
+  }
+
+  /**
+   * Check if the specified timestamp is within or after this TimeRange.
+   * <p>
+   * Returns true if the timestamp is at or after minStamp, i.e. within
+   * or after the interval [minStamp, maxStamp); false if not.
+   * @param timestamp timestamp to check
+   * @return true if within or after the TimeRange, false if not
+   */
+  public boolean withinOrAfterTimeRange(long timestamp) {
+    if(allTime) return true;
+    // check if >= minStamp
+    return (timestamp >= minStamp);
+  }
+
+  /**
+   * Compare the timestamp to this timerange.
+   * @param timestamp the timestamp to compare
+   * @return -1 if the timestamp is below the range,
+   * 0 if the timestamp is within the range,
+   * 1 if the timestamp is at or above maxStamp (after the range)
+   */
+  public int compare(long timestamp) {
+    if (timestamp < minStamp) {
+      return -1;
+    } else if (timestamp >= maxStamp) {
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("maxStamp=");
+    sb.append(this.maxStamp);
+    sb.append(", minStamp=");
+    sb.append(this.minStamp);
+    return sb.toString();
+  }
+}
\ No newline at end of file
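
TimeRange's interval is half-open: minStamp is inclusive, maxStamp exclusive.
A minimal sketch of the class in use, exercising only the methods defined
above:

    import org.apache.hadoop.hbase.io.TimeRange;

    public class TimeRangeExample {
      public static void main(String[] args) throws Exception {
        TimeRange tr = new TimeRange(100L, 200L);      // the interval [100, 200)

        System.out.println(tr.withinTimeRange(100L));  // true: lower bound inclusive
        System.out.println(tr.withinTimeRange(200L));  // false: upper bound exclusive
        System.out.println(tr.withinOrAfterTimeRange(500L)); // true: at or past minStamp

        System.out.println(tr.compare(50L));           // -1: before the range
        System.out.println(tr.compare(150L));          //  0: within the range
        System.out.println(tr.compare(200L));          //  1: at or after maxStamp
      }
    }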