Posted to commits@accumulo.apache.org by ct...@apache.org on 2014/04/09 19:58:00 UTC

[29/64] [abbrv] Merge branch '1.4.6-SNAPSHOT' into 1.5.2-SNAPSHOT

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
index cb54856,0000000..1a029dc
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
@@@ -1,273 -1,0 +1,272 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.util;
 +
 +import java.nio.charset.Charset;
 +
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.security.CredentialHelper;
 +import org.apache.accumulo.core.util.ArgumentChecker;
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.util.StringUtils;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * @since 1.5.0
 + */
 +public class ConfiguratorBase {
 +
 +  /**
 +   * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
 +   * 
 +   * @since 1.5.0
 +   */
 +  public static enum ConnectorInfo {
 +    IS_CONFIGURED, PRINCIPAL, TOKEN, TOKEN_CLASS
 +  }
 +
 +  /**
 +   * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}.
 +   * 
 +   * @since 1.5.0
 +   */
 +  protected static enum InstanceOpts {
 +    TYPE, NAME, ZOO_KEEPERS;
 +  }
 +
 +  /**
 +   * Configuration keys for general configuration options.
 +   * 
 +   * @since 1.5.0
 +   */
 +  protected static enum GeneralOpts {
 +    LOG_LEVEL
 +  }
 +
 +  /**
 +   * Provides a configuration key for a given feature enum, prefixed by the implementingClass
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param e
 +   *          the enum used to provide the unique part of the configuration key
 +   * @return the configuration key
 +   * @since 1.5.0
 +   */
 +  protected static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
 +    return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
 +  }
 +
 +  /**
 +   * Sets the connector information needed to communicate with Accumulo in this job.
 +   * 
 +   * <p>
 +   * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe
 +   * conversion to a string, and is not intended to be secure.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param principal
 +   *          a valid Accumulo user name
 +   * @param token
 +   *          the user's password
-    * @throws AccumuloSecurityException
 +   * @since 1.5.0
 +   */
 +  public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, AuthenticationToken token)
 +      throws AccumuloSecurityException {
 +    if (isConnectorInfoSet(implementingClass, conf))
 +      throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");
 +
 +    ArgumentChecker.notNull(principal, token);
 +    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN_CLASS), token.getClass().getCanonicalName());
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), CredentialHelper.tokenAsBase64(token));
 +  }
 +
 +  /**
 +   * Determines if the connector info has already been set for this instance.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the connector info has already been set, false otherwise
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static Boolean isConnectorInfoSet(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), false);
 +  }
 +
 +  /**
 +   * Gets the user name from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the principal
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static String getPrincipal(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL));
 +  }
 +
 +  /**
 +   * Gets the serialized token class from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the token class name
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static String getTokenClass(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, ConnectorInfo.TOKEN_CLASS));
 +  }
 +
 +  /**
 +   * Gets the serialized token from the configuration. WARNING: The serialized token is stored in the Configuration and shared with all MapReduce tasks; it is
 +   * BASE64 encoded to provide a charset safe conversion to a string, and is not intended to be secure.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the decoded principal's authentication token
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static byte[] getToken(Class<?> implementingClass, Configuration conf) {
 +    return Base64.decodeBase64(conf.get(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), "").getBytes(Charset.forName("UTF-8")));
 +  }
 +
 +  /**
 +   * Configures a {@link ZooKeeperInstance} for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param instanceName
 +   *          the Accumulo instance name
 +   * @param zooKeepers
 +   *          a comma-separated list of zookeeper servers
 +   * @since 1.5.0
 +   */
 +  public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, String instanceName, String zooKeepers) {
 +    String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
 +    if (!conf.get(key, "").isEmpty())
 +      throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key));
 +    conf.set(key, "ZooKeeperInstance");
 +
 +    ArgumentChecker.notNull(instanceName, zooKeepers);
 +    conf.set(enumToConfKey(implementingClass, InstanceOpts.NAME), instanceName);
 +    conf.set(enumToConfKey(implementingClass, InstanceOpts.ZOO_KEEPERS), zooKeepers);
 +  }
 +
 +  /**
 +   * Configures a {@link MockInstance} for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param instanceName
 +   *          the Accumulo instance name
 +   * @since 1.5.0
 +   */
 +  public static void setMockInstance(Class<?> implementingClass, Configuration conf, String instanceName) {
 +    String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
 +    if (!conf.get(key, "").isEmpty())
 +      throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key));
 +    conf.set(key, "MockInstance");
 +
 +    ArgumentChecker.notNull(instanceName);
 +    conf.set(enumToConfKey(implementingClass, InstanceOpts.NAME), instanceName);
 +  }
 +
 +  /**
 +   * Initializes an Accumulo {@link Instance} based on the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return an Accumulo instance
 +   * @since 1.5.0
 +   * @see #setZooKeeperInstance(Class, Configuration, String, String)
 +   * @see #setMockInstance(Class, Configuration, String)
 +   */
 +  public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
 +    String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE), "");
 +    if ("MockInstance".equals(instanceType))
 +      return new MockInstance(conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME)));
 +    else if ("ZooKeeperInstance".equals(instanceType)) {
 +      return new ZooKeeperInstance(conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME)), conf.get(enumToConfKey(implementingClass,
 +          InstanceOpts.ZOO_KEEPERS)));
 +    } else if (instanceType.isEmpty())
 +      throw new IllegalStateException("Instance has not been configured for " + implementingClass.getSimpleName());
 +    else
 +      throw new IllegalStateException("Unrecognized instance type " + instanceType);
 +  }
 +
 +  /**
 +   * Sets the log level for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param level
 +   *          the logging level
 +   * @since 1.5.0
 +   */
 +  public static void setLogLevel(Class<?> implementingClass, Configuration conf, Level level) {
 +    ArgumentChecker.notNull(level);
 +    Logger.getLogger(implementingClass).setLevel(level);
 +    conf.setInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), level.toInt());
 +  }
 +
 +  /**
 +   * Gets the log level from this configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the log level
 +   * @since 1.5.0
 +   * @see #setLogLevel(Class, Configuration, Level)
 +   */
 +  public static Level getLogLevel(Class<?> implementingClass, Configuration conf) {
 +    return Level.toLevel(conf.getInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), Level.INFO.toInt()));
 +  }
 +
 +}
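
For reference, a minimal sketch of how an input/output format might drive these static helpers; the class name MyInputFormat and the credential values are invented for illustration and are not part of this diff. A MockInstance is used so the snippet runs without a ZooKeeper quorum.

import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.mapreduce.lib.util.ConfiguratorBase;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Level;

public class ConfiguratorBaseSketch {
  // Hypothetical stand-in for whichever InputFormat/OutputFormat delegates to ConfiguratorBase.
  static class MyInputFormat {}

  public static void main(String[] args) throws AccumuloSecurityException {
    Configuration conf = new Configuration();

    // Each setter namespaces its keys with the implementing class, so one Configuration
    // can carry settings for several formats without collisions.
    ConfiguratorBase.setConnectorInfo(MyInputFormat.class, conf, "user", new PasswordToken("secret"));
    ConfiguratorBase.setMockInstance(MyInputFormat.class, conf, "sketchInstance");
    ConfiguratorBase.setLogLevel(MyInputFormat.class, conf, Level.DEBUG);

    // Reading the values back, as a RecordReader/RecordWriter would at task time.
    String principal = ConfiguratorBase.getPrincipal(MyInputFormat.class, conf);
    System.out.println(principal + " -> " + ConfiguratorBase.getTokenClass(MyInputFormat.class, conf));
    Instance instance = ConfiguratorBase.getInstance(MyInputFormat.class, conf); // a MockInstance here
  }
}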

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java
index 67362a2,0000000..6f321ff
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java
@@@ -1,77 -1,0 +1,73 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mock;
 +
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +
 +import org.apache.accumulo.core.client.BatchDeleter;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +
 +/**
 + * {@link BatchDeleter} for a {@link MockAccumulo} instance. Behaves similarly to a regular {@link BatchDeleter}, with a few exceptions:
 + * <ol>
 + * <li>There is no waiting for memory to fill before flushing</li>
 + * <li>Only one thread is used for writing</li>
 + * </ol>
 + * 
 + * Otherwise, it behaves as expected.
 + */
 +public class MockBatchDeleter extends MockBatchScanner implements BatchDeleter {
 +  
 +  private final MockAccumulo acc;
 +  private final String tableName;
 +  
 +  /**
 +   * Create a {@link BatchDeleter} for the specified instance on the specified table where the writer uses the specified {@link Authorizations}.
-    * 
-    * @param acc
-    * @param tableName
-    * @param auths
 +   */
 +  public MockBatchDeleter(MockAccumulo acc, String tableName, Authorizations auths) {
 +    super(acc.tables.get(tableName), auths);
 +    this.acc = acc;
 +    this.tableName = tableName;
 +  }
 +  
 +  @Override
 +  public void delete() throws MutationsRejectedException, TableNotFoundException {
 +    
 +    BatchWriter writer = new MockBatchWriter(acc, tableName);
 +    try {
 +      Iterator<Entry<Key,Value>> iter = super.iterator();
 +      while (iter.hasNext()) {
 +        Entry<Key,Value> next = iter.next();
 +        Key k = next.getKey();
 +        Mutation m = new Mutation(k.getRow());
 +        m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp());
 +        writer.addMutation(m);
 +      }
 +    } finally {
 +      writer.close();
 +    }
 +  }
 +  
 +}
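
A minimal usage sketch for the mock batch deleter through the public client API (the instance, user, and table names are made up): create a table in a MockInstance, then delete everything matching a full-table range.

import java.util.Collections;

import org.apache.accumulo.core.client.BatchDeleter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;

public class MockBatchDeleterSketch {
  public static void main(String[] args) throws Exception {
    Connector conn = new MockInstance("sketch").getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("t");

    // For a MockInstance connector this returns a MockBatchDeleter, so the
    // BatchWriterConfig and thread count are effectively ignored.
    BatchDeleter bd = conn.createBatchDeleter("t", new Authorizations(), 1, new BatchWriterConfig());
    bd.setRanges(Collections.singleton(new Range())); // an infinite range: everything
    bd.delete(); // scans matching entries and writes delete mutations, as shown above
    bd.close();
  }
}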

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
index 34eb3de,0000000..cb9481f
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
@@@ -1,133 -1,0 +1,90 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mock;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.admin.ActiveCompaction;
 +import org.apache.accumulo.core.client.admin.ActiveScan;
 +import org.apache.accumulo.core.client.admin.InstanceOperations;
 +import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 +
 +/**
 + * 
 + */
 +public class MockInstanceOperations implements InstanceOperations {
 +  MockAccumulo acu;
 +  
-   /**
-    * @param acu
-    */
 +  public MockInstanceOperations(MockAccumulo acu) {
 +    this.acu = acu;
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#setProperty(java.lang.String, java.lang.String)
-    */
 +  @Override
 +  public void setProperty(String property, String value) throws AccumuloException, AccumuloSecurityException {
 +    acu.setProperty(property, value);
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#removeProperty(java.lang.String)
-    */
 +  @Override
 +  public void removeProperty(String property) throws AccumuloException, AccumuloSecurityException {
 +    acu.removeProperty(property);
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#getSystemConfiguration()
-    */
 +  @Override
 +  public Map<String,String> getSystemConfiguration() throws AccumuloException, AccumuloSecurityException {
 +    return acu.systemProperties;
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#getSiteConfiguration()
-    */
 +  @Override
 +  public Map<String,String> getSiteConfiguration() throws AccumuloException, AccumuloSecurityException {
 +    return acu.systemProperties;
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#getTabletServers()
-    */
 +  @Override
 +  public List<String> getTabletServers() {
 +    return new ArrayList<String>();
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#getActiveScans(java.lang.String)
-    */
 +  @Override
 +  public List<ActiveScan> getActiveScans(String tserver) throws AccumuloException, AccumuloSecurityException {
 +    return new ArrayList<ActiveScan>();
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#testClassLoad(java.lang.String, java.lang.String)
-    */
 +  @Override
 +  public boolean testClassLoad(String className, String asTypeName) throws AccumuloException, AccumuloSecurityException {
 +    try {
 +      AccumuloVFSClassLoader.loadClass(className, Class.forName(asTypeName));
 +    } catch (ClassNotFoundException e) {
 +      e.printStackTrace();
 +      return false;
 +    }
 +    return true;
 +  }
 +  
-   /*
-    * (non-Javadoc)
-    * 
-    * @see org.apache.accumulo.core.client.admin.InstanceOperations#getActiveCompactions(java.lang.String)
-    */
 +  @Override
 +  public List<ActiveCompaction> getActiveCompactions(String tserver) throws AccumuloException, AccumuloSecurityException {
 +    return new ArrayList<ActiveCompaction>();
 +  }
 +  
 +  @Override
 +  public void ping(String tserver) throws AccumuloException {
 +
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
index bfba00f,0000000..78f2d15
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
@@@ -1,110 -1,0 +1,109 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.data;
 +
 +import java.util.Arrays;
 +
 +/**
 + * A single column and value pair within a mutation
 + * 
 + */
 +
 +public class ColumnUpdate {
 +  
 +  private byte[] columnFamily;
 +  private byte[] columnQualifier;
 +  private byte[] columnVisibility;
 +  private long timestamp;
 +  private boolean hasTimestamp;
 +  private byte[] val;
 +  private boolean deleted;
 +  
 +  public ColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted, byte[] val) {
 +    this.columnFamily = cf;
 +    this.columnQualifier = cq;
 +    this.columnVisibility = cv;
 +    this.hasTimestamp = hasts;
 +    this.timestamp = ts;
 +    this.deleted = deleted;
 +    this.val = val;
 +  }
 +  
 +  /**
 +   * @deprecated use setTimestamp(long);
-    * @param timestamp
 +   */
 +  @Deprecated
 +  public void setSystemTimestamp(long timestamp) {
 +    if (hasTimestamp)
 +      throw new IllegalStateException("Cannot set system timestamp when user set a timestamp");
 +  }
 +  
 +  public boolean hasTimestamp() {
 +    return hasTimestamp;
 +  }
 +  
 +  /**
 +   * Returns the column family
 +   * 
 +   */
 +  public byte[] getColumnFamily() {
 +    return columnFamily;
 +  }
 +  
 +  public byte[] getColumnQualifier() {
 +    return columnQualifier;
 +  }
 +  
 +  public byte[] getColumnVisibility() {
 +    return columnVisibility;
 +  }
 +  
 +  public long getTimestamp() {
 +    return this.timestamp;
 +  }
 +  
 +  public boolean isDeleted() {
 +    return this.deleted;
 +  }
 +  
 +  public byte[] getValue() {
 +    return this.val;
 +  }
 +  
 +  @Override
 +  public String toString() {
 +    return Arrays.toString(columnFamily) + ":" + Arrays.toString(columnQualifier) + " ["
 +        + Arrays.toString(columnVisibility) + "] " + (hasTimestamp ? timestamp : "NO_TIME_STAMP") + " " + Arrays.toString(val) + " " + deleted;
 +  }
 +  
 +  @Override
 +  public boolean equals(Object obj) {
 +    if (!(obj instanceof ColumnUpdate))
 +      return false;
 +    ColumnUpdate upd = (ColumnUpdate) obj;
 +    return Arrays.equals(getColumnFamily(), upd.getColumnFamily()) && Arrays.equals(getColumnQualifier(), upd.getColumnQualifier())
 +        && Arrays.equals(getColumnVisibility(), upd.getColumnVisibility()) && isDeleted() == upd.isDeleted() && Arrays.equals(getValue(), upd.getValue())
 +        && hasTimestamp() == upd.hasTimestamp() && getTimestamp() == upd.getTimestamp();
 +  }
 +  
 +  @Override
 +  public int hashCode() {
 +    return Arrays.hashCode(columnFamily) + Arrays.hashCode(columnQualifier) + Arrays.hashCode(columnVisibility)
 +        + (hasTimestamp ? (Boolean.TRUE.hashCode() + Long.valueOf(timestamp).hashCode()) : Boolean.FALSE.hashCode())
 +        + (deleted ? Boolean.TRUE.hashCode() : (Boolean.FALSE.hashCode() + Arrays.hashCode(val)));
 +  }
 +}
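
ColumnUpdate instances are normally obtained from Mutation.getUpdates() rather than constructed directly; a small sketch (row and column names are arbitrary):

import java.util.List;

import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class ColumnUpdateSketch {
  public static void main(String[] args) {
    Mutation m = new Mutation(new Text("row1"));
    m.put(new Text("cf"), new Text("cq"), new Value("v".getBytes()));
    m.putDelete(new Text("cf"), new Text("old"));

    // Each put/putDelete above becomes one ColumnUpdate.
    List<ColumnUpdate> updates = m.getUpdates();
    for (ColumnUpdate cu : updates) {
      System.out.println(new String(cu.getColumnFamily()) + ":" + new String(cu.getColumnQualifier())
          + " deleted=" + cu.isDeleted() + " hasTimestamp=" + cu.hasTimestamp());
    }
  }
}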

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/main/java/org/apache/accumulo/core/data/Key.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/data/Key.java
index 4b6867f,0000000..2b44359
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@@ -1,863 -1,0 +1,864 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.data;
 +
 +/**
 + * This is the Key used to store and access individual values in Accumulo.  A Key is a tuple composed of a row, column family, column qualifier, 
 + * column visibility, timestamp, and delete marker.
 + * 
 + * Keys are comparable and therefore have a sorted order defined by {@link #compareTo(Key)}.
 + * 
 + */
 +
 +import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
 +
 +import java.io.DataInput;
 +import java.io.DataOutput;
 +import java.io.IOException;
 +import java.nio.ByteBuffer;
 +import java.util.Arrays;
 +import java.util.List;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.data.thrift.TKey;
 +import org.apache.accumulo.core.data.thrift.TKeyValue;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.WritableComparable;
 +import org.apache.hadoop.io.WritableComparator;
 +import org.apache.hadoop.io.WritableUtils;
 +
 +public class Key implements WritableComparable<Key>, Cloneable {
 +  
 +  protected byte[] row;
 +  protected byte[] colFamily;
 +  protected byte[] colQualifier;
 +  protected byte[] colVisibility;
 +  protected long timestamp;
 +  protected boolean deleted;
 +  
 +  @Override
 +  public boolean equals(Object o) {
 +    if (o instanceof Key)
 +      return this.equals((Key) o, PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL);
 +    return false;
 +  }
 +  
 +  private static final byte EMPTY_BYTES[] = new byte[0];
 +  
 +  private byte[] copyIfNeeded(byte ba[], int off, int len, boolean copyData) {
 +    if (len == 0)
 +      return EMPTY_BYTES;
 +    
 +    if (!copyData && ba.length == len && off == 0)
 +      return ba;
 +    
 +    byte[] copy = new byte[len];
 +    System.arraycopy(ba, off, copy, 0, len);
 +    return copy;
 +  }
 +  
 +  private final void init(byte r[], int rOff, int rLen, byte cf[], int cfOff, int cfLen, byte cq[], int cqOff, int cqLen, byte cv[], int cvOff, int cvLen,
 +      long ts, boolean del, boolean copy) {
 +    row = copyIfNeeded(r, rOff, rLen, copy);
 +    colFamily = copyIfNeeded(cf, cfOff, cfLen, copy);
 +    colQualifier = copyIfNeeded(cq, cqOff, cqLen, copy);
 +    colVisibility = copyIfNeeded(cv, cvOff, cvLen, copy);
 +    timestamp = ts;
 +    deleted = del;
 +  }
 +  
 +  /**
 +   * Creates a key with empty row, empty column family, empty column qualifier, empty column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker
 +   * false.
 +   */
 +  public Key() {
 +    row = EMPTY_BYTES;
 +    colFamily = EMPTY_BYTES;
 +    colQualifier = EMPTY_BYTES;
 +    colVisibility = EMPTY_BYTES;
 +    timestamp = Long.MAX_VALUE;
 +    deleted = false;
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, empty column family, empty column qualifier, empty column visibility, timestamp {@link Long#MAX_VALUE}, and delete
 +   * marker false.
 +   */
 +  public Key(Text row) {
 +    init(row.getBytes(), 0, row.getLength(), EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0, Long.MAX_VALUE, false, true);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, empty column family, empty column qualifier, empty column visibility, the specified timestamp, and delete marker
 +   * false.
 +   */
 +  public Key(Text row, long ts) {
 +    this(row);
 +    timestamp = ts;
 +  }
 +  
 +  public Key(byte row[], int rOff, int rLen, byte cf[], int cfOff, int cfLen, byte cq[], int cqOff, int cqLen, byte cv[], int cvOff, int cvLen, long ts) {
 +    init(row, rOff, rLen, cf, cfOff, cfLen, cq, cqOff, cqLen, cv, cvOff, cvLen, ts, false, true);
 +  }
 +  
 +  public Key(byte[] row, byte[] colFamily, byte[] colQualifier, byte[] colVisibility, long timestamp) {
 +    this(row, colFamily, colQualifier, colVisibility, timestamp, false, true);
 +  }
 +  
 +  public Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted) {
 +    this(row, cf, cq, cv, ts, deleted, true);
 +  }
 +  
 +  public Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy) {
 +    init(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, cv, 0, cv.length, ts, deleted, copy);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, the specified column family, empty column qualifier, empty column visibility, timestamp {@link Long#MAX_VALUE}, and
 +   * delete marker false.
 +   */
 +  public Key(Text row, Text cf) {
 +    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0, Long.MAX_VALUE, false, true);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, the specified column family, the specified column qualifier, empty column visibility, timestamp
 +   * {@link Long#MAX_VALUE}, and delete marker false.
 +   */
 +  public Key(Text row, Text cf, Text cq) {
 +    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0, cq.getLength(), EMPTY_BYTES, 0, 0, Long.MAX_VALUE, false, true);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, the specified column family, the specified column qualifier, the specified column visibility, timestamp
 +   * {@link Long#MAX_VALUE}, and delete marker false.
 +   */
 +  public Key(Text row, Text cf, Text cq, Text cv) {
 +    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0, cq.getLength(), cv.getBytes(), 0, cv.getLength(),
 +        Long.MAX_VALUE, false, true);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, the specified column family, the specified column qualifier, empty column visibility, the specified timestamp, and
 +   * delete marker false.
 +   */
 +  public Key(Text row, Text cf, Text cq, long ts) {
 +    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0, cq.getLength(), EMPTY_BYTES, 0, 0, ts, false, true);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, the specified column family, the specified column qualifier, the specified column visibility, the specified
 +   * timestamp, and delete marker false.
 +   */
 +  public Key(Text row, Text cf, Text cq, Text cv, long ts) {
 +    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0, cq.getLength(), cv.getBytes(), 0, cv.getLength(), ts, false,
 +        true);
 +  }
 +  
 +  /**
 +   * Creates a key with the specified row, the specified column family, the specified column qualifier, the specified column visibility, the specified
 +   * timestamp, and delete marker false.
 +   */
 +  public Key(Text row, Text cf, Text cq, ColumnVisibility cv, long ts) {
 +    byte[] expr = cv.getExpression();
 +    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0, cq.getLength(), expr, 0, expr.length, ts, false, true);
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text)}.
 +   */
 +  public Key(CharSequence row) {
 +    this(new Text(row.toString()));
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text)}.
 +   */
 +  public Key(CharSequence row, CharSequence cf) {
 +    this(new Text(row.toString()), new Text(cf.toString()));
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text)}.
 +   */
 +  public Key(CharSequence row, CharSequence cf, CharSequence cq) {
 +    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()));
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,Text)}.
 +   */
 +  public Key(CharSequence row, CharSequence cf, CharSequence cq, CharSequence cv) {
 +    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()), new Text(cv.toString()));
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,long)}.
 +   */
 +  public Key(CharSequence row, CharSequence cf, CharSequence cq, long ts) {
 +    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()), ts);
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,Text,long)}.
 +   */
 +  public Key(CharSequence row, CharSequence cf, CharSequence cq, CharSequence cv, long ts) {
 +    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()), new Text(cv.toString()), ts);
 +  }
 +  
 +  /**
 +   * Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,ColumnVisibility,long)}.
 +   */
 +  public Key(CharSequence row, CharSequence cf, CharSequence cq, ColumnVisibility cv, long ts) {
 +    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()), new Text(cv.getExpression()), ts);
 +  }
 +  
 +  private byte[] followingArray(byte ba[]) {
 +    byte[] fba = new byte[ba.length + 1];
 +    System.arraycopy(ba, 0, fba, 0, ba.length);
 +    fba[ba.length] = (byte) 0x00;
 +    return fba;
 +  }
 +  
 +  /**
 +   * Returns a key that will sort immediately after this key.
 +   * 
 +   * @param part
 +   *          PartialKey except {@link PartialKey#ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL}
 +   */
 +  public Key followingKey(PartialKey part) {
 +    Key returnKey = new Key();
 +    switch (part) {
 +      case ROW:
 +        returnKey.row = followingArray(row);
 +        break;
 +      case ROW_COLFAM:
 +        returnKey.row = row;
 +        returnKey.colFamily = followingArray(colFamily);
 +        break;
 +      case ROW_COLFAM_COLQUAL:
 +        returnKey.row = row;
 +        returnKey.colFamily = colFamily;
 +        returnKey.colQualifier = followingArray(colQualifier);
 +        break;
 +      case ROW_COLFAM_COLQUAL_COLVIS:
 +        // This isn't useful for inserting into accumulo, but may be useful for lookups.
 +        returnKey.row = row;
 +        returnKey.colFamily = colFamily;
 +        returnKey.colQualifier = colQualifier;
 +        returnKey.colVisibility = followingArray(colVisibility);
 +        break;
 +      case ROW_COLFAM_COLQUAL_COLVIS_TIME:
 +        returnKey.row = row;
 +        returnKey.colFamily = colFamily;
 +        returnKey.colQualifier = colQualifier;
 +        returnKey.colVisibility = colVisibility;
 +        returnKey.setTimestamp(timestamp - 1);
 +        returnKey.deleted = false;
 +        break;
 +      default:
 +        throw new IllegalArgumentException("Partial key specification " + part + " disallowed");
 +    }
 +    return returnKey;
 +  }
 +  
 +  /**
 +   * Creates a key with the same row, column family, column qualifier, column visibility, timestamp, and delete marker as the given key.
 +   */
 +  public Key(Key other) {
 +    set(other);
 +  }
 +  
 +  public Key(TKey tkey) {
 +    this.row = toBytes(tkey.row);
 +    this.colFamily = toBytes(tkey.colFamily);
 +    this.colQualifier = toBytes(tkey.colQualifier);
 +    this.colVisibility = toBytes(tkey.colVisibility);
 +    this.timestamp = tkey.timestamp;
 +    this.deleted = false;
 +
 +    if (row == null) {
 +      throw new IllegalArgumentException("null row");
 +    }
 +    if (colFamily == null) {
 +      throw new IllegalArgumentException("null column family");
 +    }
 +    if (colQualifier == null) {
 +      throw new IllegalArgumentException("null column qualifier");
 +    }
 +    if (colVisibility == null) {
 +      throw new IllegalArgumentException("null column visibility");
 +    }
 +  }
 +  
 +  /**
 +   * This method gives users control over allocation of Text objects by copying into the passed in text.
 +   * 
 +   * @param r
 +   *          the key's row will be copied into this Text
 +   * @return the Text that was passed in
 +   */
 +  
 +  public Text getRow(Text r) {
 +    r.set(row, 0, row.length);
 +    return r;
 +  }
 +  
 +  /**
 +   * This method returns a pointer to the keys internal data and does not copy it.
 +   * 
 +   * @return ByteSequence that points to the internal key row data.
 +   */
 +  
 +  public ByteSequence getRowData() {
 +    return new ArrayByteSequence(row);
 +  }
 +  
 +  /**
 +   * This method allocates a Text object and copies into it.
 +   * 
 +   * @return Text containing the row field
 +   */
 +  
 +  public Text getRow() {
 +    return getRow(new Text());
 +  }
 +  
 +  /**
 +   * Efficiently compare the row of a key w/o allocating a text object and copying the row into it.
 +   * 
 +   * @param r
 +   *          row to compare to keys row
 +   * @return same as {@link #getRow()}.compareTo(r)
 +   */
 +  
 +  public int compareRow(Text r) {
 +    return WritableComparator.compareBytes(row, 0, row.length, r.getBytes(), 0, r.getLength());
 +  }
 +  
 +  /**
 +   * This method returns a pointer to the keys internal data and does not copy it.
 +   * 
 +   * @return ByteSequence that points to the internal key column family data.
 +   */
 +  
 +  public ByteSequence getColumnFamilyData() {
 +    return new ArrayByteSequence(colFamily);
 +  }
 +  
 +  /**
 +   * This method gives users control over allocation of Text objects by copying into the passed in text.
 +   * 
 +   * @param cf
 +   *          the key's column family will be copied into this Text
 +   * @return the Text that was passed in
 +   */
 +  
 +  public Text getColumnFamily(Text cf) {
 +    cf.set(colFamily, 0, colFamily.length);
 +    return cf;
 +  }
 +  
 +  /**
 +   * This method allocates a Text object and copies into it.
 +   * 
 +   * @return Text containing the column family field
 +   */
 +  
 +  public Text getColumnFamily() {
 +    return getColumnFamily(new Text());
 +  }
 +  
 +  /**
 +   * Efficiently compare the column family of a key w/o allocating a text object and copying the column family into it.
 +   * 
 +   * @param cf
 +   *          column family to compare to keys column family
 +   * @return same as {@link #getColumnFamily()}.compareTo(cf)
 +   */
 +  
 +  public int compareColumnFamily(Text cf) {
 +    return WritableComparator.compareBytes(colFamily, 0, colFamily.length, cf.getBytes(), 0, cf.getLength());
 +  }
 +  
 +  /**
 +   * This method returns a pointer to the keys internal data and does not copy it.
 +   * 
 +   * @return ByteSequence that points to the internal key column qualifier data.
 +   */
 +  
 +  public ByteSequence getColumnQualifierData() {
 +    return new ArrayByteSequence(colQualifier);
 +  }
 +  
 +  /**
 +   * This method gives users control over allocation of Text objects by copying into the passed in text.
 +   * 
 +   * @param cq
 +   *          the key's column qualifier will be copied into this Text
 +   * @return the Text that was passed in
 +   */
 +  
 +  public Text getColumnQualifier(Text cq) {
 +    cq.set(colQualifier, 0, colQualifier.length);
 +    return cq;
 +  }
 +  
 +  /**
 +   * This method allocates a Text object and copies into it.
 +   * 
 +   * @return Text containing the column qualifier field
 +   */
 +  
 +  public Text getColumnQualifier() {
 +    return getColumnQualifier(new Text());
 +  }
 +  
 +  /**
 +   * Efficiently compare the column qualifier of a key w/o allocating a text object and copying the column qualifier into it.
 +   * 
 +   * @param cq
 +   *          column qualifier to compare to keys column qualifier
 +   * @return same as {@link #getColumnQualifier()}.compareTo(cq)
 +   */
 +  
 +  public int compareColumnQualifier(Text cq) {
 +    return WritableComparator.compareBytes(colQualifier, 0, colQualifier.length, cq.getBytes(), 0, cq.getLength());
 +  }
 +  
 +  public void setTimestamp(long ts) {
 +    this.timestamp = ts;
 +  }
 +  
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +  
 +  public boolean isDeleted() {
 +    return deleted;
 +  }
 +  
 +  public void setDeleted(boolean deleted) {
 +    this.deleted = deleted;
 +  }
 +  
 +  /**
 +   * This method returns a pointer to the keys internal data and does not copy it.
 +   * 
 +   * @return ByteSequence that points to the internal key column visibility data.
 +   */
 +  
 +  public ByteSequence getColumnVisibilityData() {
 +    return new ArrayByteSequence(colVisibility);
 +  }
 +  
 +  /**
 +   * This method allocates a Text object and copies into it.
 +   * 
 +   * @return Text containing the column visibility field
 +   */
 +  
 +  public final Text getColumnVisibility() {
 +    return getColumnVisibility(new Text());
 +  }
 +  
 +  /**
 +   * This method gives users control over allocation of Text objects by copying into the passed in text.
 +   * 
 +   * @param cv
 +   *          the key's column visibility will be copied into this Text
 +   * @return the Text that was passed in
 +   */
 +  
 +  public final Text getColumnVisibility(Text cv) {
 +    cv.set(colVisibility, 0, colVisibility.length);
 +    return cv;
 +  }
 +  
 +  /**
 +   * This method creates a new ColumnVisibility representing the column visibility for this key
 +   * 
 +   * WARNING: using this method may inhibit performance since a new ColumnVisibility object is created on every call.
 +   * 
 +   * @return A new object representing the column visibility field
 +   * @since 1.5.0
 +   */
 +  public final ColumnVisibility getColumnVisibilityParsed() {
 +    return new ColumnVisibility(colVisibility);
 +  }
 +  
 +  /**
 +   * Sets this key's row, column family, column qualifier, column visibility, timestamp, and delete marker to be the same as another key's.
 +   */
 +  public void set(Key k) {
 +    row = k.row;
 +    colFamily = k.colFamily;
 +    colQualifier = k.colQualifier;
 +    colVisibility = k.colVisibility;
 +    timestamp = k.timestamp;
 +    deleted = k.deleted;
 +    
 +  }
 +  
++  @Override
 +  public void readFields(DataInput in) throws IOException {
 +    // this method is a little screwy so it will be compatible with older
 +    // code that serialized data
 +    
 +    int colFamilyOffset = WritableUtils.readVInt(in);
 +    int colQualifierOffset = WritableUtils.readVInt(in);
 +    int colVisibilityOffset = WritableUtils.readVInt(in);
 +    int totalLen = WritableUtils.readVInt(in);
 +    
 +    row = new byte[colFamilyOffset];
 +    colFamily = new byte[colQualifierOffset - colFamilyOffset];
 +    colQualifier = new byte[colVisibilityOffset - colQualifierOffset];
 +    colVisibility = new byte[totalLen - colVisibilityOffset];
 +    
 +    in.readFully(row);
 +    in.readFully(colFamily);
 +    in.readFully(colQualifier);
 +    in.readFully(colVisibility);
 +    
 +    timestamp = WritableUtils.readVLong(in);
 +    deleted = in.readBoolean();
 +  }
 +  
++  @Override
 +  public void write(DataOutput out) throws IOException {
 +    
 +    int colFamilyOffset = row.length;
 +    int colQualifierOffset = colFamilyOffset + colFamily.length;
 +    int colVisibilityOffset = colQualifierOffset + colQualifier.length;
 +    int totalLen = colVisibilityOffset + colVisibility.length;
 +    
 +    WritableUtils.writeVInt(out, colFamilyOffset);
 +    WritableUtils.writeVInt(out, colQualifierOffset);
 +    WritableUtils.writeVInt(out, colVisibilityOffset);
 +    
 +    WritableUtils.writeVInt(out, totalLen);
 +    
 +    out.write(row);
 +    out.write(colFamily);
 +    out.write(colQualifier);
 +    out.write(colVisibility);
 +    
 +    WritableUtils.writeVLong(out, timestamp);
 +    out.writeBoolean(deleted);
 +  }
 +  
 +  /**
 +   * Compare part of a key. For example compare just the row and column family, and if those are equal then return true.
 +   * 
 +   */
 +  
 +  public boolean equals(Key other, PartialKey part) {
 +    switch (part) {
 +      case ROW:
 +        return isEqual(row, other.row);
 +      case ROW_COLFAM:
 +        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily);
 +      case ROW_COLFAM_COLQUAL:
 +        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily) && isEqual(colQualifier, other.colQualifier);
 +      case ROW_COLFAM_COLQUAL_COLVIS:
 +        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily) && isEqual(colQualifier, other.colQualifier)
 +            && isEqual(colVisibility, other.colVisibility);
 +      case ROW_COLFAM_COLQUAL_COLVIS_TIME:
 +        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily) && isEqual(colQualifier, other.colQualifier)
 +            && isEqual(colVisibility, other.colVisibility) && timestamp == other.timestamp;
 +      case ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL:
 +        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily) && isEqual(colQualifier, other.colQualifier)
 +            && isEqual(colVisibility, other.colVisibility) && timestamp == other.timestamp && deleted == other.deleted;
 +      default:
 +        throw new IllegalArgumentException("Unrecognized partial key specification " + part);
 +    }
 +  }
 +  
 +  /**
 +   * Compare elements of a key given by a {@link PartialKey}. For example, for {@link PartialKey#ROW_COLFAM}, compare just the row and column family. If the
 +   * rows are not equal, return the result of the row comparison; otherwise, return the result of the column family comparison.
 +   * 
 +   * @see #compareTo(Key)
 +   */
 +  
 +  public int compareTo(Key other, PartialKey part) {
 +    // check for matching row
 +    int result = WritableComparator.compareBytes(row, 0, row.length, other.row, 0, other.row.length);
 +    if (result != 0 || part.equals(PartialKey.ROW))
 +      return result;
 +    
 +    // check for matching column family
 +    result = WritableComparator.compareBytes(colFamily, 0, colFamily.length, other.colFamily, 0, other.colFamily.length);
 +    if (result != 0 || part.equals(PartialKey.ROW_COLFAM))
 +      return result;
 +    
 +    // check for matching column qualifier
 +    result = WritableComparator.compareBytes(colQualifier, 0, colQualifier.length, other.colQualifier, 0, other.colQualifier.length);
 +    if (result != 0 || part.equals(PartialKey.ROW_COLFAM_COLQUAL))
 +      return result;
 +    
 +    // check for matching column visibility
 +    result = WritableComparator.compareBytes(colVisibility, 0, colVisibility.length, other.colVisibility, 0, other.colVisibility.length);
 +    if (result != 0 || part.equals(PartialKey.ROW_COLFAM_COLQUAL_COLVIS))
 +      return result;
 +    
 +    // check for matching timestamp
 +    if (timestamp < other.timestamp)
 +      result = 1;
 +    else if (timestamp > other.timestamp)
 +      result = -1;
 +    else
 +      result = 0;
 +    
 +    if (result != 0 || part.equals(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME))
 +      return result;
 +    
 +    // check for matching deleted flag
 +    if (deleted)
 +      result = other.deleted ? 0 : -1;
 +    else
 +      result = other.deleted ? 1 : 0;
 +    
 +    return result;
 +  }
 +  
 +  /**
 +   * Compare all elements of a key. The elements (row, column family, column qualifier, column visibility, timestamp, and delete marker) are compared in order
 +   * until an unequal element is found. If the row is equal, then compare the column family, etc. The row, column family, column qualifier, and column
 +   * visibility are compared lexicographically and sorted ascending. The timestamps are compared numerically and sorted descending so that the most recent data
 +   * comes first. Lastly, a delete marker of true sorts before a delete marker of false.
 +   */
 +  
++  @Override
 +  public int compareTo(Key other) {
 +    return compareTo(other, PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL);
 +  }
 +  
 +  @Override
 +  public int hashCode() {
 +    return WritableComparator.hashBytes(row, row.length) + WritableComparator.hashBytes(colFamily, colFamily.length)
 +        + WritableComparator.hashBytes(colQualifier, colQualifier.length) + WritableComparator.hashBytes(colVisibility, colVisibility.length)
 +        + (int) (timestamp ^ (timestamp >>> 32));
 +  }
 +  
 +  public static String toPrintableString(byte ba[], int offset, int len, int maxLen) {
 +    return appendPrintableString(ba, offset, len, maxLen, new StringBuilder()).toString();
 +  }
 +  
 +  public static StringBuilder appendPrintableString(byte ba[], int offset, int len, int maxLen, StringBuilder sb) {
 +    int plen = Math.min(len, maxLen);
 +    
 +    for (int i = 0; i < plen; i++) {
 +      int c = 0xff & ba[offset + i];
 +      if (c >= 32 && c <= 126)
 +        sb.append((char) c);
 +      else
 +        sb.append("%" + String.format("%02x;", c));
 +    }
 +    
 +    if (len > maxLen) {
 +      sb.append("... TRUNCATED");
 +    }
 +    
 +    return sb;
 +  }
 +  
 +  private StringBuilder rowColumnStringBuilder() {
 +    StringBuilder sb = new StringBuilder();
 +    appendPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT, sb);
 +    sb.append(" ");
 +    appendPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT, sb);
 +    sb.append(":");
 +    appendPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT, sb);
 +    sb.append(" [");
 +    appendPrintableString(colVisibility, 0, colVisibility.length, Constants.MAX_DATA_TO_PRINT, sb);
 +    sb.append("]");
 +    return sb;
 +  }
 +  
++  @Override
 +  public String toString() {
 +    StringBuilder sb = rowColumnStringBuilder();
 +    sb.append(" ");
 +    sb.append(Long.toString(timestamp));
 +    sb.append(" ");
 +    sb.append(deleted);
 +    return sb.toString();
 +  }
 +  
 +  public String toStringNoTime() {
 +    return rowColumnStringBuilder().toString();
 +  }
 +  
 +  /**
 +   * Returns the sums of the lengths of the row, column family, column qualifier, and visibility.
 +   * 
 +   * @return row.length + colFamily.length + colQualifier.length + colVisibility.length;
 +   */
 +  public int getLength() {
 +    return row.length + colFamily.length + colQualifier.length + colVisibility.length;
 +  }
 +  
 +  /**
 +   * Same as {@link #getLength()}.
 +   */
 +  public int getSize() {
 +    return getLength();
 +  }
 +  
 +  private static boolean isEqual(byte a1[], byte a2[]) {
 +    if (a1 == a2)
 +      return true;
 +    
 +    int last = a1.length;
 +    
 +    if (last != a2.length)
 +      return false;
 +    
 +    if (last == 0)
 +      return true;
 +    
 +    // since sorted data is usually compared in accumulo,
 +    // the prefixes will normally be the same... so compare
 +    // the last two characters first.. the most likely place
 +    // to have disorder is at end of the strings when the
 +    // data is sorted... if those are the same compare the rest
 +    // of the data forward... comparing backwards is slower
 +    // (compiler and cpu optimized for reading data forward)..
 +    // do not want slower comparisons when data is equal...
 +    // sorting brings equals data together
 +    
 +    last--;
 +    
 +    if (a1[last] == a2[last]) {
 +      for (int i = 0; i < last; i++)
 +        if (a1[i] != a2[i])
 +          return false;
 +    } else {
 +      return false;
 +    }
 +    
 +    return true;
 +    
 +  }
 +  
 +  /**
 +   * Use this to compress a list of keys before sending them via thrift.
 +   * 
 +   * @param param
 +   *          a list of key/value pairs
 +   */
 +  public static List<TKeyValue> compress(List<? extends KeyValue> param) {
 +    
 +    List<TKeyValue> tkvl = Arrays.asList(new TKeyValue[param.size()]);
 +    
 +    if (param.size() > 0)
 +      tkvl.set(0, new TKeyValue(param.get(0).key.toThrift(), ByteBuffer.wrap(param.get(0).value)));
 +    
 +    for (int i = param.size() - 1; i > 0; i--) {
 +      Key prevKey = param.get(i - 1).key;
 +      KeyValue kv = param.get(i);
 +      Key key = kv.key;
 +      
 +      TKey newKey = null;
 +      
 +      if (isEqual(prevKey.row, key.row)) {
 +        newKey = key.toThrift();
 +        newKey.row = null;
 +      }
 +      
 +      if (isEqual(prevKey.colFamily, key.colFamily)) {
 +        if (newKey == null)
 +          newKey = key.toThrift();
 +        newKey.colFamily = null;
 +      }
 +      
 +      if (isEqual(prevKey.colQualifier, key.colQualifier)) {
 +        if (newKey == null)
 +          newKey = key.toThrift();
 +        newKey.colQualifier = null;
 +      }
 +      
 +      if (isEqual(prevKey.colVisibility, key.colVisibility)) {
 +        if (newKey == null)
 +          newKey = key.toThrift();
 +        newKey.colVisibility = null;
 +      }
 +      
 +      if (newKey == null)
 +        newKey = key.toThrift();
 +      
 +      tkvl.set(i, new TKeyValue(newKey, ByteBuffer.wrap(kv.value)));
 +    }
 +    
 +    return tkvl;
 +  }
 +  
 +  /**
 +   * Use this to decompress a list of keys received from thrift.
-    * 
-    * @param param
 +   */
-   
 +  public static void decompress(List<TKeyValue> param) {
 +    for (int i = 1; i < param.size(); i++) {
 +      TKey prevKey = param.get(i - 1).key;
 +      TKey key = param.get(i).key;
 +      
 +      if (key.row == null) {
 +        key.row = prevKey.row;
 +      }
 +      if (key.colFamily == null) {
 +        key.colFamily = prevKey.colFamily;
 +      }
 +      if (key.colQualifier == null) {
 +        key.colQualifier = prevKey.colQualifier;
 +      }
 +      if (key.colVisibility == null) {
 +        key.colVisibility = prevKey.colVisibility;
 +      }
 +    }
 +  }
 +  
 +  byte[] getRowBytes() {
 +    return row;
 +  }
 +  
 +  byte[] getColFamily() {
 +    return colFamily;
 +  }
 +  
 +  byte[] getColQualifier() {
 +    return colQualifier;
 +  }
 +  
 +  byte[] getColVisibility() {
 +    return colVisibility;
 +  }
 +  
 +  public TKey toThrift() {
 +    return new TKey(ByteBuffer.wrap(row), ByteBuffer.wrap(colFamily), ByteBuffer.wrap(colQualifier), ByteBuffer.wrap(colVisibility), timestamp);
 +  }
 +  
 +  @Override
 +  public Object clone() throws CloneNotSupportedException {
 +    Key r = (Key) super.clone();
 +    r.row = Arrays.copyOf(row, row.length);
 +    r.colFamily = Arrays.copyOf(colFamily, colFamily.length);
 +    r.colQualifier = Arrays.copyOf(colQualifier, colQualifier.length);
 +    r.colVisibility = Arrays.copyOf(colVisibility, colVisibility.length);
 +    return r;
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
index e48914d,0000000..63c594c
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
@@@ -1,783 -1,0 +1,783 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.data;
 +
 +/**
 + * Keeps track of information needed to identify a tablet.
 + * Apparently, only the endKey is needed, not the start key as well.
 + * 
 + */
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInput;
 +import java.io.DataOutput;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.lang.ref.WeakReference;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +import java.util.SortedMap;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +import java.util.UUID;
 +import java.util.WeakHashMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.data.thrift.TKeyExtent;
 +import org.apache.accumulo.core.util.ByteBufferUtil;
 +import org.apache.accumulo.core.util.TextUtil;
 +import org.apache.hadoop.io.BinaryComparable;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.WritableComparable;
 +
 +public class KeyExtent implements WritableComparable<KeyExtent> {
 +  
 +  private static final WeakHashMap<Text,WeakReference<Text>> tableIds = new WeakHashMap<Text,WeakReference<Text>>();
 +  
 +  private static Text dedupeTableId(Text tableId) {
 +    synchronized (tableIds) {
 +      WeakReference<Text> etir = tableIds.get(tableId);
 +      if (etir != null) {
 +        Text eti = etir.get();
 +        if (eti != null) {
 +          return eti;
 +        }
 +      }
 +      
 +      tableId = new Text(tableId);
 +      tableIds.put(tableId, new WeakReference<Text>(tableId));
 +      return tableId;
 +    }
 +  }
 +  
 +  private Text textTableId;
 +  private Text textEndRow;
 +  private Text textPrevEndRow;
 +  
 +  private void check() {
 +    
 +    if (getTableId() == null)
 +      throw new IllegalArgumentException("null table id not allowed");
 +    
 +    if (getEndRow() == null || getPrevEndRow() == null)
 +      return;
 +    
 +    if (getPrevEndRow().compareTo(getEndRow()) >= 0) {
 +      throw new IllegalArgumentException("prevEndRow (" + getPrevEndRow() + ") >= endRow (" + getEndRow() + ")");
 +    }
 +  }
 +  
 +  /**
 +   * Default constructor
 +   * 
 +   */
 +  public KeyExtent() {
 +    this.setTableId(new Text());
 +    this.setEndRow(new Text(), false, false);
 +    this.setPrevEndRow(new Text(), false, false);
 +  }
 +  
 +  public KeyExtent(Text table, Text endRow, Text prevEndRow) {
 +    this.setTableId(table);
 +    this.setEndRow(endRow, false, true);
 +    this.setPrevEndRow(prevEndRow, false, true);
 +    
 +    check();
 +  }
 +  
 +  public KeyExtent(KeyExtent extent) {
 +    // extent has already deduped table id, so there is no need to do it again
 +    this.textTableId = extent.textTableId;
 +    this.setEndRow(extent.getEndRow(), false, true);
 +    this.setPrevEndRow(extent.getPrevEndRow(), false, true);
 +    
 +    check();
 +  }
 +  
 +  public KeyExtent(TKeyExtent tke) {
 +    this.setTableId(new Text(ByteBufferUtil.toBytes(tke.table)));
 +    this.setEndRow(tke.endRow == null ? null : new Text(ByteBufferUtil.toBytes(tke.endRow)), false, false);
 +    this.setPrevEndRow(tke.prevEndRow == null ? null : new Text(ByteBufferUtil.toBytes(tke.prevEndRow)), false, false);
 +    
 +    check();
 +  }
 +  
 +  /**
 +   * Returns a Text representing this extent's entry in the Metadata table
 +   * 
 +   */
 +  public Text getMetadataEntry() {
 +    return getMetadataEntry(getTableId(), getEndRow());
 +  }
 +  
 +  public static Text getMetadataEntry(Text table, Text row) {
 +    Text entry = new Text(table);
 +    
 +    if (row == null) {
 +      entry.append(new byte[] {'<'}, 0, 1);
 +    } else {
 +      entry.append(new byte[] {';'}, 0, 1);
 +      entry.append(row.getBytes(), 0, row.getLength());
 +    }
 +    
 +    return entry;
 +    
 +  }
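As an illustration of the flattened encoding produced above (the table id "2" and row "m" are hypothetical):

    Text tableId = new Text("2");
    Text mid  = KeyExtent.getMetadataEntry(tableId, new Text("m"));  // "2;m"
    Text last = KeyExtent.getMetadataEntry(tableId, null);           // "2<" (last tablet of the table)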
 +  
 +  // constructor for loading extents from metadata rows
 +  public KeyExtent(Text flattenedExtent, Value prevEndRow) {
 +    decodeMetadataRow(flattenedExtent);
 +    
 +    // decode the prev row
 +    this.setPrevEndRow(decodePrevEndRow(prevEndRow), false, true);
 +    
 +    check();
 +  }
 +  
 +  // recreates an encoded extent from a string representation
 +  // this encoding is what is stored as the row id of the metadata table
 +  public KeyExtent(Text flattenedExtent, Text prevEndRow) {
 +    
 +    decodeMetadataRow(flattenedExtent);
 +    
 +    this.setPrevEndRow(null, false, false);
 +    if (prevEndRow != null)
 +      this.setPrevEndRow(prevEndRow, false, true);
 +    
 +    check();
 +  }
 +  
 +  /**
 +   * Sets the extent's table id
 +   * 
 +   */
 +  public void setTableId(Text tId) {
 +    
 +    if (tId == null)
 +      throw new IllegalArgumentException("null table name not allowed");
 +    
 +    this.textTableId = dedupeTableId(tId);
 +    
 +    hashCode = 0;
 +  }
 +  
 +  /**
 +   * Returns the extent's table id
 +   * 
 +   */
 +  public Text getTableId() {
 +    return textTableId;
 +  }
 +  
 +  private void setEndRow(Text endRow, boolean check, boolean copy) {
 +    if (endRow != null)
 +      if (copy)
 +        this.textEndRow = new Text(endRow);
 +      else
 +        this.textEndRow = endRow;
 +    else
 +      this.textEndRow = null;
 +    
 +    hashCode = 0;
 +    if (check)
 +      check();
 +  }
 +  
 +  /**
 +   * Sets this extent's end row
 +   * 
 +   */
 +  public void setEndRow(Text endRow) {
 +    setEndRow(endRow, true, true);
 +  }
 +  
 +  /**
 +   * Returns this extent's end row
 +   * 
 +   */
 +  public Text getEndRow() {
 +    return textEndRow;
 +  }
 +  
 +  /**
 +   * Returns the previous extent's end row
 +   * 
 +   */
 +  public Text getPrevEndRow() {
 +    return textPrevEndRow;
 +  }
 +  
 +  private void setPrevEndRow(Text prevEndRow, boolean check, boolean copy) {
 +    if (prevEndRow != null)
 +      if (copy)
 +        this.textPrevEndRow = new Text(prevEndRow);
 +      else
 +        this.textPrevEndRow = prevEndRow;
 +    else
 +      this.textPrevEndRow = null;
 +    
 +    hashCode = 0;
 +    if (check)
 +      check();
 +  }
 +  
 +  /**
 +   * Sets the previous extent's end row
 +   * 
 +   */
 +  public void setPrevEndRow(Text prevEndRow) {
 +    setPrevEndRow(prevEndRow, true, true);
 +  }
 +  
 +  /**
 +   * Populates the extent's data fields from a DataInput object
 +   * 
 +   */
++  @Override
 +  public void readFields(DataInput in) throws IOException {
 +    Text tid = new Text();
 +    tid.readFields(in);
 +    setTableId(tid);
 +    boolean hasRow = in.readBoolean();
 +    if (hasRow) {
 +      Text er = new Text();
 +      er.readFields(in);
 +      setEndRow(er, false, false);
 +    } else {
 +      setEndRow(null, false, false);
 +    }
 +    boolean hasPrevRow = in.readBoolean();
 +    if (hasPrevRow) {
 +      Text per = new Text();
 +      per.readFields(in);
 +      setPrevEndRow(per, false, true);
 +    } else {
 +      setPrevEndRow((Text) null);
 +    }
 +    
 +    hashCode = 0;
 +    check();
 +  }
 +  
 +  /**
 +   * Writes this extent's data fields to a DataOutput object
 +   * 
 +   */
++  @Override
 +  public void write(DataOutput out) throws IOException {
 +    getTableId().write(out);
 +    if (getEndRow() != null) {
 +      out.writeBoolean(true);
 +      getEndRow().write(out);
 +    } else {
 +      out.writeBoolean(false);
 +    }
 +    if (getPrevEndRow() != null) {
 +      out.writeBoolean(true);
 +      getPrevEndRow().write(out);
 +    } else {
 +      out.writeBoolean(false);
 +    }
 +  }
 +  
 +  /**
 +   * Returns a Mutation that updates this extent's previous end row entry in the Metadata table
 +   * 
 +   */
 +  public Mutation getPrevRowUpdateMutation() {
 +    return getPrevRowUpdateMutation(this);
 +  }
 +  
 +  /**
 +   * Empty start or end rows indicate that the range is unbounded on that side; for example, with an empty start row, all key extents up to and including the end row are used.
 +   * 
 +   * @return all the key extents that the rows cover
 +   */
 +  
 +  public static Collection<KeyExtent> getKeyExtentsForRange(Text startRow, Text endRow, Set<KeyExtent> kes) {
 +    if (kes == null)
 +      return Collections.emptyList();
 +    if (startRow == null)
 +      startRow = new Text();
 +    if (endRow == null)
 +      endRow = new Text();
 +    Collection<KeyExtent> keys = new ArrayList<KeyExtent>();
 +    for (KeyExtent ckes : kes) {
 +      if (ckes.getPrevEndRow() == null) {
 +        if (ckes.getEndRow() == null) {
 +          // only tablet
 +          keys.add(ckes);
 +        } else {
 +          // first tablet
 +          // if start row = '' then we want everything up to the endRow which will always include the first tablet
 +          if (startRow.getLength() == 0) {
 +            keys.add(ckes);
 +          } else if (ckes.getEndRow().compareTo(startRow) >= 0) {
 +            keys.add(ckes);
 +          }
 +        }
 +      } else {
 +        if (ckes.getEndRow() == null) {
 +          // last tablet
 +          // if endRow = '' and we're at the last tablet, add it
 +          if (endRow.getLength() == 0) {
 +            keys.add(ckes);
 +          }
 +          if (ckes.getPrevEndRow().compareTo(endRow) < 0) {
 +            keys.add(ckes);
 +          }
 +        } else {
 +          // tablet in the middle
 +          if (startRow.getLength() == 0) {
 +            // no start row
 +            
 +            if (endRow.getLength() == 0) {
 +              // no start & end row
 +              keys.add(ckes);
 +            } else {
 +              // just no start row
 +              if (ckes.getPrevEndRow().compareTo(endRow) < 0) {
 +                keys.add(ckes);
 +              }
 +            }
 +          } else if (endRow.getLength() == 0) {
 +            // no end row
 +            if (ckes.getEndRow().compareTo(startRow) >= 0) {
 +              keys.add(ckes);
 +            }
 +          } else {
 +            // no null prevEndRow or endRow and no empty string start or end rows
 +            if ((ckes.getPrevEndRow().compareTo(endRow) < 0 && ckes.getEndRow().compareTo(startRow) >= 0)) {
 +              keys.add(ckes);
 +            }
 +          }
 +          
 +        }
 +      }
 +    }
 +    return keys;
 +  }
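A sketch of how this lookup behaves for a hypothetical three-tablet table "t" (imports as in this class):

    SortedSet<KeyExtent> tablets = new TreeSet<KeyExtent>();
    tablets.add(new KeyExtent(new Text("t"), new Text("g"), null));           // (-inf, g]
    tablets.add(new KeyExtent(new Text("t"), new Text("r"), new Text("g")));  // (g, r]
    tablets.add(new KeyExtent(new Text("t"), null, new Text("r")));           // (r, +inf)
    Collection<KeyExtent> hit = KeyExtent.getKeyExtentsForRange(new Text("m"), new Text("s"), tablets);
    // expected: the (g, r] and (r, +inf) tablets, i.e. those whose row range intersects [m, s]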
 +  
 +  public static Text decodePrevEndRow(Value ibw) {
 +    Text per = null;
 +    
 +    if (ibw.get()[0] != 0) {
 +      per = new Text();
 +      per.set(ibw.get(), 1, ibw.get().length - 1);
 +    }
 +    
 +    return per;
 +  }
 +  
 +  public static Value encodePrevEndRow(Text per) {
 +    if (per == null)
 +      return new Value(new byte[] {0});
 +    byte[] b = new byte[per.getLength() + 1];
 +    b[0] = 1;
 +    System.arraycopy(per.getBytes(), 0, b, 1, per.getLength());
 +    return new Value(b);
 +  }
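A small sketch of the prev end row encoding used by this pair: a leading zero byte marks a null previous end row, otherwise a leading one byte is followed by the row bytes. The row "g" is hypothetical:

    Value enc  = KeyExtent.encodePrevEndRow(new Text("g"));  // bytes {1, 'g'}
    Text per   = KeyExtent.decodePrevEndRow(enc);            // restores "g"
    Value none = KeyExtent.encodePrevEndRow(null);           // single zero byte, decodes back to null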
 +  
 +  public static Mutation getPrevRowUpdateMutation(KeyExtent ke) {
 +    Mutation m = new Mutation(ke.getMetadataEntry());
 +    Constants.METADATA_PREV_ROW_COLUMN.put(m, encodePrevEndRow(ke.getPrevEndRow()));
 +    return m;
 +  }
 +  
 +  /**
 +   * Compares extents based on rows
 +   * 
 +   */
++  @Override
 +  public int compareTo(KeyExtent other) {
 +    
 +    int result = getTableId().compareTo(other.getTableId());
 +    if (result != 0)
 +      return result;
 +    
 +    if (this.getEndRow() == null) {
 +      if (other.getEndRow() != null)
 +        return 1;
 +    } else {
 +      if (other.getEndRow() == null)
 +        return -1;
 +      
 +      result = getEndRow().compareTo(other.getEndRow());
 +      if (result != 0)
 +        return result;
 +    }
 +    if (this.getPrevEndRow() == null) {
 +      if (other.getPrevEndRow() == null)
 +        return 0;
 +      return -1;
 +    }
 +    if (other.getPrevEndRow() == null)
 +      return 1;
 +    return this.getPrevEndRow().compareTo(other.getPrevEndRow());
 +  }
 +  
 +  private int hashCode = 0;
 +  
 +  @Override
 +  public int hashCode() {
 +    if (hashCode != 0)
 +      return hashCode;
 +    
 +    int prevEndRowHash = 0;
 +    int endRowHash = 0;
 +    if (this.getEndRow() != null) {
 +      endRowHash = this.getEndRow().hashCode();
 +    }
 +    
 +    if (this.getPrevEndRow() != null) {
 +      prevEndRowHash = this.getPrevEndRow().hashCode();
 +    }
 +    
 +    hashCode = getTableId().hashCode() + endRowHash + prevEndRowHash;
 +    return hashCode;
 +  }
 +  
 +  private boolean equals(Text t1, Text t2) {
 +    if (t1 == null || t2 == null)
 +      return t1 == t2;
 +    
 +    return t1.equals(t2);
 +  }
 +  
 +  @Override
 +  public boolean equals(Object o) {
 +    if (o == this)
 +      return true;
 +    if (!(o instanceof KeyExtent))
 +      return false;
 +    KeyExtent oke = (KeyExtent) o;
 +    return textTableId.equals(oke.textTableId) && equals(textEndRow, oke.textEndRow) && equals(textPrevEndRow, oke.textPrevEndRow);
 +  }
 +  
 +  @Override
 +  public String toString() {
 +    String endRowString;
 +    String prevEndRowString;
 +    String tableIdString = getTableId().toString().replaceAll(";", "\\\\;").replaceAll("\\\\", "\\\\\\\\");
 +    
 +    if (getEndRow() == null)
 +      endRowString = "<";
 +    else
 +      endRowString = ";" + TextUtil.truncate(getEndRow()).toString().replaceAll(";", "\\\\;").replaceAll("\\\\", "\\\\\\\\");
 +    
 +    if (getPrevEndRow() == null)
 +      prevEndRowString = "<";
 +    else
 +      prevEndRowString = ";" + TextUtil.truncate(getPrevEndRow()).toString().replaceAll(";", "\\\\;").replaceAll("\\\\", "\\\\\\\\");
 +    
 +    return tableIdString + endRowString + prevEndRowString;
 +  }
 +  
 +  public UUID getUUID() {
 +    try {
 +      
 +      ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +      DataOutputStream dos = new DataOutputStream(baos);
 +      
 +      // to get a unique hash it is important to encode the data
 +      // like it is being serialized
 +      
 +      this.write(dos);
 +      
 +      dos.close();
 +      
 +      return UUID.nameUUIDFromBytes(baos.toByteArray());
 +      
 +    } catch (IOException e) {
 +      // should not happen since we are writing to memory
 +      throw new RuntimeException(e);
 +    }
 +  }
 +  
 +  // note: this is only the encoding of the table id and the last row, not the prev row
 +  /**
 +   * Populates the extent's fields based on a flattened extent
 +   * 
 +   */
 +  private void decodeMetadataRow(Text flattenedExtent) {
 +    int semiPos = -1;
 +    int ltPos = -1;
 +    
 +    for (int i = 0; i < flattenedExtent.getLength(); i++) {
 +      if (flattenedExtent.getBytes()[i] == ';' && semiPos < 0) {
 +        // want the position of the first semicolon
 +        semiPos = i;
 +      }
 +      
 +      if (flattenedExtent.getBytes()[i] == '<') {
 +        ltPos = i;
 +      }
 +    }
 +    
 +    if (semiPos < 0 && ltPos < 0) {
 +      throw new IllegalArgumentException("Metadata row does not contain ; or <  " + flattenedExtent);
 +    }
 +    
 +    if (semiPos < 0) {
 +      
 +      if (ltPos != flattenedExtent.getLength() - 1) {
 +        throw new IllegalArgumentException("< must come at end of Metadata row  " + flattenedExtent);
 +      }
 +      
 +      Text tableId = new Text();
 +      tableId.set(flattenedExtent.getBytes(), 0, flattenedExtent.getLength() - 1);
 +      this.setTableId(tableId);
 +      this.setEndRow(null, false, false);
 +    } else {
 +      
 +      Text tableId = new Text();
 +      tableId.set(flattenedExtent.getBytes(), 0, semiPos);
 +      
 +      Text endRow = new Text();
 +      endRow.set(flattenedExtent.getBytes(), semiPos + 1, flattenedExtent.getLength() - (semiPos + 1));
 +      
 +      this.setTableId(tableId);
 +      
 +      this.setEndRow(endRow, false, false);
 +    }
 +  }
 +  
 +  public static byte[] tableOfMetadataRow(Text row) {
 +    KeyExtent ke = new KeyExtent();
 +    ke.decodeMetadataRow(row);
 +    return TextUtil.getBytes(ke.getTableId());
 +  }
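For example, given a hypothetical metadata row "2;m":

    byte[] tid = KeyExtent.tableOfMetadataRow(new Text("2;m"));  // the bytes of table id "2"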
 +  
 +  public boolean contains(final ByteSequence bsrow) {
 +    if (bsrow == null) {
 +      throw new IllegalArgumentException("Passing null to contains is ambiguous, could be in first or last extent of table");
 +    }
 +    
 +    BinaryComparable row = new BinaryComparable() {
 +      
 +      @Override
 +      public int getLength() {
 +        return bsrow.length();
 +      }
 +      
 +      @Override
 +      public byte[] getBytes() {
 +        if (bsrow.isBackedByArray() && bsrow.offset() == 0)
 +          return bsrow.getBackingArray();
 +        
 +        return bsrow.toArray();
 +      }
 +    };
 +    
 +    if ((this.getPrevEndRow() == null || this.getPrevEndRow().compareTo(row) < 0) && (this.getEndRow() == null || this.getEndRow().compareTo(row) >= 0)) {
 +      return true;
 +    }
 +    return false;
 +  }
 +  
 +  public boolean contains(BinaryComparable row) {
 +    if (row == null) {
 +      throw new IllegalArgumentException("Passing null to contains is ambiguous, could be in first or last extent of table");
 +    }
 +    
 +    if ((this.getPrevEndRow() == null || this.getPrevEndRow().compareTo(row) < 0) && (this.getEndRow() == null || this.getEndRow().compareTo(row) >= 0)) {
 +      return true;
 +    }
 +    return false;
 +  }
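Both contains overloads implement the same rule: the previous end row is exclusive and the end row is inclusive. A hypothetical example:

    KeyExtent ke = new KeyExtent(new Text("t"), new Text("m"), new Text("c"));  // covers rows (c, m]
    ke.contains(new Text("d"));  // true
    ke.contains(new Text("c"));  // false, previous end row is exclusive
    ke.contains(new Text("m"));  // true, end row is inclusive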
 +  
 +  public Range toDataRange() {
 +    return new Range(getPrevEndRow(), false, getEndRow(), true);
 +  }
 +  
 +  public Range toMetadataRange() {
 +    Text metadataPrevRow = new Text(getTableId());
 +    metadataPrevRow.append(new byte[] {';'}, 0, 1);
 +    if (getPrevEndRow() != null) {
 +      metadataPrevRow.append(getPrevEndRow().getBytes(), 0, getPrevEndRow().getLength());
 +    }
 +    
 +    Range range = new Range(metadataPrevRow, getPrevEndRow() == null, getMetadataEntry(), true);
 +    return range;
 +  }
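A sketch of the two range views for a hypothetical extent of table "2":

    KeyExtent ke = new KeyExtent(new Text("2"), new Text("m"), new Text("c"));
    Range data = ke.toDataRange();      // rows (c, m] in the table itself
    Range meta = ke.toMetadataRange();  // rows ("2;c", "2;m"] in the metadata table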
 +  
 +  public static SortedSet<KeyExtent> findChildren(KeyExtent ke, SortedSet<KeyExtent> tablets) {
 +    
 +    SortedSet<KeyExtent> children = null;
 +    
 +    for (KeyExtent tabletKe : tablets) {
 +      
 +      if (ke.getPrevEndRow() == tabletKe.getPrevEndRow() || ke.getPrevEndRow() != null && tabletKe.getPrevEndRow() != null
 +          && tabletKe.getPrevEndRow().compareTo(ke.getPrevEndRow()) == 0) {
 +        children = new TreeSet<KeyExtent>();
 +      }
 +      
 +      if (children != null) {
 +        children.add(tabletKe);
 +      }
 +      
 +      if (ke.getEndRow() == tabletKe.getEndRow() || ke.getEndRow() != null && tabletKe.getEndRow() != null
 +          && tabletKe.getEndRow().compareTo(ke.getEndRow()) == 0) {
 +        return children;
 +      }
 +    }
 +    
 +    return new TreeSet<KeyExtent>();
 +  }
 +  
 +  public static KeyExtent findContainingExtent(KeyExtent extent, SortedSet<KeyExtent> extents) {
 +    
 +    KeyExtent lookupExtent = new KeyExtent(extent);
 +    lookupExtent.setPrevEndRow((Text) null);
 +    
 +    SortedSet<KeyExtent> tailSet = extents.tailSet(lookupExtent);
 +    
 +    if (tailSet.isEmpty()) {
 +      return null;
 +    }
 +    
 +    KeyExtent first = tailSet.first();
 +    
 +    if (first.getTableId().compareTo(extent.getTableId()) != 0) {
 +      return null;
 +    }
 +    
 +    if (first.getPrevEndRow() == null) {
 +      return first;
 +    }
 +    
 +    if (extent.getPrevEndRow() == null) {
 +      return null;
 +    }
 +    
 +    if (extent.getPrevEndRow().compareTo(first.getPrevEndRow()) >= 0)
 +      return first;
 +    return null;
 +  }
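A hypothetical lookup against a two-tablet view of table "t":

    SortedSet<KeyExtent> tablets = new TreeSet<KeyExtent>();
    tablets.add(new KeyExtent(new Text("t"), new Text("m"), null));               // (-inf, m]
    tablets.add(new KeyExtent(new Text("t"), null, new Text("m")));               // (m, +inf)
    KeyExtent child = new KeyExtent(new Text("t"), new Text("g"), new Text("c")); // (c, g]
    KeyExtent parent = KeyExtent.findContainingExtent(child, tablets);            // the (-inf, m] tablet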
 +  
 +  private static boolean startsAfter(KeyExtent nke, KeyExtent ke) {
 +    
 +    int tiCmp = ke.getTableId().compareTo(nke.getTableId());
 +    
 +    if (tiCmp > 0) {
 +      return true;
 +    }
 +    
 +    return ke.getPrevEndRow() != null && nke.getEndRow() != null && ke.getPrevEndRow().compareTo(nke.getEndRow()) >= 0;
 +  }
 +  
 +  private static Text rowAfterPrevRow(KeyExtent nke) {
 +    Text row = new Text(nke.getPrevEndRow());
 +    row.append(new byte[] {0}, 0, 1);
 +    return row;
 +  }
 +  
 +  // Some duplication with TabletLocatorImpl
 +  public static Set<KeyExtent> findOverlapping(KeyExtent nke, SortedSet<KeyExtent> extents) {
 +    if (nke == null || extents == null || extents.isEmpty())
 +      return Collections.emptySet();
 +    
 +    SortedSet<KeyExtent> start;
 +    
 +    if (nke.getPrevEndRow() != null) {
 +      Text row = rowAfterPrevRow(nke);
 +      KeyExtent lookupKey = new KeyExtent(nke.getTableId(), row, null);
 +      start = extents.tailSet(lookupKey);
 +    } else {
 +      KeyExtent lookupKey = new KeyExtent(nke.getTableId(), new Text(), null);
 +      start = extents.tailSet(lookupKey);
 +    }
 +    
 +    TreeSet<KeyExtent> result = new TreeSet<KeyExtent>();
 +    for (KeyExtent ke : start) {
 +      if (startsAfter(nke, ke)) {
 +        break;
 +      }
 +      result.add(ke);
 +    }
 +    return result;
 +  }
 +  
 +  public boolean overlaps(KeyExtent other) {
 +    SortedSet<KeyExtent> set = new TreeSet<KeyExtent>();
 +    set.add(other);
 +    return !findOverlapping(this, set).isEmpty();
 +  }
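For example, with two hypothetical extents of the same table:

    KeyExtent a = new KeyExtent(new Text("t"), new Text("m"), null);          // (-inf, m]
    KeyExtent b = new KeyExtent(new Text("t"), new Text("z"), new Text("g")); // (g, z]
    boolean shared = a.overlaps(b);  // true, the ranges share (g, m]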
 +  
 +  // Specialization of findOverlapping(KeyExtent, SortedSet<KeyExtent>) to work with SortedMap
 +  public static Set<KeyExtent> findOverlapping(KeyExtent nke, SortedMap<KeyExtent,? extends Object> extents) {
 +    if (nke == null || extents == null || extents.isEmpty())
 +      return Collections.emptySet();
 +    
 +    SortedMap<KeyExtent,? extends Object> start;
 +    
 +    if (nke.getPrevEndRow() != null) {
 +      Text row = rowAfterPrevRow(nke);
 +      KeyExtent lookupKey = new KeyExtent(nke.getTableId(), row, null);
 +      start = extents.tailMap(lookupKey);
 +    } else {
 +      KeyExtent lookupKey = new KeyExtent(nke.getTableId(), new Text(), null);
 +      start = extents.tailMap(lookupKey);
 +    }
 +    
 +    TreeSet<KeyExtent> result = new TreeSet<KeyExtent>();
 +    for (Entry<KeyExtent,? extends Object> entry : start.entrySet()) {
 +      KeyExtent ke = entry.getKey();
 +      if (startsAfter(nke, ke)) {
 +        break;
 +      }
 +      result.add(ke);
 +    }
 +    return result;
 +  }
 +  
 +  public static Text getMetadataEntry(KeyExtent extent) {
 +    return getMetadataEntry(extent.getTableId(), extent.getEndRow());
 +  }
 +  
 +  public TKeyExtent toThrift() {
 +    return new TKeyExtent(TextUtil.getByteBuffer(textTableId), textEndRow == null ? null : TextUtil.getByteBuffer(textEndRow), textPrevEndRow == null ? null
 +        : TextUtil.getByteBuffer(textPrevEndRow));
 +  }
 +  
-   /**
-    * @param prevExtent
-    */
 +  public boolean isPreviousExtent(KeyExtent prevExtent) {
 +    if (prevExtent == null)
 +      return getPrevEndRow() == null;
 +    
 +    if (!prevExtent.getTableId().equals(getTableId()))
 +      throw new IllegalArgumentException("Cannot compare accross tables " + prevExtent + " " + this);
 +    
 +    if (prevExtent.getEndRow() == null)
 +      return false;
 +    
 +    if (getPrevEndRow() == null)
 +      return false;
 +    
 +    return prevExtent.getEndRow().equals(getPrevEndRow());
 +  }
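A hypothetical pair of adjacent extents:

    KeyExtent left  = new KeyExtent(new Text("t"), new Text("g"), null);          // (-inf, g]
    KeyExtent right = new KeyExtent(new Text("t"), new Text("m"), new Text("g")); // (g, m]
    boolean adjacent = right.isPreviousExtent(left);  // true, left's end row equals right's prev end row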
 +  
 +  public boolean isMeta() {
 +    return getTableId().toString().equals(Constants.METADATA_TABLE_ID);
 +  }
 +  
 +  public boolean isRootTablet() {
 +    return this.compareTo(Constants.ROOT_TABLET_EXTENT) == 0;
 +  }
 +}