You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@hbase.apache.org by stack <st...@duboce.net> on 2008/08/31 07:12:29 UTC

Re: svn commit: r690637 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache/hadoop/hbase/rest/ src/test/org/apache/hadoop/hbase/

I committed HBASE-832 below along with HBASE-840.  I meant to commit it after 
840 rather than as part of 840.
St.Ack

stack@apache.org wrote:
> Author: stack
> Date: Sat Aug 30 21:48:57 2008
> New Revision: 690637
>
> URL: http://svn.apache.org/viewvc?rev=690637&view=rev
> Log:
> HBASE-840 More options on the row query in REST interface
>
> Modified:
>     hadoop/hbase/trunk/CHANGES.txt
>     hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
>     hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
>     hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
>     hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
>     hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java
>     hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowHandler.java
>     hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java
>
> Modified: hadoop/hbase/trunk/CHANGES.txt
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/CHANGES.txt (original)
> +++ hadoop/hbase/trunk/CHANGES.txt Sat Aug 30 21:48:57 2008
> @@ -67,6 +67,8 @@
>     HBASE-784  Base hbase-0.3.0 on hadoop-0.18
>     HBASE-841  Consolidate multiple overloaded methods in HRegionInterface,
>                HRegionServer (Jean-Daniel Cryans via Jim Kellerman)
> +   HBASE-840  More options on the row query in REST interface
> +              (Sishen Freecity via Stack)
>  
>    NEW FEATURES
>     HBASE-787  Postgresql to HBase table replication example (Tim Sell via Stack)
>
> Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java (original)
> +++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Sat Aug 30 21:48:57 2008
> @@ -132,7 +132,10 @@
>    static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
>  
>    /** The META table's name. */
> -  static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
> +  static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");  
> +
> +  /** delimiter used between portions of a region name */
> +  public static final int META_ROW_DELIMITER = ',';
>  
>    // Defines for the column names used in both ROOT and META HBase 'meta' tables.
>    
>
> Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
> +++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java Sat Aug 30 21:48:57 2008
> @@ -40,6 +40,12 @@
>    private byte [] column = HConstants.EMPTY_BYTE_ARRAY;
>    private long timestamp = Long.MAX_VALUE;
>  
> +  /*
> +   * regionInfo is only used as a hack to compare HSKs.
> +   * It is not serialized.  See https://issues.apache.org/jira/browse/HBASE-832
> +   */
> +  private HRegionInfo regionInfo = null;
> +
>    /** Default constructor used in conjunction with Writable interface */
>    public HStoreKey() {
>      super();
> @@ -47,8 +53,8 @@
>    
>    /**
>     * Create an HStoreKey specifying only the row
> -   * The column defaults to the empty string and the time stamp defaults to
> -   * Long.MAX_VALUE
> +   * The column defaults to the empty string, the time stamp defaults to
> +   * Long.MAX_VALUE and the table defaults to empty string
>     * 
>     * @param row - row key
>     */
> @@ -58,8 +64,8 @@
>  
>    /**
>     * Create an HStoreKey specifying only the row
> -   * The column defaults to the empty string and the time stamp defaults to
> -   * Long.MAX_VALUE
> +   * The column defaults to the empty string, the time stamp defaults to
> +   * Long.MAX_VALUE and the table defaults to empty string
>     * 
>     * @param row - row key
>     */
> @@ -69,7 +75,7 @@
>  
>    /**
>     * Create an HStoreKey specifying the row and timestamp
> -   * The column name defaults to the empty string
> +   * The column and table names default to the empty string
>     * 
>     * @param row row key
>     * @param timestamp timestamp value
> @@ -80,29 +86,31 @@
>  
>    /**
>     * Create an HStoreKey specifying the row and timestamp
> -   * The column name defaults to the empty string
> +   * The column and table names default to the empty string
>     * 
>     * @param row row key
>     * @param timestamp timestamp value
>     */
>    public HStoreKey(final String row, long timestamp) {
> -    this (row, "", timestamp);
> +    this (row, "", timestamp, new HRegionInfo());
>    }
>  
>    /**
>     * Create an HStoreKey specifying the row and column names
>     * The timestamp defaults to LATEST_TIMESTAMP
> +   * and table name defaults to the empty string
>     * 
>     * @param row row key
>     * @param column column key
>     */
>    public HStoreKey(final String row, final String column) {
> -    this(row, column, HConstants.LATEST_TIMESTAMP);
> +    this(row, column, HConstants.LATEST_TIMESTAMP, new HRegionInfo());
>    }
>  
>    /**
>     * Create an HStoreKey specifying the row and column names
>     * The timestamp defaults to LATEST_TIMESTAMP
> +   * and table name defaults to the empty string
>     * 
>     * @param row row key
>     * @param column column key
> @@ -110,6 +118,19 @@
>    public HStoreKey(final byte [] row, final byte [] column) {
>      this(row, column, HConstants.LATEST_TIMESTAMP);
>    }
> +  
> +  /**
> +   * Create an HStoreKey specifying the row, column names and table name
> +   * The timestamp defaults to LATEST_TIMESTAMP
> +   * 
> +   * @param row row key
> +   * @param column column key
> +   * @param regionInfo region info
> +   */
> +  public HStoreKey(final byte [] row, 
> +      final byte [] column, final HRegionInfo regionInfo) {
> +    this(row, column, HConstants.LATEST_TIMESTAMP, regionInfo);
> +  }
>  
>    /**
>     * Create an HStoreKey specifying all the fields
> @@ -118,13 +139,16 @@
>     * @param row row key
>     * @param column column key
>     * @param timestamp timestamp value
> +   * @param regionInfo region info
>     */
> -  public HStoreKey(final String row, final String column, long timestamp) {
> -    this (Bytes.toBytes(row), Bytes.toBytes(column), timestamp);
> +  public HStoreKey(final String row, 
> +      final String column, long timestamp, final HRegionInfo regionInfo) {
> +    this (Bytes.toBytes(row), Bytes.toBytes(column), 
> +        timestamp, regionInfo);
>    }
>  
>    /**
> -   * Create an HStoreKey specifying all the fields
> +   * Create an HStoreKey specifying all the fields with unspecified table
>     * Does not make copies of the passed byte arrays. Presumes the passed 
>     * arrays immutable.
>     * @param row row key
> @@ -132,10 +156,25 @@
>     * @param timestamp timestamp value
>     */
>    public HStoreKey(final byte [] row, final byte [] column, long timestamp) {
> +    this(row, column, timestamp, null);
> +  }
> +  
> +  /**
> +   * Create an HStoreKey specifying all the fields with specified table
> +   * Does not make copies of the passed byte arrays. Presumes the passed 
> +   * arrays immutable.
> +   * @param row row key
> +   * @param column column key
> +   * @param timestamp timestamp value
> +   * @param regionInfo region info
> +   */
> +  public HStoreKey(final byte [] row, 
> +      final byte [] column, long timestamp, final HRegionInfo regionInfo) {
>      // Make copies
>      this.row = row;
>      this.column = column;
>      this.timestamp = timestamp;
> +    this.regionInfo = regionInfo;
>    }
>    
>    /** @return Approximate size in bytes of this key. */
> @@ -205,6 +244,11 @@
>      return this.timestamp;
>    }
>    
> +  /** @return value of regioninfo */
> +  public HRegionInfo getHRegionInfo() {
> +    return this.regionInfo;
> +  }
> +  
>    /**
>     * Compares the row and column of two keys
>     * @param other Key to compare against. Compares row and column.
> @@ -274,7 +318,7 @@
>    /** {@inheritDoc} */
>    public int compareTo(Object o) {
>      HStoreKey other = (HStoreKey)o;
> -    int result = Bytes.compareTo(this.row, other.row);
> +    int result = compareTwoRowKeys(this.regionInfo, this.row, other.row);
>      if (result != 0) {
>        return result;
>      }
> @@ -419,6 +463,66 @@
>      return Bytes.add(hsk.getRow(), hsk.getColumn());
>    }
>    
> +  /**
> +   * Utility method to compare two row keys.
> +   * This is required because of the meta delimiters.
> +   * This is a hack.
> +   * @param regionInfo
> +   * @param rowA
> +   * @param rowB
> +   * @return value of the comparison
> +   */
> +  public static int compareTwoRowKeys(HRegionInfo regionInfo, 
> +      byte[] rowA, byte[] rowB) {
> +    if(regionInfo != null && (regionInfo.isMetaRegion() ||
> +        regionInfo.isRootRegion())) {
> +      byte[][] keysA = stripStartKeyMeta(rowA);
> +      byte[][] KeysB = stripStartKeyMeta(rowB);
> +      int rowCompare = Bytes.compareTo(keysA[0], KeysB[0]);
> +      if(rowCompare == 0)
> +        rowCompare = Bytes.compareTo(keysA[1], KeysB[1]);
> +      return rowCompare;
> +    } else {
> +      return Bytes.compareTo(rowA, rowB);
> +    }
> +  }
> +  
> +  /**
> +   * Utility method to check if two row keys are equal.
> +   * This is required because of the meta delimiters
> +   * This is a hack
> +   * @param regionInfo
> +   * @param rowA
> +   * @param rowB
> +   * @return if it's equal
> +   */
> +  public static boolean equalsTwoRowKeys(HRegionInfo regionInfo, 
> +      byte[] rowA, byte[] rowB) {
> +    return rowA == null && rowB == null? true:
> +      rowA == null && rowB != null? false:
> +        rowA != null && rowB == null? false:
> +          rowA.length != rowB.length? false:
> +        compareTwoRowKeys(regionInfo,rowA,rowB) == 0;
> +  }
> +  
> +  private static byte[][] stripStartKeyMeta(byte[] rowKey) {
> +    int offset = -1;
> +    for (int i = rowKey.length - 1; i > 0; i--) {
> +      if (rowKey[i] == HConstants.META_ROW_DELIMITER) {
> +        offset = i;
> +        break;
> +      }
> +    }
> +    byte [] row = new byte[offset];
> +    System.arraycopy(rowKey, 0, row, 0,offset);
> +    byte [] timestamp = new byte[rowKey.length - offset - 1];
> +    System.arraycopy(rowKey, offset+1, timestamp, 0,rowKey.length - offset - 1);
> +    byte[][] elements = new byte[2][];
> +    elements[0] = row;
> +    elements[1] = timestamp;
> +    return elements;
> +  }
> +  
>    // Writable
>  
>    /** {@inheritDoc} */
> @@ -434,4 +538,4 @@
>      this.column = Bytes.readByteArray(in);
>      this.timestamp = in.readLong();
>    }
> -}
> \ No newline at end of file
> +}
>
> Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
> +++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Aug 30 21:48:57 2008
> @@ -1246,13 +1246,14 @@
>          // get the closest key
>          byte [] closestKey = store.getRowKeyAtOrBefore(row);
>          // if it happens to be an exact match, we can stop looping
> -        if (Bytes.equals(row, closestKey)) {
> +        if (HStoreKey.equalsTwoRowKeys(regionInfo,row, closestKey)) {
>            key = new HStoreKey(closestKey);
>            break;
>          }
>          // otherwise, we need to check if it's the max and move to the next
>          if (closestKey != null 
> -          && (key == null || Bytes.compareTo(closestKey, key.getRow()) > 0) ) {
> +          && (key == null || HStoreKey.compareTwoRowKeys(
> +              regionInfo,closestKey, key.getRow()) > 0) ) {
>            key = new HStoreKey(closestKey);
>          }
>        }
>
> Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java (original)
> +++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java Sat Aug 30 21:48:57 2008
> @@ -37,6 +37,7 @@
>  import org.apache.commons.logging.Log;
>  import org.apache.commons.logging.LogFactory;
>  import org.apache.hadoop.hbase.HConstants;
> +import org.apache.hadoop.hbase.HRegionInfo;
>  import org.apache.hadoop.hbase.HStoreKey;
>  import org.apache.hadoop.hbase.io.Cell;
>  import org.apache.hadoop.hbase.util.Bytes;
> @@ -53,6 +54,8 @@
>    private final Log LOG = LogFactory.getLog(this.getClass().getName());
>    
>    private final long ttl;
> +  
> +  private HRegionInfo regionInfo;
>  
>    // Note that since these structures are always accessed with a lock held,
>    // so no additional synchronization is required.
> @@ -72,14 +75,17 @@
>     */
>    public Memcache() {
>      this.ttl = HConstants.FOREVER;
> +    this.regionInfo = null;
>    }
>  
>    /**
>     * Constructor.
>     * @param ttl The TTL for cache entries, in milliseconds.
> +   * @param regionInfo The HRI for this cache 
>     */
> -  public Memcache(final long ttl) {
> +  public Memcache(final long ttl, HRegionInfo regionInfo) {
>      this.ttl = ttl;
> +    this.regionInfo = regionInfo;
>    }
>  
>    /*
> @@ -383,7 +389,8 @@
>      // the search key, or a range of values between the first candidate key
>      // and the ultimate search key (or the end of the cache)
>      if (!tailMap.isEmpty() &&
> -        Bytes.compareTo(tailMap.firstKey().getRow(), search_key.getRow()) <= 0) {
> +        HStoreKey.compareTwoRowKeys(regionInfo, 
> +            tailMap.firstKey().getRow(), search_key.getRow()) <= 0) {
>        Iterator<HStoreKey> key_iterator = tailMap.keySet().iterator();
>  
>        // Keep looking at cells as long as they are no greater than the 
> @@ -391,9 +398,11 @@
>        HStoreKey deletedOrExpiredRow = null;
>        for (HStoreKey found_key = null; key_iterator.hasNext() &&
>            (found_key == null ||
> -            Bytes.compareTo(found_key.getRow(), row) <= 0);) {
> +            HStoreKey.compareTwoRowKeys(regionInfo, 
> +                found_key.getRow(), row) <= 0);) {
>          found_key = key_iterator.next();
> -        if (Bytes.compareTo(found_key.getRow(), row) <= 0) {
> +        if (HStoreKey.compareTwoRowKeys(regionInfo, 
> +            found_key.getRow(), row) <= 0) {
>            if (HLogEdit.isDeleted(tailMap.get(found_key))) {
>              HStore.handleDeleted(found_key, candidateKeys, deletes);
>              if (deletedOrExpiredRow == null) {
>
> Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java (original)
> +++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java Sat Aug 30 21:48:57 2008
> @@ -55,6 +55,7 @@
>    protected static final String CONTENT_TYPE = "content-type";
>    protected static final String ROW = "row";
>    protected static final String REGIONS = "regions";
> +  protected static final String VERSION = "version";
>    
>    protected final Log LOG = LogFactory.getLog(this.getClass());
>  
> @@ -233,13 +234,32 @@
>        outputter.startTag(COLUMN);
>        doElement(outputter, "name", 
>          org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey()));
> -      // We don't know String from binary data so we always base64 encode.
> -      doElement(outputter, "value",
> -        org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getValue().getValue()));
> +      outputCellXml(outputter, e.getValue());
>        outputter.endTag();
>      }
>    }
>  
> +  protected void outputColumnsWithMultiVersionsXml(final XMLOutputter outputter,
> +    final Map<byte [], Cell[]> m)
> +  throws IllegalStateException, IllegalArgumentException, IOException {
> +    for (Map.Entry<byte [], Cell[]> e: m.entrySet()) {
> +      for (Cell c : e.getValue()) {
> +        outputter.startTag(COLUMN);
> +        doElement(outputter, "name", 
> +            org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey())); 
> +        outputCellXml(outputter, c);
> +        outputter.endTag();       
> +      }
> +    }
> +  }
> +  
> +  protected void outputCellXml(final XMLOutputter outputter, Cell c) 
> +  throws IllegalStateException, IllegalArgumentException, IOException {
> +    // We don't know String from binary data so we always base64 encode.
> +    doElement(outputter, "value",
> +        org.apache.hadoop.hbase.util.Base64.encodeBytes(c.getValue()));
> +    doElement(outputter, "timestamp", String.valueOf(c.getTimestamp()));    
> +  }
>  //  Commented - multipart support is currently nonexistant.
>  //  protected void outputColumnsMime(final MultiPartResponse mpr,
>  //     final Map<Text, Cell> m)
>
> Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowHandler.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowHandler.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowHandler.java (original)
> +++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowHandler.java Sat Aug 30 21:48:57 2008
> @@ -2,9 +2,9 @@
>  
>  import java.io.IOException;
>  import java.net.URLDecoder;
> -import java.util.HashSet;
> +import java.util.ArrayList;
> +import java.util.List;
>  import java.util.Map;
> -import java.util.Set;
>  import java.util.TreeMap;
>  
>  import javax.servlet.ServletException;
> @@ -72,7 +72,7 @@
>      final HttpServletResponse response, final String [] pathSegments)
>    throws IOException {
>      // pull the row key out of the path
> -    String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING);
> +    byte[] row = Bytes.toBytes(URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING));
>  
>      String timestampStr = null;
>      if (pathSegments.length == 4) {
> @@ -85,64 +85,63 @@
>        }
>      }
>      
> -    String[] columns = request.getParameterValues(COLUMN);
> -        
> -    if (columns == null || columns.length == 0) {
> -      // They want full row returned. 
> -
> -      // Presumption is that this.table has already been focused on target table.
> -      Map<byte [], Cell> result = timestampStr == null ? 
> -        table.getRow(Bytes.toBytes(row)) 
> -        : table.getRow(Bytes.toBytes(row), Long.parseLong(timestampStr));
> -        
> +    String[] column_params = request.getParameterValues(COLUMN);
> +    
> +    byte[][] columns = null;
> +    
> +    if (column_params != null && column_params.length > 0) {
> +      List<String> available_columns = new ArrayList<String>();
> +      for (String column_param : column_params) {
> +        if (column_param.length() > 0 && table.getTableDescriptor().hasFamily(Bytes.toBytes(column_param))) {
> +          available_columns.add(column_param);
> +        }
> +      }
> +      columns = Bytes.toByteArrays(available_columns.toArray(new String[0]));
> +    }
> +    
> +    String[] version_params = request.getParameterValues(VERSION);
> +    int version = 0;
> +    if (version_params != null && version_params.length == 1) {
> +      version = Integer.parseInt(version_params[0]);
> +    }
> +    
> +    if (version > 0 && columns != null) {
> +      Map<byte[], Cell[]> result = new TreeMap<byte [], Cell[]>(Bytes.BYTES_COMPARATOR);      
> +      
> +      for (byte[] col : columns) {
> +        Cell[] cells = timestampStr == null ? table.get(row, col, version)
> +                      : table.get(row, col, Long.parseLong(timestampStr), version);
> +        if (cells != null) {
> +          result.put(col, cells);
> +        }
> +      }
> +      
>        if (result == null || result.size() == 0) {
>          doNotFound(response, "Row not found!");
>        } else {
>          switch (ContentType.getContentType(request.getHeader(ACCEPT))) {
>          case XML:
> -          outputRowXml(response, result);
> +          outputRowWithMultiVersionsXml(response, result);
>            break;
>          case MIME:
>          default:
> -          doNotAcceptable(response, "Unsupported Accept Header Content: " +
> -            request.getHeader(CONTENT_TYPE));
> +          doNotAcceptable(response, "Unsupported Accept Header Content: "
> +              + request.getHeader(CONTENT_TYPE));
>          }
> -      }
> +      }      
>      } else {
> -      Map<byte [], Cell> prefiltered_result = table.getRow(Bytes.toBytes(row));
> -    
> -      if (prefiltered_result == null || prefiltered_result.size() == 0) {
> +      Map<byte[], Cell> result = timestampStr == null ? table.getRow(row, columns) : table.getRow(row, columns, Long.parseLong(timestampStr));
> +      if (result == null || result.size() == 0) {
>          doNotFound(response, "Row not found!");
>        } else {
> -        // create a Set from the columns requested so we can
> -        // efficiently filter the actual found columns
> -        Set<String> requested_columns_set = new HashSet<String>();
> -        for(int i = 0; i < columns.length; i++){
> -          requested_columns_set.add(columns[i]);
> -        }
> -  
> -        // output map that will contain the filtered results
> -        Map<byte [], Cell> m =
> -          new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
> -
> -        // get an array of all the columns retrieved
> -        Set<byte []> columns_retrieved = prefiltered_result.keySet();
> -
> -        // copy over those cells with requested column names
> -        for(byte [] current_column: columns_retrieved) {
> -          if (requested_columns_set.contains(Bytes.toString(current_column))) {
> -            m.put(current_column, prefiltered_result.get(current_column));            
> -          }
> -        }
> -        
>          switch (ContentType.getContentType(request.getHeader(ACCEPT))) {
> -          case XML:
> -            outputRowXml(response, m);
> -            break;
> -          case MIME:
> -          default:
> -            doNotAcceptable(response, "Unsupported Accept Header Content: " +
> -              request.getHeader(CONTENT_TYPE));
> +        case XML:
> +          outputRowXml(response, result);
> +          break;
> +        case MIME:
> +        default:
> +          doNotAcceptable(response, "Unsupported Accept Header Content: "
> +              + request.getHeader(CONTENT_TYPE));
>          }
>        }
>      }
> @@ -167,6 +166,18 @@
>      outputter.getWriter().close();
>    }
>    
> +  private void outputRowWithMultiVersionsXml(final HttpServletResponse response,
> +      final Map<byte[], Cell[]> result) 
> +  throws IOException {
> +    setResponseHeader(response, result.size() > 0? 200: 204,
> +        ContentType.XML.toString());
> +    XMLOutputter outputter = getXMLOutputter(response.getWriter());
> +    outputter.startTag(ROW);
> +    outputColumnsWithMultiVersionsXml(outputter, result);
> +    outputter.endTag();
> +    outputter.endDocument();
> +    outputter.getWriter().close();   
> +  }
>    /*
>     * @param response
>     * @param result
>
> Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java
> URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java?rev=690637&r1=690636&r2=690637&view=diff
> ==============================================================================
> --- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java (original)
> +++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java Sat Aug 30 21:48:57 2008
> @@ -54,6 +54,43 @@
>    }
>    
>    /**
> +   * Tests cases where rows keys have characters below the ','.
> +   * See HBASE-832
> +   */
> +  public void testHStoreKeyBorderCases() {
> +    HRegionInfo info = new HRegionInfo(new HTableDescriptor("testtable"),
> +        HConstants.EMPTY_BYTE_ARRAY,HConstants.EMPTY_BYTE_ARRAY);
> +    HStoreKey rowA = new HStoreKey("testtable,www.hbase.org/,1234",
> +        "", Long.MAX_VALUE, info);
> +    HStoreKey rowB = new HStoreKey("testtable,www.hbase.org/%20,99999",
> +        "", Long.MAX_VALUE, info);
> +
> +    assertTrue(rowA.compareTo(rowB) > 0);
> +
> +    rowA = new HStoreKey("testtable,www.hbase.org/,1234",
> +        "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
> +    rowB = new HStoreKey("testtable,www.hbase.org/%20,99999",
> +        "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
> +
> +    assertTrue(rowA.compareTo(rowB) < 0);
> +
> +    rowA = new HStoreKey("testtable,,1234",
> +        "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
> +    rowB = new HStoreKey("testtable,$www.hbase.org/,99999",
> +        "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
> +
> +    assertTrue(rowA.compareTo(rowB) < 0);
> +
> +    rowA = new HStoreKey(".META.,testtable,www.hbase.org/,1234,4321",
> +        "", Long.MAX_VALUE, HRegionInfo.ROOT_REGIONINFO);
> +    rowB = new HStoreKey(".META.,testtable,www.hbase.org/%20,99999,99999",
> +        "", Long.MAX_VALUE, HRegionInfo.ROOT_REGIONINFO);
> +
> +    assertTrue(rowA.compareTo(rowB) > 0);
> +  }
> +
> +  
> +  /**
>     * Sort of HRegionInfo.
>     */
>    public void testHRegionInfo() {
>
>
>