Posted to commits@hbase.apache.org by ec...@apache.org on 2014/10/10 18:53:18 UTC

[37/38] HBASE-12197 Move rest to its own module

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
new file mode 100644
index 0000000..7db5328
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -0,0 +1,598 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class RowResource extends ResourceBase {
+  private static final Log LOG = LogFactory.getLog(RowResource.class);
+
+  static final String CHECK_PUT = "put";
+  static final String CHECK_DELETE = "delete";
+
+  TableResource tableResource;
+  RowSpec rowspec;
+  private String check = null;
+
+  /**
+   * Constructor
+   * @param tableResource the table this row belongs to
+   * @param rowspec the row, column, and timestamp specification
+   * @param versions maximum number of cell versions to return, may be null
+   * @param check the check operation to apply ("put" or "delete"), may be null
+   * @throws IOException
+   */
+  public RowResource(TableResource tableResource, String rowspec,
+      String versions, String check) throws IOException {
+    super();
+    this.tableResource = tableResource;
+    this.rowspec = new RowSpec(rowspec);
+    if (versions != null) {
+      this.rowspec.setMaxVersions(Integer.valueOf(versions));
+    }
+    this.check = check;
+  }
+
+  @GET
+  @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response get(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+    try {
+      ResultGenerator generator =
+        ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
+          !params.containsKey(NOCACHE_PARAM_NAME));
+      if (!generator.hasNext()) {
+        servlet.getMetrics().incrementFailedGetRequests(1);
+        return Response.status(Response.Status.NOT_FOUND)
+          .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+          .build();
+      }
+      int count = 0;
+      CellSetModel model = new CellSetModel();
+      Cell value = generator.next();
+      byte[] rowKey = CellUtil.cloneRow(value);
+      RowModel rowModel = new RowModel(rowKey);
+      do {
+        if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
+          model.addRow(rowModel);
+          rowKey = CellUtil.cloneRow(value);
+          rowModel = new RowModel(rowKey);
+        }
+        rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
+          value.getTimestamp(), CellUtil.cloneValue(value)));
+        if (++count >= rowspec.getMaxValues()) {
+          break;
+        }
+        value = generator.next();
+      } while (value != null);
+      model.addRow(rowModel);
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+      return Response.ok(model).build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return processException(e);
+    }
+  }
+
+  @GET
+  @Produces(MIMETYPE_BINARY)
+  public Response getBinary(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+    }
+    servlet.getMetrics().incrementRequests(1);
+    // It doesn't make sense to use a non-specific coordinate here, as this
+    // method can only return a single cell.
+    if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+          .entity("Bad request: Either 0 or more than 1 column specified." + CRLF).build();
+    }
+    MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+    try {
+      ResultGenerator generator =
+        ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
+          !params.containsKey(NOCACHE_PARAM_NAME));
+      if (!generator.hasNext()) {
+        servlet.getMetrics().incrementFailedGetRequests(1);
+        return Response.status(Response.Status.NOT_FOUND)
+          .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+          .build();
+      }
+      Cell value = generator.next();
+      ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
+      response.header("X-Timestamp", value.getTimestamp());
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+      return response.build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return processException(e);
+    }
+  }
+
+  Response update(final CellSetModel model, final boolean replace) {
+    servlet.getMetrics().incrementRequests(1);
+    if (servlet.isReadOnly()) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+
+    if (CHECK_PUT.equalsIgnoreCase(check)) {
+      return checkAndPut(model);
+    } else if (CHECK_DELETE.equalsIgnoreCase(check)) {
+      return checkAndDelete(model);
+    } else if (check != null && check.length() > 0) {
+      return Response.status(Response.Status.BAD_REQUEST)
+        .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF)
+        .build();
+    }
+
+    HTableInterface table = null;
+    try {
+      List<RowModel> rows = model.getRows();
+      List<Put> puts = new ArrayList<Put>();
+      for (RowModel row: rows) {
+        byte[] key = row.getKey();
+        if (key == null) {
+          key = rowspec.getRow();
+        }
+        if (key == null) {
+          servlet.getMetrics().incrementFailedPutRequests(1);
+          return Response.status(Response.Status.BAD_REQUEST)
+            .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF)
+            .build();
+        }
+        Put put = new Put(key);
+        int i = 0;
+        for (CellModel cell: row.getCells()) {
+          byte[] col = cell.getColumn();
+          if (col == null) {
+            try {
+              col = rowspec.getColumns()[i++];
+            } catch (ArrayIndexOutOfBoundsException e) {
+              col = null;
+            }
+          }
+          if (col == null) {
+            servlet.getMetrics().incrementFailedPutRequests(1);
+            return Response.status(Response.Status.BAD_REQUEST)
+              .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+              .build();
+          }
+          byte [][] parts = KeyValue.parseColumn(col);
+          if (parts.length != 2) {
+            return Response.status(Response.Status.BAD_REQUEST)
+              .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+              .build();
+          }
+          put.addImmutable(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
+        }
+        puts.add(put);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("PUT " + put.toString());
+        }
+      }
+      table = servlet.getTable(tableResource.getName());
+      table.put(puts);
+      table.flushCommits();
+      ResponseBuilder response = Response.ok();
+      servlet.getMetrics().incrementSucessfulPutRequests(1);
+      return response.build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return processException(e);
+    } finally {
+      if (table != null) try {
+        table.close();
+      } catch (IOException ioe) {
+        LOG.debug("Exception received while closing the table", ioe);
+      }
+    }
+  }
+
+  // This currently supports only update of one row at a time.
+  Response updateBinary(final byte[] message, final HttpHeaders headers,
+      final boolean replace) {
+    servlet.getMetrics().incrementRequests(1);
+    if (servlet.isReadOnly()) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+    HTableInterface table = null;
+    try {
+      byte[] row = rowspec.getRow();
+      byte[][] columns = rowspec.getColumns();
+      byte[] column = null;
+      if (columns != null) {
+        column = columns[0];
+      }
+      long timestamp = HConstants.LATEST_TIMESTAMP;
+      List<String> vals = headers.getRequestHeader("X-Row");
+      if (vals != null && !vals.isEmpty()) {
+        row = Bytes.toBytes(vals.get(0));
+      }
+      vals = headers.getRequestHeader("X-Column");
+      if (vals != null && !vals.isEmpty()) {
+        column = Bytes.toBytes(vals.get(0));
+      }
+      vals = headers.getRequestHeader("X-Timestamp");
+      if (vals != null && !vals.isEmpty()) {
+        timestamp = Long.valueOf(vals.get(0));
+      }
+      if (column == null) {
+        servlet.getMetrics().incrementFailedPutRequests(1);
+        return Response.status(Response.Status.BAD_REQUEST)
+            .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+            .build();
+      }
+      Put put = new Put(row);
+      byte[][] parts = KeyValue.parseColumn(column);
+      if (parts.length != 2) {
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+          .build();
+      }
+      put.addImmutable(parts[0], parts[1], timestamp, message);
+      table = servlet.getTable(tableResource.getName());
+      table.put(put);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("PUT " + put.toString());
+      }
+      servlet.getMetrics().incrementSucessfulPutRequests(1);
+      return Response.ok().build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return processException(e);
+    } finally {
+      if (table != null) try {
+        table.close();
+      } catch (IOException ioe) {
+        LOG.debug("Exception received while closing the table", ioe);
+      }
+    }
+  }
+
+  @PUT
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response put(final CellSetModel model,
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("PUT " + uriInfo.getAbsolutePath()
+        + " " + uriInfo.getQueryParameters());
+    }
+    return update(model, true);
+  }
+
+  @PUT
+  @Consumes(MIMETYPE_BINARY)
+  public Response putBinary(final byte[] message,
+      final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+    }
+    return updateBinary(message, headers, true);
+  }
+
+  @POST
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response post(final CellSetModel model,
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("POST " + uriInfo.getAbsolutePath()
+        + " " + uriInfo.getQueryParameters());
+    }
+    return update(model, false);
+  }
+
+  @POST
+  @Consumes(MIMETYPE_BINARY)
+  public Response postBinary(final byte[] message,
+      final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
+    }
+    return updateBinary(message, headers, false);
+  }
+
+  @DELETE
+  public Response delete(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    if (servlet.isReadOnly()) {
+      servlet.getMetrics().incrementFailedDeleteRequests(1);
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+    Delete delete = null;
+    if (rowspec.hasTimestamp()) {
+      delete = new Delete(rowspec.getRow(), rowspec.getTimestamp());
+    } else {
+      delete = new Delete(rowspec.getRow());
+    }
+
+    for (byte[] column: rowspec.getColumns()) {
+      byte[][] split = KeyValue.parseColumn(column);
+      if (rowspec.hasTimestamp()) {
+        if (split.length == 1) {
+          delete.deleteFamily(split[0], rowspec.getTimestamp());
+        } else if (split.length == 2) {
+          delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
+        } else {
+          return Response.status(Response.Status.BAD_REQUEST)
+            .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+            .build();
+        }
+      } else {
+        if (split.length == 1) {
+          delete.deleteFamily(split[0]);
+        } else if (split.length == 2) {
+          delete.deleteColumns(split[0], split[1]);
+        } else {
+          return Response.status(Response.Status.BAD_REQUEST)
+            .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+            .build();
+        }
+      }
+    }
+    HTableInterface table = null;
+    try {
+      table = servlet.getTable(tableResource.getName());
+      table.delete(delete);
+      servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("DELETE " + delete.toString());
+      }
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedDeleteRequests(1);
+      return processException(e);
+    } finally {
+      if (table != null) try {
+        table.close();
+      } catch (IOException ioe) {
+        LOG.debug("Exception received while closing the table", ioe);
+      }
+    }
+    return Response.ok().build();
+  }
+
+  /**
+   * Validates the input request parameters, parses columns from CellSetModel,
+   * and invokes checkAndPut on HTable.
+   *
+   * @param model instance of CellSetModel
+   * @return Response 200 OK, 304 Not modified, 400 Bad request
+   */
+  Response checkAndPut(final CellSetModel model) {
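+    // Expected payload (a hypothetical sketch, not from the patch): a single
+    // Row whose last Cell carries the expected current value, with an earlier
+    // Cell on the same column carrying the new value to put:
+    //   <CellSet><Row key="base64(row)">
+    //     <Cell column="base64(cf:q)">base64(new value)</Cell>
+    //     <Cell column="base64(cf:q)">base64(expected value)</Cell>
+    //   </Row></CellSet>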
+    HTableInterface table = null;
+    try {
+      table = servlet.getTable(tableResource.getName());
+      if (model.getRows().size() != 1) {
+        servlet.getMetrics().incrementFailedPutRequests(1);
+        return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+            .entity("Bad request: Number of rows specified is not 1." + CRLF).build();
+      }
+
+      RowModel rowModel = model.getRows().get(0);
+      byte[] key = rowModel.getKey();
+      if (key == null) {
+        key = rowspec.getRow();
+      }
+
+      List<CellModel> cellModels = rowModel.getCells();
+      int cellModelCount = cellModels.size();
+      if (key == null || cellModelCount <= 1) {
+        servlet.getMetrics().incrementFailedPutRequests(1);
+        return Response
+            .status(Response.Status.BAD_REQUEST)
+            .type(MIMETYPE_TEXT)
+            .entity(
+              "Bad request: Either row key is null or no data found for columns specified." + CRLF)
+            .build();
+      }
+
+      Put put = new Put(key);
+      boolean retValue;
+      CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
+      byte[] valueToCheckColumn = valueToCheckCell.getColumn();
+      byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
+      if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
+        CellModel valueToPutCell = null;
+        for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+          if (Bytes.equals(cellModels.get(i).getColumn(),
+              valueToCheckCell.getColumn())) {
+            valueToPutCell = cellModels.get(i);
+            break;
+          }
+        }
+        if (valueToPutCell == null) {
+          servlet.getMetrics().incrementFailedPutRequests(1);
+          return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+              .entity("Bad request: The column to put and check do not match." + CRLF).build();
+        } else {
+          put.addImmutable(valueToPutParts[0], valueToPutParts[1], valueToPutCell.getTimestamp(),
+            valueToPutCell.getValue());
+          retValue = table.checkAndPut(key, valueToPutParts[0], valueToPutParts[1],
+            valueToCheckCell.getValue(), put);
+        }
+      } else {
+        servlet.getMetrics().incrementFailedPutRequests(1);
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+          .build();
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
+      }
+      if (!retValue) {
+        servlet.getMetrics().incrementFailedPutRequests(1);
+        return Response.status(Response.Status.NOT_MODIFIED)
+          .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF)
+          .build();
+      }
+      table.flushCommits();
+      ResponseBuilder response = Response.ok();
+      servlet.getMetrics().incrementSucessfulPutRequests(1);
+      return response.build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return processException(e);
+    } finally {
+      if (table != null) try {
+        table.close();
+      } catch (IOException ioe) { 
+        LOG.debug("Exception received while closing the table", ioe);
+      }
+    }
+  }
+
+  /**
+   * Validates the input request parameters, parses columns from CellSetModel,
+   * and invokes checkAndDelete on HTable.
+   *
+   * @param model instance of CellSetModel
+   * @return Response 200 OK, 304 Not modified, 400 Bad request
+   */
+  Response checkAndDelete(final CellSetModel model) {
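+    // Expected payload (a hypothetical sketch, not from the patch): a single
+    // Row with one Cell whose value is the expected current value; the column
+    // comes from that Cell or, failing that, from the first column in the
+    // row spec:
+    //   <CellSet><Row key="base64(row)">
+    //     <Cell column="base64(cf:q)">base64(expected value)</Cell>
+    //   </Row></CellSet>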
+    HTableInterface table = null;
+    Delete delete = null;
+    try {
+      table = servlet.getTable(tableResource.getName());
+      if (model.getRows().size() != 1) {
+        servlet.getMetrics().incrementFailedDeleteRequests(1);
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+          .build();
+      }
+      RowModel rowModel = model.getRows().get(0);
+      byte[] key = rowModel.getKey();
+      if (key == null) {
+        key = rowspec.getRow();
+      }
+      if (key == null) {
+        servlet.getMetrics().incrementFailedDeleteRequests(1);
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF)
+          .build();
+      }
+
+      delete = new Delete(key);
+      boolean retValue;
+      CellModel valueToDeleteCell = rowModel.getCells().get(0);
+      byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
+      if (valueToDeleteColumn == null) {
+        try {
+          valueToDeleteColumn = rowspec.getColumns()[0];
+        } catch (final ArrayIndexOutOfBoundsException e) {
+          servlet.getMetrics().incrementFailedDeleteRequests(1);
+          return Response.status(Response.Status.BAD_REQUEST)
+            .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF)
+            .build();
+        }
+      }
+      byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+      if (parts.length == 2) {
+        if (parts[1].length != 0) {
+          delete.deleteColumns(parts[0], parts[1]);
+          retValue = table.checkAndDelete(key, parts[0], parts[1],
+            valueToDeleteCell.getValue(), delete);
+        } else {
+          // The case of empty qualifier.
+          delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+          retValue = table.checkAndDelete(key, parts[0], Bytes.toBytes(StringUtils.EMPTY),
+            valueToDeleteCell.getValue(), delete);
+        }
+      } else {
+        servlet.getMetrics().incrementFailedDeleteRequests(1);
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+          .build();
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("CHECK-AND-DELETE " + delete.toString() + ", returns "
+          + retValue);
+      }
+
+      if (!retValue) {
+        servlet.getMetrics().incrementFailedDeleteRequests(1);
+        return Response.status(Response.Status.NOT_MODIFIED)
+            .type(MIMETYPE_TEXT).entity("Delete check failed." + CRLF)
+            .build();
+      }
+      table.flushCommits();
+      ResponseBuilder response = Response.ok();
+      servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+      return response.build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedDeleteRequests(1);
+      return processException(e);
+    } finally {
+      if (table != null) try {
+        table.close();
+      } catch (IOException ioe) {
+        LOG.debug("Exception received while closing the table", ioe);
+      }
+    }
+  }
+}
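
For context, RowResource wires the standard HTTP verbs onto single-row operations: GET returns cells as a CellSet (or raw bytes for application/octet-stream via getBinary), PUT and POST write cells, and DELETE removes them. A minimal client sketch against these endpoints, assuming a REST server at localhost:8080 and an existing table "t1" with family "cf" (host, table, and column names are illustrative, not part of the patch):

    import java.io.ByteArrayOutputStream;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class RowResourceClientSketch {
      public static void main(String[] args) throws Exception {
        // Raw binary PUT: putBinary()/updateBinary() take the row and column
        // from the URL path (X-Row / X-Column headers can override them).
        URL cell = new URL("http://localhost:8080/t1/row1/cf:q");
        HttpURLConnection put = (HttpURLConnection) cell.openConnection();
        put.setRequestMethod("PUT");
        put.setDoOutput(true);
        put.setRequestProperty("Content-Type", "application/octet-stream");
        OutputStream out = put.getOutputStream();
        out.write("hello".getBytes("UTF-8"));
        out.close();
        System.out.println("PUT -> " + put.getResponseCode()); // 200 on success
        put.disconnect();

        // Raw binary GET: getBinary() requires exactly one column in the row
        // spec and echoes the cell timestamp in the X-Timestamp header.
        HttpURLConnection get = (HttpURLConnection) cell.openConnection();
        get.setRequestProperty("Accept", "application/octet-stream");
        if (get.getResponseCode() == 200) {
          System.out.println("X-Timestamp: " + get.getHeaderField("X-Timestamp"));
          InputStream in = get.getInputStream();
          ByteArrayOutputStream buf = new ByteArrayOutputStream();
          for (int b; (b = in.read()) != -1;) {
            buf.write(b);
          }
          in.close();
          System.out.println("value: " + buf.toString("UTF-8"));
        }
        get.disconnect();
      }
    }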

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
new file mode 100644
index 0000000..b9492dd
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
@@ -0,0 +1,122 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public class RowResultGenerator extends ResultGenerator {
+  private static final Log LOG = LogFactory.getLog(RowResultGenerator.class);
+
+  private Iterator<Cell> valuesI;
+  private Cell cache;
+
+  public RowResultGenerator(final String tableName, final RowSpec rowspec,
+      final Filter filter, final boolean cacheBlocks)
+      throws IllegalArgumentException, IOException {
+    HTableInterface table = RESTServlet.getInstance().getTable(tableName);
+    try {
+      Get get = new Get(rowspec.getRow());
+      if (rowspec.hasColumns()) {
+        for (byte[] col: rowspec.getColumns()) {
+          byte[][] split = KeyValue.parseColumn(col);
+          if (split.length == 1) {
+            get.addFamily(split[0]);
+          } else if (split.length == 2) {
+            get.addColumn(split[0], split[1]);
+          } else {
+            throw new IllegalArgumentException("Invalid column specifier.");
+          }
+        }
+      }
+      get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+      get.setMaxVersions(rowspec.getMaxVersions());
+      if (filter != null) {
+        get.setFilter(filter);
+      }
+      get.setCacheBlocks(cacheBlocks);
+      Result result = table.get(get);
+      if (result != null && !result.isEmpty()) {
+        valuesI = result.listCells().iterator();
+      }
+    } catch (DoNotRetryIOException e) {
+      // Warn here because Stargate will return 404 in the case where multiple
+      // column families were specified but one did not exist -- currently
+      // HBase will fail the whole Get.
+      // Specifying multiple columns in a URI should be uncommon usage, but
+      // this helps avoid confusion by leaving a record of what happened in
+      // the log.
+      LOG.warn(StringUtils.stringifyException(e));
+    } finally {
+      table.close();
+    }
+  }
+
+  public void close() {
+  }
+
+  public boolean hasNext() {
+    if (cache != null) {
+      return true;
+    }
+    if (valuesI == null) {
+      return false;
+    }
+    return valuesI.hasNext();
+  }
+
+  public Cell next() {
+    if (cache != null) {
+      Cell kv = cache;
+      cache = null;
+      return kv;
+    }
+    if (valuesI == null) {
+      return null;
+    }
+    try {
+      return valuesI.next();
+    } catch (NoSuchElementException e) {
+      return null;
+    }
+  }
+
+  public void putBack(Cell kv) {
+    this.cache = kv;
+  }
+
+  public void remove() {
+    throw new UnsupportedOperationException("remove not supported");
+  }
+}
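
RowResultGenerator turns each column specifier in the row spec into either an addFamily or addColumn call, depending on how KeyValue.parseColumn splits it. A standalone sketch of that family[:qualifier] convention (written out here for illustration; it mirrors, but is not, the HBase implementation):

    import java.util.Arrays;

    public class ColumnSpecSketch {
      // "family" selects a whole column family; "family:qualifier" selects a
      // single column; "family:" selects the column with an empty qualifier.
      static byte[][] parseColumn(byte[] spec) {
        for (int i = 0; i < spec.length; i++) {
          if (spec[i] == ':') {
            return new byte[][] {
              Arrays.copyOfRange(spec, 0, i),              // family
              Arrays.copyOfRange(spec, i + 1, spec.length) // qualifier
            };
          }
        }
        return new byte[][] { spec };                      // family only
      }

      public static void main(String[] args) throws Exception {
        byte[][] split = parseColumn("cf:q".getBytes("UTF-8"));
        // split.length == 1 -> get.addFamily(split[0])
        // split.length == 2 -> get.addColumn(split[0], split[1])
        System.out.println(split.length == 2 ? "family + qualifier" : "family only");
      }
    }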

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
new file mode 100644
index 0000000..b6c1ca8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
@@ -0,0 +1,407 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Parses a path-based row/column/timestamp specification into its component
+ * elements.
+ * <p>
+ * The general form is {@code <row>[,<endrow>]/<columns>/<time>[,<endtime>]}
+ * with optional {@code ?m=<maxversions>} and {@code &n=<maxvalues>} query
+ * parameters. A single time value is taken as the end time, a pair as a
+ * start/end range, and a trailing '*' on the row key selects all rows
+ * sharing that prefix.
+ */
+@InterfaceAudience.Private
+public class RowSpec {
+  public static final long DEFAULT_START_TIMESTAMP = 0;
+  public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
+  
+  private byte[] row = HConstants.EMPTY_START_ROW;
+  private byte[] endRow = null;
+  private TreeSet<byte[]> columns =
+    new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+  private List<String> labels = new ArrayList<String>();  
+  private long startTime = DEFAULT_START_TIMESTAMP;
+  private long endTime = DEFAULT_END_TIMESTAMP;
+  private int maxVersions = 1;
+  private int maxValues = Integer.MAX_VALUE;
+
+  public RowSpec(String path) throws IllegalArgumentException {
+    int i = 0;
+    while (path.charAt(i) == '/') {
+      i++;
+    }
+    i = parseRowKeys(path, i);
+    i = parseColumns(path, i);
+    i = parseTimestamp(path, i);
+    i = parseQueryParams(path, i);
+  }
+
+  private int parseRowKeys(final String path, int i)
+      throws IllegalArgumentException {
+    String startRow = null, endRow = null;
+    try {
+      StringBuilder sb = new StringBuilder();
+      char c;
+      while (i < path.length() && (c = path.charAt(i)) != '/') {
+        sb.append(c);
+        i++;
+      }
+      i++;
+      String row = startRow = sb.toString();
+      int idx = startRow.indexOf(',');
+      if (idx != -1) {
+        startRow = URLDecoder.decode(row.substring(0, idx),
+          HConstants.UTF8_ENCODING);
+        endRow = URLDecoder.decode(row.substring(idx + 1),
+          HConstants.UTF8_ENCODING);
+      } else {
+        startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING);
+      }
+    } catch (IndexOutOfBoundsException e) {
+      throw new IllegalArgumentException(e);
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException(e);
+    }
+    // HBase does not support wildcards on row keys so we will emulate a
+    // suffix glob by synthesizing appropriate start and end row keys for
+    // table scanning
+    if (startRow.charAt(startRow.length() - 1) == '*') {
+      if (endRow != null) {
+        throw new IllegalArgumentException("invalid path: an end row " +
+          "cannot be combined with a wildcard start row");
+      }
+      this.row = Bytes.toBytes(startRow.substring(0,
+        startRow.lastIndexOf("*")));
+      this.endRow = new byte[this.row.length + 1];
+      System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
+      this.endRow[this.row.length] = (byte)255;
+    } else {
+      this.row = Bytes.toBytes(startRow);
+      if (endRow != null) {
+        this.endRow = Bytes.toBytes(endRow);
+      }
+    }
+    return i;
+  }
+
+  private int parseColumns(final String path, int i) throws IllegalArgumentException {
+    if (i >= path.length()) {
+      return i;
+    }
+    try {
+      char c;
+      StringBuilder column = new StringBuilder();
+      while (i < path.length() && (c = path.charAt(i)) != '/') {
+        if (c == ',') {
+          if (column.length() < 1) {
+            throw new IllegalArgumentException("invalid path");
+          }
+          String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
+          this.columns.add(Bytes.toBytes(s));
+          column.setLength(0);
+          i++;
+          continue;
+        }
+        column.append(c);
+        i++;
+      }
+      i++;
+      // trailing list entry
+      if (column.length() > 0) {
+        String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
+        this.columns.add(Bytes.toBytes(s));
+      }
+    } catch (IndexOutOfBoundsException e) {
+      throw new IllegalArgumentException(e);
+    } catch (UnsupportedEncodingException e) {
+      // shouldn't happen
+      throw new RuntimeException(e);
+    }
+    return i;
+  }
+
+  private int parseTimestamp(final String path, int i)
+      throws IllegalArgumentException {
+    if (i >= path.length()) {
+      return i;
+    }
+    long time0 = 0, time1 = 0;
+    try {
+      char c = 0;
+      StringBuilder stamp = new StringBuilder();
+      while (i < path.length()) {
+        c = path.charAt(i);
+        if (c == '/' || c == ',') {
+          break;
+        }
+        stamp.append(c);
+        i++;
+      }
+      try {
+        time0 = Long.valueOf(URLDecoder.decode(stamp.toString(),
+          HConstants.UTF8_ENCODING));
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException(e);
+      }
+      if (c == ',') {
+        stamp = new StringBuilder();
+        i++;
+        while (i < path.length() && ((c = path.charAt(i)) != '/')) {
+          stamp.append(c);
+          i++;
+        }
+        try {
+          time1 = Long.valueOf(URLDecoder.decode(stamp.toString(),
+            HConstants.UTF8_ENCODING));
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(e);
+        }
+      }
+      if (c == '/') {
+        i++;
+      }
+    } catch (IndexOutOfBoundsException e) {
+      throw new IllegalArgumentException(e);
+    } catch (UnsupportedEncodingException e) {
+      // shouldn't happen
+      throw new RuntimeException(e);
+    }
+    if (time1 != 0) {
+      startTime = time0;
+      endTime = time1;
+    } else {
+      endTime = time0;
+    }
+    return i;
+  }
+
+  private int parseQueryParams(final String path, int i) {
+    if (i >= path.length()) {
+      return i;
+    }
+    StringBuilder query = new StringBuilder();
+    try {
+      query.append(URLDecoder.decode(path.substring(i), 
+        HConstants.UTF8_ENCODING));
+    } catch (UnsupportedEncodingException e) {
+      // should not happen
+      throw new RuntimeException(e);
+    }
+    i += query.length();
+    int j = 0;
+    while (j < query.length()) {
+      char c = query.charAt(j);
+      if (c != '?' && c != '&') {
+        break;
+      }
+      if (++j >= query.length()) {
+        throw new IllegalArgumentException("malformed query parameter");
+      }
+      char what = query.charAt(j);
+      if (++j >= query.length()) {
+        break;
+      }
+      c = query.charAt(j);
+      if (c != '=') {
+        throw new IllegalArgumentException("malformed query parameter");
+      }
+      if (++j >= query.length()) {
+        break;
+      }
+      switch (what) {
+      case 'm': {
+        StringBuilder sb = new StringBuilder();
+        while (j < query.length()) {
+          c = query.charAt(j);
+          if (c < '0' || c > '9') {
+            // leave j on the delimiter for the outer loop
+            break;
+          }
+          sb.append(c);
+          j++;
+        }
+        maxVersions = Integer.valueOf(sb.toString());
+      } break;
+      case 'n': {
+        StringBuilder sb = new StringBuilder();
+        while (j < query.length()) {
+          c = query.charAt(j);
+          if (c < '0' || c > '9') {
+            // leave j on the delimiter for the outer loop
+            break;
+          }
+          sb.append(c);
+          j++;
+        }
+        maxValues = Integer.valueOf(sb.toString());
+      } break;
+      default:
+        throw new IllegalArgumentException("unknown parameter '" + what + "'");
+      }
+    }
+    return i;
+  }
+
+  public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
+      long startTime, long endTime, int maxVersions) {
+    this.row = startRow;
+    this.endRow = endRow;
+    if (columns != null) {
+      Collections.addAll(this.columns, columns);
+    }
+    this.startTime = startTime;
+    this.endTime = endTime;
+    this.maxVersions = maxVersions;
+  }
+
+  public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+      long startTime, long endTime, int maxVersions, Collection<String> labels) {
+    this(startRow, endRow, columns, startTime, endTime, maxVersions);
+    if (labels != null) {
+      this.labels.addAll(labels);
+    }
+  }
+
+  public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+      long startTime, long endTime, int maxVersions) {
+    this.row = startRow;
+    this.endRow = endRow;
+    if (columns != null) {
+      this.columns.addAll(columns);
+    }
+    this.startTime = startTime;
+    this.endTime = endTime;
+    this.maxVersions = maxVersions;
+  }
+
+  public boolean isSingleRow() {
+    return endRow == null;
+  }
+
+  public int getMaxVersions() {
+    return maxVersions;
+  }
+
+  public void setMaxVersions(final int maxVersions) {
+    this.maxVersions = maxVersions;
+  }
+
+  public int getMaxValues() {
+    return maxValues;
+  }
+
+  public void setMaxValues(final int maxValues) {
+    this.maxValues = maxValues;
+  }
+
+  public boolean hasColumns() {
+    return !columns.isEmpty();
+  }
+  
+  public boolean hasLabels() {
+    return !labels.isEmpty();
+  }
+
+  public byte[] getRow() {
+    return row;
+  }
+
+  public byte[] getStartRow() {
+    return row;
+  }
+
+  public boolean hasEndRow() {
+    return endRow != null;
+  }
+
+  public byte[] getEndRow() {
+    return endRow;
+  }
+
+  public void addColumn(final byte[] column) {
+    columns.add(column);
+  }
+
+  public byte[][] getColumns() {
+    return columns.toArray(new byte[columns.size()][]);
+  }
+  
+  public List<String> getLabels() {
+    return labels;
+  }
+
+  public boolean hasTimestamp() {
+    return (startTime == DEFAULT_START_TIMESTAMP) &&
+      (endTime != DEFAULT_END_TIMESTAMP);
+  }
+
+  public long getTimestamp() {
+    return endTime;
+  }
+
+  public long getStartTime() {
+    return startTime;
+  }
+
+  public void setStartTime(final long startTime) {
+    this.startTime = startTime;
+  }
+
+  public long getEndTime() {
+    return endTime;
+  }
+
+  public void setEndTime(long endTime) {
+    this.endTime = endTime;
+  }
+
+  public String toString() {
+    StringBuilder result = new StringBuilder();
+    result.append("{startRow => '");
+    if (row != null) {
+      result.append(Bytes.toString(row));
+    }
+    result.append("', endRow => '");
+    if (endRow != null)  {
+      result.append(Bytes.toString(endRow));
+    }
+    result.append("', columns => [");
+    for (byte[] col: columns) {
+      result.append(" '");
+      result.append(Bytes.toString(col));
+      result.append("'");
+    }
+    result.append(" ], startTime => ");
+    result.append(Long.toString(startTime));
+    result.append(", endTime => ");
+    result.append(Long.toString(endTime));
+    result.append(", maxVersions => ");
+    result.append(Integer.toString(maxVersions));
+    result.append(", maxValues => ");
+    result.append(Integer.toString(maxValues));
+    result.append("}");
+    return result.toString();
+  }
+}
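
To make the path grammar concrete: a spec such as row1,row2/cf:q/100,200?m=3 names a start and end row, one column, a time range, and a max-versions override ('m'; 'n' caps max values). The trailing-* glob on a row key is emulated by synthesizing an end row, roughly as in this standalone sketch of the wildcard branch (names illustrative):

    public class RowGlobSketch {
      public static void main(String[] args) throws Exception {
        // "abc*" scans rows prefixed with "abc": start row "abc", end row
        // "abc\xFF" (an approximate upper bound for keys with that prefix).
        String spec = "abc*";
        byte[] start = spec.substring(0, spec.lastIndexOf('*')).getBytes("UTF-8");
        byte[] end = new byte[start.length + 1];
        System.arraycopy(start, 0, end, 0, start.length);
        end[start.length] = (byte) 255;
        System.out.println("start has " + start.length + " bytes, end has "
            + end.length);
      }
    }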

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
new file mode 100644
index 0000000..ffb2fae
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
@@ -0,0 +1,201 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class ScannerInstanceResource extends ResourceBase {
+  private static final Log LOG =
+    LogFactory.getLog(ScannerInstanceResource.class);
+
+  static CacheControl cacheControl;
+  static {
+    cacheControl = new CacheControl();
+    cacheControl.setNoCache(true);
+    cacheControl.setNoTransform(false);
+  }
+
+  ResultGenerator generator = null;
+  String id = null;
+  int batch = 1;
+
+  public ScannerInstanceResource() throws IOException { }
+
+  public ScannerInstanceResource(String table, String id, 
+      ResultGenerator generator, int batch) throws IOException {
+    this.id = id;
+    this.generator = generator;
+    this.batch = batch;
+  }
+
+  @GET
+  @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response get(final @Context UriInfo uriInfo, 
+      @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    if (generator == null) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return Response.status(Response.Status.NOT_FOUND)
+        .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+        .build();
+    }
+    CellSetModel model = new CellSetModel();
+    RowModel rowModel = null;
+    byte[] rowKey = null;
+    int limit = batch;
+    if (maxValues > 0) {
+      limit = maxValues;
+    }
+    int count = limit;
+    do {
+      Cell value = null;
+      try {
+        value = generator.next();
+      } catch (IllegalStateException e) {
+        if (ScannerResource.delete(id)) {
+          servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+        } else {
+          servlet.getMetrics().incrementFailedDeleteRequests(1);
+        }
+        servlet.getMetrics().incrementFailedGetRequests(1);
+        return Response.status(Response.Status.GONE)
+          .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
+          .build();
+      }
+      if (value == null) {
+        LOG.info("generator exhausted");
+        // respond with 204 (No Content) if an empty cell set would be
+        // returned
+        if (count == limit) {
+          return Response.noContent().build();
+        }
+        break;
+      }
+      if (rowKey == null) {
+        rowKey = CellUtil.cloneRow(value);
+        rowModel = new RowModel(rowKey);
+      }
+      if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
+        // if maxRows was given as a query param, stop if we would exceed the
+        // specified number of rows
+        if (maxRows > 0) { 
+          if (--maxRows == 0) {
+            generator.putBack(value);
+            break;
+          }
+        }
+        model.addRow(rowModel);
+        rowKey = CellUtil.cloneRow(value);
+        rowModel = new RowModel(rowKey);
+      }
+      rowModel.addCell(
+        new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), 
+          value.getTimestamp(), CellUtil.cloneValue(value)));
+    } while (--count > 0);
+    model.addRow(rowModel);
+    ResponseBuilder response = Response.ok(model);
+    response.cacheControl(cacheControl);
+    servlet.getMetrics().incrementSucessfulGetRequests(1);
+    return response.build();
+  }
+
+  @GET
+  @Produces(MIMETYPE_BINARY)
+  public Response getBinary(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
+        MIMETYPE_BINARY);
+    }
+    servlet.getMetrics().incrementRequests(1);
+    try {
+      Cell value = generator.next();
+      if (value == null) {
+        LOG.info("generator exhausted");
+        return Response.noContent().build();
+      }
+      ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
+      response.cacheControl(cacheControl);
+      response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value)));      
+      response.header("X-Column", 
+        Base64.encodeBytes(
+          KeyValue.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))));
+      response.header("X-Timestamp", value.getTimestamp());
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+      return response.build();
+    } catch (IllegalStateException e) {
+      if (ScannerResource.delete(id)) {
+        servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+      } else {
+        servlet.getMetrics().incrementFailedDeleteRequests(1);
+      }
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return Response.status(Response.Status.GONE)
+        .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
+        .build();
+    }
+  }
+
+  @DELETE
+  public Response delete(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    if (servlet.isReadOnly()) {
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+    if (ScannerResource.delete(id)) {
+      servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+    } else {
+      servlet.getMetrics().incrementFailedDeleteRequests(1);
+    }
+    return Response.ok().build();
+  }
+}
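
A scanner instance is consumed by repeated GETs against its URI: each call returns up to batch cells (the c query parameter overrides the batch size, n caps the number of rows), and 204 No Content signals that the generator is exhausted. A client-side paging sketch (the scanner URI is whatever Location header scanner creation returned; the one shown is hypothetical):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ScannerPagingSketch {
      public static void main(String[] args) throws Exception {
        URL scanner = new URL("http://localhost:8080/t1/scanner/abc123"); // hypothetical id
        while (true) {
          HttpURLConnection conn = (HttpURLConnection) scanner.openConnection();
          conn.setRequestProperty("Accept", "text/xml");
          int rc = conn.getResponseCode();
          if (rc == 204) {
            break;                 // generator exhausted
          }
          if (rc != 200) {
            throw new IllegalStateException("HTTP " + rc);
          }
          InputStream in = conn.getInputStream();
          for (int b; (b = in.read()) != -1;) {
            System.out.write(b);   // one CellSet batch
          }
          System.out.flush();
          in.close();
          conn.disconnect();
        }
      }
    }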

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
new file mode 100644
index 0000000..6c424ce
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -0,0 +1,164 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+
+@InterfaceAudience.Private
+public class ScannerResource extends ResourceBase {
+
+  private static final Log LOG = LogFactory.getLog(ScannerResource.class);
+
+  static final Map<String,ScannerInstanceResource> scanners =
+   Collections.synchronizedMap(new HashMap<String,ScannerInstanceResource>());
+
+  TableResource tableResource;
+
+  /**
+   * Constructor
+   * @param tableResource the table this scanner operates on
+   * @throws IOException
+   */
+  public ScannerResource(TableResource tableResource) throws IOException {
+    super();
+    this.tableResource = tableResource;
+  }
+
+  static boolean delete(final String id) {
+    ScannerInstanceResource instance = scanners.remove(id);
+    if (instance != null) {
+      instance.generator.close();
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  Response update(final ScannerModel model, final boolean replace,
+      final UriInfo uriInfo) {
+    servlet.getMetrics().incrementRequests(1);
+    if (servlet.isReadOnly()) {
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+    byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
+    RowSpec spec = null;
+    if (model.getLabels() != null) {
+      spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
+          model.getEndTime(), model.getMaxVersions(), model.getLabels());
+    } else {
+      spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
+          model.getEndTime(), model.getMaxVersions());
+    }
+    MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+    
+    try {
+      Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
+      String tableName = tableResource.getName();
+      ScannerResultGenerator gen =
+        new ScannerResultGenerator(tableName, spec, filter, model.getCaching(),
+          model.getCacheBlocks());
+      String id = gen.getID();
+      ScannerInstanceResource instance =
+        new ScannerInstanceResource(tableName, id, gen, model.getBatch());
+      scanners.put(id, instance);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("new scanner: " + id);
+      }
+      UriBuilder builder = uriInfo.getAbsolutePathBuilder();
+      URI uri = builder.path(id).build();
+      servlet.getMetrics().incrementSucessfulPutRequests(1);
+      return Response.created(uri).build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      if (e instanceof TableNotFoundException) {
+        return Response.status(Response.Status.NOT_FOUND)
+          .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+          .build();
+      } else if (e instanceof RuntimeException) {
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+          .build();
+      }
+      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+        .build();
+    }
+  }
+
+  @PUT
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response put(final ScannerModel model, 
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    }
+    return update(model, true, uriInfo);
+  }
+
+  @POST
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response post(final ScannerModel model,
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("POST " + uriInfo.getAbsolutePath());
+    }
+    return update(model, false, uriInfo);
+  }
+
+  @Path("{scanner: .+}")
+  public ScannerInstanceResource getScannerInstanceResource(
+      final @PathParam("scanner") String id) throws IOException {
+    ScannerInstanceResource instance = scanners.get(id);
+    if (instance == null) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return new ScannerInstanceResource();
+    } else {
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+    }
+    return instance;
+  }
+}
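
The scanner lifecycle, end to end: PUT or POST a ScannerModel document to /<table>/scanner, read the new instance URI from the Location header of the 201 response, page through it with GETs, then DELETE it. A sketch of creation and teardown (the <Scanner batch="..."/> XML shape follows the usual HBase REST examples and is an assumption here, as are the host and table names):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ScannerLifecycleSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/t1/scanner");
        HttpURLConnection post = (HttpURLConnection) url.openConnection();
        post.setRequestMethod("POST");
        post.setDoOutput(true);
        post.setRequestProperty("Content-Type", "text/xml");
        OutputStream out = post.getOutputStream();
        out.write("<Scanner batch=\"10\"/>".getBytes("UTF-8"));
        out.close();
        if (post.getResponseCode() == 201) {
          String location = post.getHeaderField("Location"); // scanner instance URI
          System.out.println("scanner created at " + location);
          // ... page through it with GETs, then tear it down:
          HttpURLConnection del =
              (HttpURLConnection) new URL(location).openConnection();
          del.setRequestMethod("DELETE");
          del.getResponseCode(); // ScannerResource.delete(id) closes the generator
          del.disconnect();
        }
        post.disconnect();
      }
    }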

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
new file mode 100644
index 0000000..055c971
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
@@ -0,0 +1,191 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public class ScannerResultGenerator extends ResultGenerator {
+
+  private static final Log LOG =
+    LogFactory.getLog(ScannerResultGenerator.class);
+
+  public static Filter buildFilterFromModel(final ScannerModel model) 
+      throws Exception {
+    String filter = model.getFilter();
+    if (filter == null || filter.length() == 0) {
+      return null;
+    }
+    return buildFilter(filter);
+  }
+
+  private String id;
+  private Iterator<Cell> rowI;
+  private Cell cache;
+  private ResultScanner scanner;
+  private Result cached;
+
+  public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+      final Filter filter, final boolean cacheBlocks)
+      throws IllegalArgumentException, IOException {
+    this(tableName, rowspec, filter, -1, cacheBlocks);
+  }
+
+  public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+      final Filter filter, final int caching, final boolean cacheBlocks)
+      throws IllegalArgumentException, IOException {
+    HTableInterface table = RESTServlet.getInstance().getTable(tableName);
+    try {
+      Scan scan;
+      if (rowspec.hasEndRow()) {
+        scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
+      } else {
+        scan = new Scan(rowspec.getStartRow());
+      }
+      if (rowspec.hasColumns()) {
+        byte[][] columns = rowspec.getColumns();
+        for (byte[] column: columns) {
+          byte[][] split = KeyValue.parseColumn(column);
+          if (split.length == 1) {
+            scan.addFamily(split[0]);
+          } else if (split.length == 2) {
+            scan.addColumn(split[0], split[1]);
+          } else {
+            throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
+          }
+        }
+      }
+      scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+      scan.setMaxVersions(rowspec.getMaxVersions());
+      if (filter != null) {
+        scan.setFilter(filter);
+      }
+      if (caching > 0) {
+        scan.setCaching(caching);
+      }
+      scan.setCacheBlocks(cacheBlocks);
+      if (rowspec.hasLabels()) {
+        scan.setAuthorizations(new Authorizations(rowspec.getLabels()));
+      }
+      scanner = table.getScanner(scan);
+      cached = null;
+      id = Long.toString(System.currentTimeMillis()) +
+             Integer.toHexString(scanner.hashCode());
+    } finally {
+      table.close();
+    }
+  }
+
+  public String getID() {
+    return id;
+  }
+
+  public void close() {
+    if (scanner != null) {
+      scanner.close();
+      scanner = null;
+    }
+  }
+
+  public boolean hasNext() {
+    if (cache != null) {
+      return true;
+    }
+    if (rowI != null && rowI.hasNext()) {
+      return true;
+    }
+    if (cached != null) {
+      return true;
+    }
+    try {
+      Result result = scanner.next();
+      if (result != null && !result.isEmpty()) {
+        cached = result;
+      }
+    } catch (UnknownScannerException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      LOG.error(StringUtils.stringifyException(e));
+    }
+    return cached != null;
+  }
+
+  public Cell next() {
+    if (cache != null) {
+      Cell kv = cache;
+      cache = null;
+      return kv;
+    }
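+    // Drain the current row's cell iterator; when a row is exhausted, pull
+    // the next non-empty Result from the scanner and start over on its cells.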
+    boolean loop;
+    do {
+      loop = false;
+      if (rowI != null) {
+        if (rowI.hasNext()) {
+          return rowI.next();
+        } else {
+          rowI = null;
+        }
+      }
+      if (cached != null) {
+        rowI = cached.listCells().iterator();
+        loop = true;
+        cached = null;
+      } else {
+        Result result = null;
+        try {
+          result = scanner.next();
+        } catch (UnknownScannerException e) {
+          throw new IllegalArgumentException(e);
+        } catch (IOException e) {
+          LOG.error(StringUtils.stringifyException(e));
+        }
+        if (result != null && !result.isEmpty()) {
+          rowI = result.listCells().iterator();
+          loop = true;
+        }
+      }
+    } while (loop);
+    return null;
+  }
+
+  public void putBack(Cell kv) {
+    this.cache = kv;
+  }
+
+  public void remove() {
+    throw new UnsupportedOperationException("remove not supported");
+  }
+}
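
The generator above flattens row-oriented Results into a stream of Cells,
which is what the cell-set marshalling code consumes. A minimal sketch of
driving it directly, assuming a running cluster reachable through
RESTServlet's configuration and a hypothetical table "t1" (a bare "*" row
spec is taken here to mean an unbounded scan):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.rest.RowSpec;
    import org.apache.hadoop.hbase.rest.ScannerResultGenerator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GeneratorSketch {
      public static void main(String[] args) throws Exception {
        ScannerResultGenerator gen = new ScannerResultGenerator(
            "t1", new RowSpec("*"), null, false);  // no filter, no block cache
        try {
          while (gen.hasNext()) {
            Cell cell = gen.next();
            System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(cell))
                + "/" + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))
                + "=" + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
          }
        } finally {
          gen.close();  // releases the underlying ResultScanner
        }
      }
    }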

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
new file mode 100644
index 0000000..5de6b38
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -0,0 +1,246 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import javax.xml.namespace.QName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class SchemaResource extends ResourceBase {
+  private static final Log LOG = LogFactory.getLog(SchemaResource.class);
+
+  static CacheControl cacheControl;
+  static {
+    cacheControl = new CacheControl();
+    cacheControl.setNoCache(true);
+    cacheControl.setNoTransform(false);
+  }
+
+  TableResource tableResource;
+
+  /**
+   * Constructor
+   * @param tableResource the table whose schema this resource exposes
+   * @throws IOException if the shared REST servlet instance cannot be obtained
+   */
+  public SchemaResource(TableResource tableResource) throws IOException {
+    super();
+    this.tableResource = tableResource;
+  }
+
+  private HTableDescriptor getTableSchema() throws IOException,
+      TableNotFoundException {
+    HTableInterface table = servlet.getTable(tableResource.getName());
+    try {
+      return table.getTableDescriptor();
+    } finally {
+      table.close();
+    }
+  }
+
+  @GET
+  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response get(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    try {
+      ResponseBuilder response =
+        Response.ok(new TableSchemaModel(getTableSchema()));
+      response.cacheControl(cacheControl);
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+      return response.build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return processException(e);
+    } 
+  }
+
+  private Response replace(final byte[] name, final TableSchemaModel model,
+      final UriInfo uriInfo, final HBaseAdmin admin) {
+    if (servlet.isReadOnly()) {
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+    try {
+      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+      for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
+        htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+      }
+      for (ColumnSchemaModel family: model.getColumns()) {
+        HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+        for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+          hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+        }
+        htd.addFamily(hcd);
+      }
+      if (admin.tableExists(name)) {
+        admin.disableTable(name);
+        admin.modifyTable(name, htd);
+        admin.enableTable(name);
+        servlet.getMetrics().incrementSucessfulPutRequests(1);
+      } else {
+        try {
+          admin.createTable(htd);
+          servlet.getMetrics().incrementSucessfulPutRequests(1);
+        } catch (TableExistsException e) {
+          // race, someone else created a table with the same name
+          return Response.status(Response.Status.NOT_MODIFIED)
+            .type(MIMETYPE_TEXT).entity("Not modified" + CRLF)
+            .build();
+        }
+      }
+      return Response.created(uriInfo.getAbsolutePath()).build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return processException(e);
+    }
+  }
+
+  private Response update(final byte[] name, final TableSchemaModel model,
+      final UriInfo uriInfo, final HBaseAdmin admin) {
+    if (servlet.isReadOnly()) {
+      return Response.status(Response.Status.FORBIDDEN)
+        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+        .build();
+    }
+    try {
+      HTableDescriptor htd = admin.getTableDescriptor(name);
+      admin.disableTable(name);
+      try {
+        for (ColumnSchemaModel family: model.getColumns()) {
+          HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+          for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+            hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+          }
+          if (htd.hasFamily(hcd.getName())) {
+            admin.modifyColumn(name, hcd);
+          } else {
+            admin.addColumn(name, hcd);
+          }
+        }
+      } catch (IOException e) {
+        return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+          .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+          .build();
+      } finally {
+        admin.enableTable(name);
+      }
+      servlet.getMetrics().incrementSucessfulPutRequests(1);
+      return Response.ok().build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return processException(e);
+    }
+  }
+
+  private Response update(final TableSchemaModel model, final boolean replace,
+      final UriInfo uriInfo) {
+    try {
+      byte[] name = Bytes.toBytes(tableResource.getName());
+      HBaseAdmin admin = servlet.getAdmin();
+      if (replace || !admin.tableExists(name)) {
+        return replace(name, model, uriInfo, admin);
+      } else {
+        return update(name, model, uriInfo, admin);
+      }
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedPutRequests(1);
+      return processException(e);
+    }
+  }
+
+  @PUT
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response put(final TableSchemaModel model,
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    return update(model, true, uriInfo);
+  }
+
+  @POST
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response post(final TableSchemaModel model,
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("POST " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    return update(model, false, uriInfo);
+  }
+
+  @DELETE
+  public Response delete(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    if (servlet.isReadOnly()) {
+      return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
+          .entity("Forbidden" + CRLF).build();
+    }
+    try {
+      HBaseAdmin admin = servlet.getAdmin();
+      try {
+        admin.disableTable(tableResource.getName());
+      } catch (TableNotEnabledException e) { /* this is what we want anyway */ }
+      admin.deleteTable(tableResource.getName());
+      servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+      return Response.ok().build();
+    } catch (Exception e) {
+      servlet.getMetrics().incrementFailedDeleteRequests(1);
+      return processException(e);
+    }
+  }
+}
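
Creating or altering a table through this resource is a matter of PUTting or
POSTing a TableSchemaModel at /<table>/schema. A minimal client-side sketch,
assuming a REST server on localhost:8080 and the hypothetical table "t1" with
family "cf":

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
    import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

    public class SchemaClientSketch {
      public static void main(String[] args) throws Exception {
        Client client = new Client(new Cluster().add("localhost", 8080));
        TableSchemaModel model = new TableSchemaModel();
        model.setName("t1");
        ColumnSchemaModel family = new ColumnSchemaModel();
        family.setName("cf");
        model.addColumnFamily(family);
        // PUT routes to replace(): create the table, or disable/modify/enable
        // it when it already exists; POST routes to the incremental update()
        Response response = client.put("/t1/schema",
            Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
        System.out.println(response.getCode()); // 201 Created on success
      }
    }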

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
new file mode 100644
index 0000000..a7e52bd
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -0,0 +1,109 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+
+@InterfaceAudience.Private
+public class StorageClusterStatusResource extends ResourceBase {
+  private static final Log LOG =
+    LogFactory.getLog(StorageClusterStatusResource.class);
+
+  static CacheControl cacheControl;
+  static {
+    cacheControl = new CacheControl();
+    cacheControl.setNoCache(true);
+    cacheControl.setNoTransform(false);
+  }
+
+  /**
+   * Constructor
+   * @throws IOException if the shared REST servlet instance cannot be obtained
+   */
+  public StorageClusterStatusResource() throws IOException {
+    super();
+  }
+
+  @GET
+  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+    MIMETYPE_PROTOBUF_IETF})
+  public Response get(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    try {
+      ClusterStatus status = servlet.getAdmin().getClusterStatus();
+      StorageClusterStatusModel model = new StorageClusterStatusModel();
+      model.setRegions(status.getRegionsCount());
+      model.setRequests(status.getRequestsCount());
+      model.setAverageLoad(status.getAverageLoad());
+      for (ServerName info: status.getServers()) {
+        ServerLoad load = status.getLoad(info);
+        StorageClusterStatusModel.Node node =
+          model.addLiveNode(
+            info.getHostname() + ":" +
+            Integer.toString(info.getPort()),
+            info.getStartcode(), load.getUsedHeapMB(),
+            load.getMaxHeapMB());
+        node.setRequests(load.getNumberOfRequests());
+        for (RegionLoad region: load.getRegionsLoad().values()) {
+          node.addRegion(region.getName(), region.getStores(),
+            region.getStorefiles(), region.getStorefileSizeMB(),
+            region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),
+            region.getReadRequestsCount(), region.getWriteRequestsCount(),
+            region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
+            region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
+            region.getCurrentCompactedKVs());
+        }
+      }
+      for (ServerName name: status.getDeadServerNames()) {
+        model.addDeadNode(name.toString());
+      }
+      ResponseBuilder response = Response.ok(model);
+      response.cacheControl(cacheControl);
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+      return response.build();
+    } catch (IOException e) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+        .build();
+    }
+  }
+}
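
The status model built above can be fetched and unmarshalled on the client
side with the same model class. A minimal sketch, assuming a REST server on
localhost:8080 and the standard /status/cluster mount:

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        Client client = new Client(new Cluster().add("localhost", 8080));
        Response response = client.get("/status/cluster",
            Constants.MIMETYPE_PROTOBUF);
        StorageClusterStatusModel status = new StorageClusterStatusModel();
        // decode the protobuf body into the model populated by get() above
        status.getObjectFromMessage(response.getBody());
        System.out.println("regions=" + status.getRegions()
            + " requests=" + status.getRequests()
            + " averageLoad=" + status.getAverageLoad());
        for (StorageClusterStatusModel.Node node : status.getLiveNodes()) {
          System.out.println(node.getName() + " heapMB=" + node.getHeapSizeMB());
        }
      }
    }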

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
new file mode 100644
index 0000000..85e81f8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -0,0 +1,79 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+
+@InterfaceAudience.Private
+public class StorageClusterVersionResource extends ResourceBase {
+  private static final Log LOG =
+    LogFactory.getLog(StorageClusterVersionResource.class);
+
+  static CacheControl cacheControl;
+  static {
+    cacheControl = new CacheControl();
+    cacheControl.setNoCache(true);
+    cacheControl.setNoTransform(false);
+  }
+
+  /**
+   * Constructor
+   * @throws IOException if the shared REST servlet instance cannot be obtained
+   */
+  public StorageClusterVersionResource() throws IOException {
+    super();
+  }
+
+  @GET
+  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
+  public Response get(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    try {
+      StorageClusterVersionModel model = new StorageClusterVersionModel();
+      model.setVersion(servlet.getAdmin().getClusterStatus().getHBaseVersion());
+      ResponseBuilder response = Response.ok(model);
+      response.cacheControl(cacheControl);
+      servlet.getMetrics().incrementSucessfulGetRequests(1);
+      return response.build();
+    } catch (IOException e) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+        .build();
+    }
+  }
+}
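
A matching client-side fetch of the cluster version, assuming a REST server
on localhost:8080 (note this endpoint produces text, XML and JSON only, not
protobuf):

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ClusterVersionSketch {
      public static void main(String[] args) throws Exception {
        Client client = new Client(new Cluster().add("localhost", 8080));
        Response response = client.get("/version/cluster",
            Constants.MIMETYPE_TEXT);
        System.out.println(Bytes.toString(response.getBody()));
      }
    }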

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
new file mode 100644
index 0000000..c458cfa
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -0,0 +1,180 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.List;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.Encoded;
+import javax.ws.rs.HeaderParam;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class TableResource extends ResourceBase {
+
+  String table;
+  private static final Log LOG = LogFactory.getLog(TableResource.class);
+
+  /**
+   * Constructor
+   * @param table the name of the table this resource represents
+   * @throws IOException if the shared REST servlet instance cannot be obtained
+   */
+  public TableResource(String table) throws IOException {
+    super();
+    this.table = table;
+  }
+
+  /** @return the table name */
+  String getName() {
+    return table;
+  }
+
+  /**
+   * @return true if the table exists
+   * @throws IOException
+   */
+  boolean exists() throws IOException {
+    return servlet.getAdmin().tableExists(table);
+  }
+
+  @Path("exists")
+  public ExistsResource getExistsResource() throws IOException {
+    return new ExistsResource(this);
+  }
+
+  @Path("regions")
+  public RegionsResource getRegionsResource() throws IOException {
+    return new RegionsResource(this);
+  }
+
+  @Path("scanner")
+  public ScannerResource getScannerResource() throws IOException {
+    return new ScannerResource(this);
+  }
+
+  @Path("schema")
+  public SchemaResource getSchemaResource() throws IOException {
+    return new SchemaResource(this);
+  }
+
+  @Path("multiget")
+  public MultiRowResource getMultipleRowResource(
+          final @QueryParam("v") String versions) throws IOException {
+    return new MultiRowResource(this, versions);
+  }
+
+  @Path("{rowspec: [^*]+}")
+  public RowResource getRowResource(
+      // We need the @Encoded decorator so Jersey won't urldecode before
+      // the RowSpec constructor has a chance to parse
+      final @PathParam("rowspec") @Encoded String rowspec,
+      final @QueryParam("v") String versions,
+      final @QueryParam("check") String check) throws IOException {
+    return new RowResource(this, rowspec, versions, check);
+  }
+
+  @Path("{suffixglobbingspec: .*\\*/.+}")
+  public RowResource getRowResourceWithSuffixGlobbing(
+      // We need the @Encoded decorator so Jersey won't urldecode before
+      // the RowSpec constructor has a chance to parse
+      final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec,
+      final @QueryParam("v") String versions,
+      final @QueryParam("check") String check) throws IOException {
+    return new RowResource(this, suffixglobbingspec, versions, check);
+  }
+
+  @Path("{scanspec: .*[*]$}")
+  public TableScanResource getScanResource(
+      final @Context UriInfo uriInfo,
+      final @PathParam("scanspec") String scanSpec,
+      final @HeaderParam("Accept") String contentType,
+      @DefaultValue(Integer.MAX_VALUE + "")
+      @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
+      @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
+      @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
+      @DefaultValue("") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
+      @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
+      @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
+      @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
+      @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
+      @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) {
+    try {
+      Filter filter = null;
+      if (scanSpec.indexOf('*') > 0) {
+        String prefix = scanSpec.substring(0, scanSpec.indexOf('*'));
+        filter = new PrefixFilter(Bytes.toBytes(prefix));
+      }
+      LOG.debug("Query parameters  : Table Name = > " + this.table + " Start Row => " + startRow
+          + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
+          + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
+          + maxVersions + " Batch Size => " + batchSize);
+      HTableInterface hTable = RESTServlet.getInstance().getTable(this.table);
+      Scan tableScan = new Scan();
+      tableScan.setBatch(batchSize);
+      tableScan.setMaxVersions(maxVersions);
+      tableScan.setTimeRange(startTime, endTime);
+      tableScan.setStartRow(Bytes.toBytes(startRow));
+      tableScan.setStopRow(Bytes.toBytes(endRow));
+      for (String csplit : column) {
+        String[] familysplit = csplit.trim().split(":");
+        if (familysplit.length == 2 && familysplit[1].length() > 0) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Scan family and column : " + familysplit[0] + " " + familysplit[1]);
+          }
+          tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1]));
+        } else if (StringUtils.isNotEmpty(familysplit[0])) {
+          // "family:" with an empty qualifier and a bare "family" both mean
+          // the whole column family
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Scan family : " + familysplit[0]);
+          }
+          tableScan.addFamily(Bytes.toBytes(familysplit[0]));
+        }
+      }
+      if (filter != null) {
+        tableScan.setFilter(filter);
+      }
+      int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
+      tableScan.setCaching(fetchSize);
+      return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit);
+    } catch (Exception exp) {
+      servlet.getMetrics().incrementFailedScanRequests(1);
+      LOG.warn(exp.toString(), exp);
+      processException(exp);
+      return null;
+    }
+  }
+}
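
The {scanspec} endpoint above gives a stateless, single-request alternative to
the stored-scanner flow: a trailing "*" in the path selects getScanResource(),
and the scan is parameterized entirely through the query string. A minimal
sketch, assuming a REST server on localhost:8080, a hypothetical table "t1"
with family "cf", and the query parameter names bound via Constants (limit,
column, and so on):

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StatelessScanSketch {
      public static void main(String[] args) throws Exception {
        Client client = new Client(new Cluster().add("localhost", 8080));
        // "abc*" matches the {scanspec: .*[*]$} template, so the request is
        // routed to getScanResource() with a PrefixFilter on "abc"
        Response response = client.get("/t1/abc*?limit=10&column=cf:a",
            Constants.MIMETYPE_XML);
        System.out.println(Bytes.toString(response.getBody()));
      }
    }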