Posted to commits@hbase.apache.org by ec...@apache.org on 2014/10/10 18:53:16 UTC

[35/38] HBASE-12197 Move rest to its own module

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
new file mode 100644
index 0000000..2cfea1b
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
@@ -0,0 +1,147 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class GZIPResponseWrapper extends HttpServletResponseWrapper {
+  private HttpServletResponse response;
+  private ServletOutputStream os;
+  private PrintWriter writer;
+  private boolean compress = true;
+
+  public GZIPResponseWrapper(HttpServletResponse response) {
+    super(response);
+    this.response = response;
+  }
+
+  @Override
+  public void setStatus(int status) {
+    super.setStatus(status);
+    if (status < 200 || status >= 300) {
+      compress = false;
+    }
+  }
+
+  @Override
+  public void addHeader(String name, String value) {
+    if (!"content-length".equalsIgnoreCase(name)) {
+      super.addHeader(name, value);
+    }
+  }
+
+  @Override
+  public void setContentLength(int length) {
+    // do nothing
+  }
+
+  @Override
+  public void setIntHeader(String name, int value) {
+    if (!"content-length".equalsIgnoreCase(name)) {
+      super.setIntHeader(name, value);
+    }
+  }
+
+  @Override
+  public void setHeader(String name, String value) {
+    if (!"content-length".equalsIgnoreCase(name)) {
+      super.setHeader(name, value);
+    }
+  }
+
+  @Override
+  public void flushBuffer() throws IOException {
+    if (writer != null) {
+      writer.flush();
+    }
+    if (os != null && (os instanceof GZIPResponseStream)) {
+      ((GZIPResponseStream)os).finish();
+    } else {
+      getResponse().flushBuffer();
+    }
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    if (os != null && (os instanceof GZIPResponseStream)) {
+      ((GZIPResponseStream)os).resetBuffer();
+    }
+    writer = null;
+    os = null;
+    compress = true;
+  }
+
+  @Override
+  public void resetBuffer() {
+    super.resetBuffer();
+    if (os != null && (os instanceof GZIPResponseStream)) {
+      ((GZIPResponseStream)os).resetBuffer();
+    }
+    writer = null;
+    os = null;
+  }
+
+  @Override
+  public void sendError(int status, String msg) throws IOException {
+    resetBuffer();
+    super.sendError(status, msg);
+  }
+
+  @Override
+  public void sendError(int status) throws IOException {
+    resetBuffer();
+    super.sendError(status);
+  }
+
+  @Override
+  public void sendRedirect(String location) throws IOException {
+    resetBuffer();
+    super.sendRedirect(location);
+  }
+
+  @Override
+  public ServletOutputStream getOutputStream() throws IOException {
+    if (os == null) {
+      if (!response.isCommitted() && compress) {
+        os = (ServletOutputStream)new GZIPResponseStream(response);
+      } else {
+        os = response.getOutputStream();
+      }
+    }
+    return os;
+  }
+
+  @Override
+  public PrintWriter getWriter() throws IOException {
+    if (writer == null) {
+      writer = new PrintWriter(getOutputStream());
+    }
+    return writer;
+  }
+}
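
The wrapper above delegates the actual compression to GZIPResponseStream,
which is added in another file of this patch and is not shown in this chunk.
As a rough sketch of the contract the wrapper relies on (an illustration,
not the patch's implementation), such a stream wraps the raw servlet output
in a java.util.zip.GZIPOutputStream and exposes the finish() and
resetBuffer() hooks called from flushBuffer(), reset() and resetBuffer()
above:

import java.io.IOException;
import java.util.zip.GZIPOutputStream;

import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;

// Hypothetical sketch only; assumes the servlet 2.5-era API where write(int)
// is the sole abstract method of ServletOutputStream.
public class GZIPResponseStreamSketch extends ServletOutputStream {
  private final HttpServletResponse response;
  private GZIPOutputStream out;

  public GZIPResponseStreamSketch(HttpServletResponse response) throws IOException {
    this.response = response;
    // advertise the encoding before any output is committed
    response.addHeader("Content-Encoding", "gzip");
    this.out = new GZIPOutputStream(response.getOutputStream());
  }

  @Override
  public void write(int b) throws IOException {
    out.write(b);
  }

  @Override
  public void flush() throws IOException {
    out.flush();
  }

  @Override
  public void close() throws IOException {
    out.close();
  }

  // called from GZIPResponseWrapper.flushBuffer(): writes the gzip trailer
  public void finish() throws IOException {
    out.finish();
  }

  // called from GZIPResponseWrapper.reset()/resetBuffer(); must not throw a
  // checked exception since the wrapper's reset() declares none
  public void resetBuffer() {
    try {
      out = new GZIPOutputStream(response.getOutputStream());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}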

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
new file mode 100644
index 0000000..4995b86
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
@@ -0,0 +1,85 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class GzipFilter implements Filter {
+  private Set<String> mimeTypes = new HashSet<String>();
+
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+    String s = filterConfig.getInitParameter("mimeTypes");
+    if (s != null) {
+      StringTokenizer tok = new StringTokenizer(s, ",", false);
+      while (tok.hasMoreTokens()) {
+        mimeTypes.add(tok.nextToken());
+      }
+    }
+  }
+
+  @Override
+  public void destroy() {
+  }
+
+  @Override
+  public void doFilter(ServletRequest req, ServletResponse rsp,
+      FilterChain chain) throws IOException, ServletException {
+    HttpServletRequest request = (HttpServletRequest)req;
+    HttpServletResponse response = (HttpServletResponse)rsp;
+    String contentEncoding = request.getHeader("content-encoding");
+    String acceptEncoding = request.getHeader("accept-encoding");
+    String contentType = request.getHeader("content-type");
+    if ((contentEncoding != null) &&
+        (contentEncoding.toLowerCase().indexOf("gzip") > -1)) {
+      request = new GZIPRequestWrapper(request);
+    }
+    if (((acceptEncoding != null) &&
+          (acceptEncoding.toLowerCase().indexOf("gzip") > -1)) ||
+        ((contentType != null) && mimeTypes.contains(contentType))) {
+      response = new GZIPResponseWrapper(response);
+    }
+    chain.doFilter(request, response);
+    if (response instanceof GZIPResponseWrapper) {
+      OutputStream os = response.getOutputStream();
+      if (os instanceof GZIPResponseStream) {
+        ((GZIPResponseStream)os).finish();
+      }
+    }
+  }
+
+}
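
Seen from the client side, the filter only compresses when the request
advertises gzip in Accept-Encoding (or when the request's content-type is in
the configured mimeTypes set). A minimal check against a running REST server,
with host, port and resource assumed for illustration, decompresses the
entity with the JDK's GZIPInputStream:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.zip.GZIPInputStream;

public class GzipClientCheck {
  public static void main(String[] args) throws Exception {
    // assumed endpoint; adjust host, port and resource for the deployment
    URL url = new URL("http://localhost:8080/version");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // GzipFilter wraps the response only when the client advertises gzip
    conn.setRequestProperty("Accept-Encoding", "gzip");
    BufferedReader in = new BufferedReader(new InputStreamReader(
        new GZIPInputStream(conn.getInputStream())));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line);
    }
    in.close();
  }
}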

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
new file mode 100644
index 0000000..349d352
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
@@ -0,0 +1,207 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlValue;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Representation of a cell. A cell is a single value associated with a column
+ * and optional qualifier, and either the timestamp when it was stored or the
+ * user-provided timestamp if one was explicitly supplied.
+ *
+ * <pre>
+ * &lt;complexType name="Cell"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="value" maxOccurs="1" minOccurs="1"&gt;
+ *       &lt;simpleType&gt;
+ *         &lt;restriction base="base64Binary"/&gt;
+ *       &lt;/simpleType&gt;
+ *     &lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="column" type="base64Binary" /&gt;
+ *   &lt;attribute name="timestamp" type="int" /&gt;
+ * &lt;/complexType&gt;
+ * </pre>
+ */
+@XmlRootElement(name="Cell")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class CellModel implements ProtobufMessageHandler, Serializable {
+  private static final long serialVersionUID = 1L;
+  
+  @JsonProperty("column")
+  @XmlAttribute
+  private byte[] column;
+
+  @JsonProperty("timestamp")
+  @XmlAttribute
+  private long timestamp = HConstants.LATEST_TIMESTAMP;
+
+  @JsonProperty("$")
+  @XmlValue
+  private byte[] value;
+
+  /**
+   * Default constructor
+   */
+  public CellModel() {}
+
+  /**
+   * Constructor
+   * @param column
+   * @param value
+   */
+  public CellModel(byte[] column, byte[] value) {
+    this(column, HConstants.LATEST_TIMESTAMP, value);
+  }
+
+  /**
+   * Constructor
+   * @param column
+   * @param qualifier
+   * @param value
+   */
+  public CellModel(byte[] column, byte[] qualifier, byte[] value) {
+    this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
+  }
+
+  /**
+   * Constructor from KeyValue
+   * @param kv
+   */
+  public CellModel(KeyValue kv) {
+    this(kv.getFamily(), kv.getQualifier(), kv.getTimestamp(), kv.getValue());
+  }
+
+  /**
+   * Constructor
+   * @param column
+   * @param timestamp
+   * @param value
+   */
+  public CellModel(byte[] column, long timestamp, byte[] value) {
+    this.column = column;
+    this.timestamp = timestamp;
+    this.value = value;
+  }
+
+  /**
+   * Constructor
+   * @param column
+   * @param qualifier
+   * @param timestamp
+   * @param value
+   */
+  public CellModel(byte[] column, byte[] qualifier, long timestamp,
+      byte[] value) {
+    this.column = KeyValue.makeColumn(column, qualifier);
+    this.timestamp = timestamp;
+    this.value = value;
+  }
+  
+  /**
+   * @return the column
+   */
+  public byte[] getColumn() {
+    return column;
+  }
+
+  /**
+   * @param column the column to set
+   */
+  public void setColumn(byte[] column) {
+    this.column = column;
+  }
+
+  /**
+   * @return true if the timestamp property has been specified by the
+   * user
+   */
+  public boolean hasUserTimestamp() {
+    return timestamp != HConstants.LATEST_TIMESTAMP;
+  }
+
+  /**
+   * @return the timestamp
+   */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  /**
+   * @param timestamp the timestamp to set
+   */
+  public void setTimestamp(long timestamp) {
+    this.timestamp = timestamp;
+  }
+
+  /**
+   * @return the value
+   */
+  public byte[] getValue() {
+    return value;
+  }
+
+  /**
+   * @param value the value to set
+   */
+  public void setValue(byte[] value) {
+    this.value = value;
+  }
+
+  @Override
+  public byte[] createProtobufOutput() {
+    Cell.Builder builder = Cell.newBuilder();
+    builder.setColumn(ByteStringer.wrap(getColumn()));
+    builder.setData(ByteStringer.wrap(getValue()));
+    if (hasUserTimestamp()) {
+      builder.setTimestamp(getTimestamp());
+    }
+    return builder.build().toByteArray();
+  }
+
+  @Override
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+      throws IOException {
+    Cell.Builder builder = Cell.newBuilder();
+    builder.mergeFrom(message);
+    setColumn(builder.getColumn().toByteArray());
+    setValue(builder.getData().toByteArray());
+    if (builder.hasTimestamp()) {
+      setTimestamp(builder.getTimestamp());
+    }
+    return this;
+  }
+}
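
A short round trip through CellModel's protobuf form, assuming Bytes.toBytes
from org.apache.hadoop.hbase.util.Bytes for the byte[] literals:

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelRoundTrip {
  public static void main(String[] args) throws Exception {
    // family and qualifier fold into a single "family:qualifier" column
    CellModel cell = new CellModel(Bytes.toBytes("info"), Bytes.toBytes("name"),
        Bytes.toBytes("value1"));
    byte[] wire = cell.createProtobufOutput();

    CellModel copy = new CellModel();
    copy.getObjectFromMessage(wire);
    // no explicit timestamp was set, so none was serialized and the decoded
    // copy still reports the LATEST_TIMESTAMP default
    System.out.println(Bytes.toString(copy.getColumn()));  // info:name
    System.out.println(copy.hasUserTimestamp());           // false
  }
}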

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
new file mode 100644
index 0000000..094da36
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
@@ -0,0 +1,152 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
+import org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet;
+
+/**
+ * Representation of a grouping of cells. May contain cells from more than
+ * one row. Encapsulates RowModel and CellModel models.
+ * 
+ * <pre>
+ * &lt;complexType name="CellSet"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="row" type="tns:Row" maxOccurs="unbounded" 
+ *       minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ * 
+ * &lt;complexType name="Row"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="key" type="base64Binary"&gt;&lt;/element&gt;
+ *     &lt;element name="cell" type="tns:Cell" 
+ *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ *
+ * &lt;complexType name="Cell"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="value" maxOccurs="1" minOccurs="1"&gt;
+ *       &lt;simpleType&gt;
+ *         &lt;restriction base="base64Binary"/&gt;
+ *       &lt;/simpleType&gt;
+ *     &lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="column" type="base64Binary" /&gt;
+ *   &lt;attribute name="timestamp" type="int" /&gt;
+ * &lt;/complexType&gt;
+ * </pre>
+ */
+@XmlRootElement(name="CellSet")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class CellSetModel implements Serializable, ProtobufMessageHandler {
+
+  private static final long serialVersionUID = 1L;
+
+  @XmlElement(name="Row")
+  private List<RowModel> rows;
+
+  /**  
+   * Constructor
+   */
+  public CellSetModel() {
+    this.rows = new ArrayList<RowModel>();
+  }
+  
+  /**
+   * @param rows the rows
+   */
+  public CellSetModel(List<RowModel> rows) {
+    super();
+    this.rows = rows;
+  }
+  
+  /**
+   * Add a row to this cell set
+   * @param row the row
+   */
+  public void addRow(RowModel row) {
+    rows.add(row);
+  }
+
+  /**
+   * @return the rows
+   */
+  public List<RowModel> getRows() {
+    return rows;
+  }
+
+  @Override
+  public byte[] createProtobufOutput() {
+    CellSet.Builder builder = CellSet.newBuilder();
+    for (RowModel row: getRows()) {
+      CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
+      rowBuilder.setKey(ByteStringer.wrap(row.getKey()));
+      for (CellModel cell: row.getCells()) {
+        Cell.Builder cellBuilder = Cell.newBuilder();
+        cellBuilder.setColumn(ByteStringer.wrap(cell.getColumn()));
+        cellBuilder.setData(ByteStringer.wrap(cell.getValue()));
+        if (cell.hasUserTimestamp()) {
+          cellBuilder.setTimestamp(cell.getTimestamp());
+        }
+        rowBuilder.addValues(cellBuilder);
+      }
+      builder.addRows(rowBuilder);
+    }
+    return builder.build().toByteArray();
+  }
+
+  @Override
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+      throws IOException {
+    CellSet.Builder builder = CellSet.newBuilder();
+    builder.mergeFrom(message);
+    for (CellSet.Row row: builder.getRowsList()) {
+      RowModel rowModel = new RowModel(row.getKey().toByteArray());
+      for (Cell cell: row.getValuesList()) {
+        long timestamp = HConstants.LATEST_TIMESTAMP;
+        if (cell.hasTimestamp()) {
+          timestamp = cell.getTimestamp();
+        }
+        rowModel.addCell(
+            new CellModel(cell.getColumn().toByteArray(), timestamp,
+                  cell.getData().toByteArray()));
+      }
+      addRow(rowModel);
+    }
+    return this;
+  }
+}
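
CellSetModel is the container the REST resources actually put on the wire; a
sketch of assembling one row with two cells and encoding it (row and column
names are illustrative):

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.CellSetModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellSetModelExample {
  public static void main(String[] args) throws Exception {
    RowModel row = new RowModel(Bytes.toBytes("row1"));
    row.addCell(new CellModel(Bytes.toBytes("info:name"), Bytes.toBytes("alice")));
    row.addCell(new CellModel(Bytes.toBytes("info:age"), 42L, Bytes.toBytes("33")));

    CellSetModel cellSet = new CellSetModel();
    cellSet.addRow(row);

    byte[] wire = cellSet.createProtobufOutput();
    CellSetModel copy = new CellSetModel();
    copy.getObjectFromMessage(wire);
    // cells that carried no user timestamp decode back as LATEST_TIMESTAMP
    System.out.println(copy.getRows().size());                   // 1
    System.out.println(copy.getRows().get(0).getCells().size()); // 2
  }
}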

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
new file mode 100644
index 0000000..ba0eed8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
@@ -0,0 +1,241 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.codehaus.jackson.annotate.JsonAnyGetter;
+import org.codehaus.jackson.annotate.JsonAnySetter;
+
+/**
+ * Representation of a column family schema.
+ * 
+ * <pre>
+ * &lt;complexType name="ColumnSchema"&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ *   &lt;anyAttribute&gt;&lt;/anyAttribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
+ */
+@XmlRootElement(name="ColumnSchema")
+@InterfaceAudience.Private
+public class ColumnSchemaModel implements Serializable {
+  private static final long serialVersionUID = 1L;
+  private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
+  private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
+  private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
+  private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
+  private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
+  private static QName TTL = new QName(HColumnDescriptor.TTL);
+  private static QName VERSIONS = new QName(HConstants.VERSIONS);
+
+  private String name;
+  private Map<QName,Object> attrs = new LinkedHashMap<QName,Object>();
+
+  /**
+   * Default constructor
+   */
+  public ColumnSchemaModel() {}
+
+  /**
+   * Add an attribute to the column family schema
+   * @param name the attribute name
+   * @param value the attribute value
+   */
+  @JsonAnySetter
+  public void addAttribute(String name, Object value) {
+    attrs.put(new QName(name), value);
+  }
+
+  /**
+   * @param name the attribute name
+   * @return the attribute value
+   */
+  public String getAttribute(String name) {
+    Object o = attrs.get(new QName(name));
+    return o != null ? o.toString(): null;
+  }
+
+  /**
+   * @return the column name
+   */
+  @XmlAttribute
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * @return the map for holding unspecified (user) attributes
+   */
+  @XmlAnyAttribute
+  @JsonAnyGetter
+  public Map<QName,Object> getAny() {
+    return attrs;
+  }
+
+  /**
+   * @param name the column name
+   */
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  /* (non-Javadoc)
+   * @see java.lang.Object#toString()
+   */
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("{ NAME => '");
+    sb.append(name);
+    sb.append('\'');
+    for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+      sb.append(", ");
+      sb.append(e.getKey().getLocalPart());
+      sb.append(" => '");
+      sb.append(e.getValue().toString());
+      sb.append('\'');
+    }
+    sb.append(" }");
+    return sb.toString();
+  }
+
+  // getters and setters for common schema attributes
+
+  // cannot be standard bean type getters and setters, otherwise this would
+  // confuse JAXB
+
+  /**
+   * @return true if the BLOCKCACHE attribute is present and true
+   */
+  public boolean __getBlockcache() {
+    Object o = attrs.get(BLOCKCACHE);
+    return o != null ? 
+      Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
+  }
+
+  /**
+   * @return the value of the BLOCKSIZE attribute or its default if it is unset
+   */
+  public int __getBlocksize() {
+    Object o = attrs.get(BLOCKSIZE);
+    return o != null ? 
+      Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
+  }
+
+  /**
+   * @return the value of the BLOOMFILTER attribute or its default if unset
+   */
+  public String __getBloomfilter() {
+    Object o = attrs.get(BLOOMFILTER);
+    return o != null ? o.toString() : HColumnDescriptor.DEFAULT_BLOOMFILTER;
+  }
+
+  /**
+   * @return the value of the COMPRESSION attribute or its default if unset
+   */
+  public String __getCompression() {
+    Object o = attrs.get(COMPRESSION);
+    return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
+  }
+
+  /**
+   * @return true if the IN_MEMORY attribute is present and true
+   */
+  public boolean __getInMemory() {
+    Object o = attrs.get(IN_MEMORY);
+    return o != null ? 
+      Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
+  }
+
+  /**
+   * @return the value of the TTL attribute or its default if it is unset
+   */
+  public int __getTTL() {
+    Object o = attrs.get(TTL);
+    return o != null ? 
+      Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
+  }
+
+  /**
+   * @return the value of the VERSIONS attribute or its default if it is unset
+   */
+  public int __getVersions() {
+    Object o = attrs.get(VERSIONS);
+    return o != null ? 
+      Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
+  }
+
+  /**
+   * @param value the desired value of the BLOCKSIZE attribute
+   */
+  public void __setBlocksize(int value) {
+    attrs.put(BLOCKSIZE, Integer.toString(value));
+  }
+
+  /**
+   * @param value the desired value of the BLOCKCACHE attribute
+   */
+  public void __setBlockcache(boolean value) {
+    attrs.put(BLOCKCACHE, Boolean.toString(value));
+  }
+
+  public void __setBloomfilter(String value) {
+    attrs.put(BLOOMFILTER, value);
+  }
+
+  /**
+   * @param value the desired value of the COMPRESSION attribute
+   */
+  public void __setCompression(String value) {
+    attrs.put(COMPRESSION, value); 
+  }
+
+  /**
+   * @param value the desired value of the IN_MEMORY attribute
+   */
+  public void __setInMemory(boolean value) {
+    attrs.put(IN_MEMORY, Boolean.toString(value));
+  }
+
+  /**
+   * @param value the desired value of the TTL attribute
+   */
+  public void __setTTL(int value) {
+    attrs.put(TTL, Integer.toString(value));
+  }
+
+  /**
+   * @param value the desired value of the VERSIONS attribute
+   */
+  public void __setVersions(int value) {
+    attrs.put(VERSIONS, Integer.toString(value));
+  }
+}
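
The double-underscore accessors are named that way so JAXB does not mistake
them for bean properties; they read and write the same attribute map that
toString() renders in HBase shell style. A quick illustration:

import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;

public class ColumnSchemaModelExample {
  public static void main(String[] args) {
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("info");
    family.__setVersions(3);
    family.__setCompression("GZ");
    // attrs is a LinkedHashMap, so attributes print in insertion order:
    // { NAME => 'info', VERSIONS => '3', COMPRESSION => 'GZ' }
    System.out.println(family);
  }
}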

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
new file mode 100644
index 0000000..596c754
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
@@ -0,0 +1,151 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Representation of a row. A row is a related set of cells, grouped by common
+ * row key. RowModels do not appear in results by themselves. They are always
+ * encapsulated within CellSetModels.
+ * 
+ * <pre>
+ * &lt;complexType name="Row"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="key" type="base64Binary"&gt;&lt;/element&gt;
+ *     &lt;element name="cell" type="tns:Cell" 
+ *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ * &lt;/complexType&gt;
+ * </pre>
+ */
+@XmlRootElement(name="Row")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class RowModel implements ProtobufMessageHandler, Serializable {
+  private static final long serialVersionUID = 1L;
+
+  @JsonProperty("key")
+  @XmlAttribute
+  private byte[] key;
+
+  @JsonProperty("Cell")
+  @XmlElement(name="Cell")
+  private List<CellModel> cells = new ArrayList<CellModel>();
+
+
+  /**
+   * Default constructor
+   */
+  public RowModel() { }
+
+  /**
+   * Constructor
+   * @param key the row key
+   */
+  public RowModel(final String key) {
+    this(key.getBytes());
+  }
+  
+  /**
+   * Constructor
+   * @param key the row key
+   */
+  public RowModel(final byte[] key) {
+    this.key = key;
+    cells = new ArrayList<CellModel>();
+  }
+
+  /**
+   * Constructor
+   * @param key the row key
+   * @param cells the cells
+   */
+  public RowModel(final String key, final List<CellModel> cells) {
+    this(key.getBytes(), cells);
+  }
+  
+  /**
+   * Constructor
+   * @param key the row key
+   * @param cells the cells
+   */
+  public RowModel(final byte[] key, final List<CellModel> cells) {
+    this.key = key;
+    this.cells = cells;
+  }
+  
+  /**
+   * Adds a cell to the list of cells for this row
+   * @param cell the cell
+   */
+  public void addCell(CellModel cell) {
+    cells.add(cell);
+  }
+
+  /**
+   * @return the row key
+   */
+  public byte[] getKey() {
+    return key;
+  }
+
+  /**
+   * @param key the row key
+   */
+  public void setKey(byte[] key) {
+    this.key = key;
+  }
+
+  /**
+   * @return the cells
+   */
+  public List<CellModel> getCells() {
+    return cells;
+  }
+
+  @Override
+  public byte[] createProtobufOutput() {
+    // there is no standalone row protobuf message
+    throw new UnsupportedOperationException(
+        "no protobuf equivalent to RowModel");
+  }
+
+  @Override
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+      throws IOException {
+    // there is no standalone row protobuf message
+    throw new UnsupportedOperationException(
+        "no protobuf equivalent to RowModel");
+  }
+}
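
One caveat worth noting: the String-key constructors call String.getBytes()
with the platform default charset, so for deterministic row keys the byte[]
constructors with Bytes.toBytes are the safer choice, e.g.:

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class RowModelExample {
  public static void main(String[] args) {
    // Bytes.toBytes always encodes UTF-8, independent of the JVM locale
    RowModel row = new RowModel(Bytes.toBytes("row1"));
    row.addCell(new CellModel(Bytes.toBytes("info:name"), Bytes.toBytes("v1")));
    // RowModel is never serialized on its own; wrap it in a CellSetModel
    // (as in the example above) before calling createProtobufOutput()
    System.out.println(row.getCells().size());  // 1
  }
}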

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
new file mode 100644
index 0000000..2ffdd4f
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -0,0 +1,852 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.BitComparator;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
+import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
+import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.DependentColumnFilter;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
+import org.apache.hadoop.hbase.filter.NullComparator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RandomRowFilter;
+import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.SkipFilter;
+import org.apache.hadoop.hbase.filter.SubstringComparator;
+import org.apache.hadoop.hbase.filter.TimestampsFilter;
+import org.apache.hadoop.hbase.filter.ValueFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;
+import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.ByteString;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
+import com.sun.jersey.api.json.JSONUnmarshaller;
+
+/**
+ * A representation of Scanner parameters.
+ * 
+ * <pre>
+ * &lt;complexType name="Scanner"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/&gt;
+ *     &lt;element name="filter" type="string" minOccurs="0" maxOccurs="1"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="startRow" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="endRow" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="batch" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="caching" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="startTime" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="endTime" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="maxVersions" type="int"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
+ */
+@XmlRootElement(name="Scanner")
+@InterfaceAudience.Private
+public class ScannerModel implements ProtobufMessageHandler, Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  private byte[] startRow = HConstants.EMPTY_START_ROW;
+  private byte[] endRow = HConstants.EMPTY_END_ROW;
+  private List<byte[]> columns = new ArrayList<byte[]>();
+  private int batch = Integer.MAX_VALUE;
+  private long startTime = 0;
+  private long endTime = Long.MAX_VALUE;
+  private String filter = null;
+  private int maxVersions = Integer.MAX_VALUE;
+  private int caching = -1;
+  private List<String> labels = new ArrayList<String>();
+  private boolean cacheBlocks = true;
+  
+  @XmlRootElement
+  static class FilterModel {
+    
+    @XmlRootElement
+    static class ByteArrayComparableModel {
+      @XmlAttribute public String type;
+      @XmlAttribute public String value;
+      @XmlAttribute public String op;
+
+      static enum ComparatorType {
+        BinaryComparator,
+        BinaryPrefixComparator,
+        BitComparator,
+        NullComparator,
+        RegexStringComparator,
+        SubstringComparator    
+      }
+
+      public ByteArrayComparableModel() { }
+
+      public ByteArrayComparableModel(
+          ByteArrayComparable comparator) {
+        String typeName = comparator.getClass().getSimpleName();
+        ComparatorType type = ComparatorType.valueOf(typeName);
+        this.type = typeName;
+        switch (type) {
+          case BinaryComparator:
+          case BinaryPrefixComparator:
+            this.value = Base64.encodeBytes(comparator.getValue());
+            break;
+          case BitComparator:
+            this.value = Base64.encodeBytes(comparator.getValue());
+            this.op = ((BitComparator)comparator).getOperator().toString();
+            break;
+          case NullComparator:
+            break;
+          case RegexStringComparator:
+          case SubstringComparator:
+            this.value = Bytes.toString(comparator.getValue());
+            break;
+          default:
+            throw new RuntimeException("unhandled filter type: " + type);
+        }
+      }
+
+      public ByteArrayComparable build() {
+        ByteArrayComparable comparator;
+        switch (ComparatorType.valueOf(type)) {
+          case BinaryComparator:
+            comparator = new BinaryComparator(Base64.decode(value));
+            break;
+          case BinaryPrefixComparator:
+            comparator = new BinaryPrefixComparator(Base64.decode(value));
+            break;
+          case BitComparator:
+            comparator = new BitComparator(Base64.decode(value),
+                BitComparator.BitwiseOp.valueOf(op));
+            break;
+          case NullComparator:
+            comparator = new NullComparator();
+            break;
+          case RegexStringComparator:
+            comparator = new RegexStringComparator(value);
+            break;
+          case SubstringComparator:
+            comparator = new SubstringComparator(value);
+            break;
+          default:
+            throw new RuntimeException("unhandled comparator type: " + type);
+        }
+        return comparator;
+      }
+
+    }
+
+    // A grab bag of fields; this would have been a union if this were C.
+    // These are null by default and will only be serialized if set (non null).
+    @XmlAttribute public String type;
+    @XmlAttribute public String op;
+    @XmlElement ByteArrayComparableModel comparator;
+    @XmlAttribute public String value;
+    @XmlElement public List<FilterModel> filters;
+    @XmlAttribute public Integer limit;
+    @XmlAttribute public Integer offset;
+    @XmlAttribute public String family;
+    @XmlAttribute public String qualifier;
+    @XmlAttribute public Boolean ifMissing;
+    @XmlAttribute public Boolean latestVersion;
+    @XmlAttribute public String minColumn;
+    @XmlAttribute public Boolean minColumnInclusive;
+    @XmlAttribute public String maxColumn;
+    @XmlAttribute public Boolean maxColumnInclusive;
+    @XmlAttribute public Boolean dropDependentColumn;
+    @XmlAttribute public Float chance;
+    @XmlElement public List<String> prefixes;
+    @XmlElement public List<Long> timestamps;
+
+    static enum FilterType {
+      ColumnCountGetFilter,
+      ColumnPaginationFilter,
+      ColumnPrefixFilter,
+      ColumnRangeFilter,
+      DependentColumnFilter,
+      FamilyFilter,
+      FilterList,
+      FirstKeyOnlyFilter,
+      InclusiveStopFilter,
+      KeyOnlyFilter,
+      MultipleColumnPrefixFilter,
+      PageFilter,
+      PrefixFilter,
+      QualifierFilter,
+      RandomRowFilter,
+      RowFilter,
+      SingleColumnValueExcludeFilter,
+      SingleColumnValueFilter,
+      SkipFilter,
+      TimestampsFilter,
+      ValueFilter,
+      WhileMatchFilter    
+    }
+
+    public FilterModel() { }
+    
+    public FilterModel(Filter filter) { 
+      String typeName = filter.getClass().getSimpleName();
+      FilterType type = FilterType.valueOf(typeName);
+      this.type = typeName;
+      switch (type) {
+        case ColumnCountGetFilter:
+          this.limit = ((ColumnCountGetFilter)filter).getLimit();
+          break;
+        case ColumnPaginationFilter:
+          this.limit = ((ColumnPaginationFilter)filter).getLimit();
+          this.offset = ((ColumnPaginationFilter)filter).getOffset();
+          break;
+        case ColumnPrefixFilter:
+          this.value = Base64.encodeBytes(((ColumnPrefixFilter)filter).getPrefix());
+          break;
+        case ColumnRangeFilter:
+          this.minColumn = Base64.encodeBytes(((ColumnRangeFilter)filter).getMinColumn());
+          this.minColumnInclusive = ((ColumnRangeFilter)filter).getMinColumnInclusive();
+          this.maxColumn = Base64.encodeBytes(((ColumnRangeFilter)filter).getMaxColumn());
+          this.maxColumnInclusive = ((ColumnRangeFilter)filter).getMaxColumnInclusive();
+          break;
+        case DependentColumnFilter: {
+          DependentColumnFilter dcf = (DependentColumnFilter)filter;
+          this.family = Base64.encodeBytes(dcf.getFamily());
+          byte[] qualifier = dcf.getQualifier();
+          if (qualifier != null) {
+            this.qualifier = Base64.encodeBytes(qualifier);
+          }
+          this.op = dcf.getOperator().toString();
+          this.comparator = new ByteArrayComparableModel(dcf.getComparator());
+          this.dropDependentColumn = dcf.dropDependentColumn();
+        } break;
+        case FilterList:
+          this.op = ((FilterList)filter).getOperator().toString();
+          this.filters = new ArrayList<FilterModel>();
+          for (Filter child: ((FilterList)filter).getFilters()) {
+            this.filters.add(new FilterModel(child));
+          }
+          break;
+        case FirstKeyOnlyFilter:
+        case KeyOnlyFilter:
+          break;
+        case InclusiveStopFilter:
+          this.value = 
+            Base64.encodeBytes(((InclusiveStopFilter)filter).getStopRowKey());
+          break;
+        case MultipleColumnPrefixFilter:
+          this.prefixes = new ArrayList<String>();
+          for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) {
+            this.prefixes.add(Base64.encodeBytes(prefix));
+          }
+          break;
+        case PageFilter:
+          this.value = Long.toString(((PageFilter)filter).getPageSize());
+          break;
+        case PrefixFilter:
+          this.value = Base64.encodeBytes(((PrefixFilter)filter).getPrefix());
+          break;
+        case FamilyFilter:
+        case QualifierFilter:
+        case RowFilter:
+        case ValueFilter:
+          this.op = ((CompareFilter)filter).getOperator().toString();
+          this.comparator = 
+            new ByteArrayComparableModel(
+              ((CompareFilter)filter).getComparator());
+          break;
+        case RandomRowFilter:
+          this.chance = ((RandomRowFilter)filter).getChance();
+          break;
+        case SingleColumnValueExcludeFilter:
+        case SingleColumnValueFilter: {
+          SingleColumnValueFilter scvf = (SingleColumnValueFilter) filter;
+          this.family = Base64.encodeBytes(scvf.getFamily());
+          byte[] qualifier = scvf.getQualifier();
+          if (qualifier != null) {
+            this.qualifier = Base64.encodeBytes(qualifier);
+          }
+          this.op = scvf.getOperator().toString();
+          this.comparator = 
+            new ByteArrayComparableModel(scvf.getComparator());
+          if (scvf.getFilterIfMissing()) {
+            this.ifMissing = true;
+          }
+          if (scvf.getLatestVersionOnly()) {
+            this.latestVersion = true;
+          }
+        } break;
+        case SkipFilter:
+          this.filters = new ArrayList<FilterModel>();
+          this.filters.add(new FilterModel(((SkipFilter)filter).getFilter()));
+          break;
+        case TimestampsFilter:
+          this.timestamps = ((TimestampsFilter)filter).getTimestamps();
+          break;
+        case WhileMatchFilter:
+          this.filters = new ArrayList<FilterModel>();
+          this.filters.add(
+            new FilterModel(((WhileMatchFilter)filter).getFilter()));
+          break;
+        default:
+          throw new RuntimeException("unhandled filter type " + type);
+      }
+    }
+
+    public Filter build() {
+      Filter filter;
+      switch (FilterType.valueOf(type)) {
+      case ColumnCountGetFilter:
+        filter = new ColumnCountGetFilter(limit);
+        break;
+      case ColumnPaginationFilter:
+        filter = new ColumnPaginationFilter(limit, offset);
+        break;
+      case ColumnPrefixFilter:
+        filter = new ColumnPrefixFilter(Base64.decode(value));
+        break;
+      case ColumnRangeFilter:
+        filter = new ColumnRangeFilter(Base64.decode(minColumn),
+            minColumnInclusive, Base64.decode(maxColumn),
+            maxColumnInclusive);
+        break;
+      case DependentColumnFilter:
+        filter = new DependentColumnFilter(Base64.decode(family),
+            qualifier != null ? Base64.decode(qualifier) : null,
+            dropDependentColumn, CompareOp.valueOf(op), comparator.build());
+        break;
+      case FamilyFilter:
+        filter = new FamilyFilter(CompareOp.valueOf(op), comparator.build());
+        break;
+      case FilterList: {
+        List<Filter> list = new ArrayList<Filter>();
+        for (FilterModel model: filters) {
+          list.add(model.build());
+        }
+        filter = new FilterList(FilterList.Operator.valueOf(op), list);
+      } break;
+      case FirstKeyOnlyFilter:
+        filter = new FirstKeyOnlyFilter();
+        break;
+      case InclusiveStopFilter:
+        filter = new InclusiveStopFilter(Base64.decode(value));
+        break;
+      case KeyOnlyFilter:
+        filter = new KeyOnlyFilter();
+        break;
+      case MultipleColumnPrefixFilter: {
+        byte[][] values = new byte[prefixes.size()][];
+        for (int i = 0; i < prefixes.size(); i++) {
+          values[i] = Base64.decode(prefixes.get(i));
+        }
+        filter = new MultipleColumnPrefixFilter(values);
+      } break;
+      case PageFilter:
+        filter = new PageFilter(Long.valueOf(value));
+        break;
+      case PrefixFilter:
+        filter = new PrefixFilter(Base64.decode(value));
+        break;
+      case QualifierFilter:
+        filter = new QualifierFilter(CompareOp.valueOf(op), comparator.build());
+        break;
+      case RandomRowFilter:
+        filter = new RandomRowFilter(chance);
+        break;
+      case RowFilter:
+        filter = new RowFilter(CompareOp.valueOf(op), comparator.build());
+        break;
+      case SingleColumnValueFilter:
+        filter = new SingleColumnValueFilter(Base64.decode(family),
+          qualifier != null ? Base64.decode(qualifier) : null,
+          CompareOp.valueOf(op), comparator.build());
+        if (ifMissing != null) {
+          ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing);
+        }
+        if (latestVersion != null) {
+          ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion);
+        }
+        break;
+      case SingleColumnValueExcludeFilter:
+        filter = new SingleColumnValueExcludeFilter(Base64.decode(family),
+          qualifier != null ? Base64.decode(qualifier) : null,
+          CompareOp.valueOf(op), comparator.build());
+        if (ifMissing != null) {
+          ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing);
+        }
+        if (latestVersion != null) {
+          ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion);
+        }
+        break;
+      case SkipFilter:
+        filter = new SkipFilter(filters.get(0).build());
+        break;
+      case TimestampsFilter:
+        filter = new TimestampsFilter(timestamps);
+        break;
+      case ValueFilter:
+        filter = new ValueFilter(CompareOp.valueOf(op), comparator.build());
+        break;
+      case WhileMatchFilter:
+        filter = new WhileMatchFilter(filters.get(0).build());
+        break;
+      default:
+        throw new RuntimeException("unhandled filter type: " + type);
+      }
+      return filter;
+    }
+
+  }
+
+  /**
+   * @param s the JSON representation of the filter
+   * @return the filter
+   * @throws Exception
+   */
+  public static Filter buildFilter(String s) throws Exception {
+    JSONJAXBContext context =
+      new JSONJAXBContext(JSONConfiguration.natural().build(),
+        FilterModel.class);
+    JSONUnmarshaller unmarshaller = context.createJSONUnmarshaller();
+    FilterModel model = unmarshaller.unmarshalFromJSON(new StringReader(s),
+      FilterModel.class);
+    return model.build();
+  }
+
+  /**
+   * @param filter the filter
+   * @return the JSON representation of the filter
+   * @throws Exception 
+   */
+  public static String stringifyFilter(final Filter filter) throws Exception {
+    JSONJAXBContext context =
+      new JSONJAXBContext(JSONConfiguration.natural().build(),
+        FilterModel.class);
+    JSONMarshaller marshaller = context.createJSONMarshaller();
+    StringWriter writer = new StringWriter();
+    marshaller.marshallToJSON(new FilterModel(filter), writer);
+    return writer.toString();
+  }
+
+  private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":");
+
+  /**
+   * @param scan the scan specification
+   * @return a ScannerModel representation of the scan
+   * @throws Exception
+   */
+  public static ScannerModel fromScan(Scan scan) throws Exception {
+    ScannerModel model = new ScannerModel();
+    model.setStartRow(scan.getStartRow());
+    model.setEndRow(scan.getStopRow());
+    Map<byte [], NavigableSet<byte []>> families = scan.getFamilyMap();
+    if (families != null) {
+      for (Map.Entry<byte [], NavigableSet<byte []>> entry : families.entrySet()) {
+        if (entry.getValue() != null) {
+          for (byte[] qualifier: entry.getValue()) {
+            model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier));
+          }
+        } else {
+          model.addColumn(entry.getKey());
+        }
+      }
+    }
+    model.setStartTime(scan.getTimeRange().getMin());
+    model.setEndTime(scan.getTimeRange().getMax());
+    int caching = scan.getCaching();
+    if (caching > 0) {
+      model.setCaching(caching);
+    }
+    int batch = scan.getBatch();
+    if (batch > 0) {
+      model.setBatch(batch);
+    }
+    int maxVersions = scan.getMaxVersions();
+    if (maxVersions > 0) {
+      model.setMaxVersions(maxVersions);
+    }
+    Filter filter = scan.getFilter();
+    if (filter != null) {
+      model.setFilter(stringifyFilter(filter));
+    }
+    // Add the visibility labels if found in the attributes
+    Authorizations authorizations = scan.getAuthorizations();
+    if (authorizations != null) {
+      List<String> labels = authorizations.getLabels();
+      for (String label : labels) {
+        model.addLabel(label);
+      }
+    }
+    return model;
+  }
+
+  /**
+   * Default constructor
+   */
+  public ScannerModel() {}
+
+  /**
+   * Constructor
+   * @param startRow the start key of the row-range
+   * @param endRow the end key of the row-range
+   * @param columns the columns to scan
+   * @param batch the number of values to return in batch
+   * @param caching the number of rows that the scanner will fetch at once
+   * @param endTime the upper bound on timestamps of values of interest
+   * (values with timestamps later than this are excluded)
+   * @param maxVersions the maximum number of versions to return
+   * @param filter a filter specification
+   */
+  public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+      int batch, int caching, long endTime, int maxVersions, String filter) {
+    super();
+    this.startRow = startRow;
+    this.endRow = endRow;
+    this.columns = columns;
+    this.batch = batch;
+    this.caching = caching;
+    this.endTime = endTime;
+    this.maxVersions = maxVersions;
+    this.filter = filter;
+  }
+
+  /**
+   * Constructor 
+   * @param startRow the start key of the row-range
+   * @param endRow the end key of the row-range
+   * @param columns the columns to scan
+   * @param batch the number of values to return in batch
+   * @param caching the number of rows that the scanner will fetch at once
+   * @param startTime the lower bound on timestamps of values of interest
+   * (values with timestamps earlier than this are excluded)
+   * @param endTime the upper bound on timestamps of values of interest
+   * (values with timestamps later than this are excluded)
+   * @param filter a filter specification
+   */
+  public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+      int batch, int caching, long startTime, long endTime, String filter) {
+    super();
+    this.startRow = startRow;
+    this.endRow = endRow;
+    this.columns = columns;
+    this.batch = batch;
+    this.caching = caching;
+    this.startTime = startTime;
+    this.endTime = endTime;
+    this.filter = filter;
+  }
+
+  /**
+   * Add a column to the column set
+   * @param column the column name, as &lt;column&gt;(:&lt;qualifier&gt;)?
+   */
+  public void addColumn(byte[] column) {
+    columns.add(column);
+  }
+  
+  /**
+   * Add a visibility label to the scan
+   * @param label the label
+   */
+  public void addLabel(String label) {
+    labels.add(label);
+  }
+
+  /**
+   * @return true if a start row was specified
+   */
+  public boolean hasStartRow() {
+    return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
+  }
+
+  /**
+   * @return start row
+   */
+  @XmlAttribute
+  public byte[] getStartRow() {
+    return startRow;
+  }
+
+  /**
+   * @return true if an end row was specified
+   */
+  public boolean hasEndRow() {
+    return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
+  }
+
+  /**
+   * @return end row
+   */
+  @XmlAttribute
+  public byte[] getEndRow() {
+    return endRow;
+  }
+
+  /**
+   * @return list of columns of interest in column:qualifier format, or empty for all
+   */
+  @XmlElement(name="column")
+  public List<byte[]> getColumns() {
+    return columns;
+  }
+  
+  @XmlElement(name="labels")
+  public List<String> getLabels() {
+    return labels;
+  }
+
+  /**
+   * @return the number of cells to return in batch
+   */
+  @XmlAttribute
+  public int getBatch() {
+    return batch;
+  }
+
+  /**
+   * @return the number of rows that the scanner will fetch at once
+   */
+  @XmlAttribute
+  public int getCaching() {
+    return caching;
+  }
+
+  /**
+   * @return true if HFile blocks should be cached on the servers for this scan, false otherwise
+   */
+  @XmlAttribute
+  public boolean getCacheBlocks() {
+    return cacheBlocks;
+  }
+
+  /**
+   * @return the lower bound on timestamps of items of interest
+   */
+  @XmlAttribute
+  public long getStartTime() {
+    return startTime;
+  }
+
+  /**
+   * @return the upper bound on timestamps of items of interest
+   */
+  @XmlAttribute
+  public long getEndTime() {
+    return endTime;
+  }
+
+  /**
+   * @return maximum number of versions to return
+   */
+  @XmlAttribute
+  public int getMaxVersions() {
+    return maxVersions;
+  }
+
+  /**
+   * @return the filter specification
+   */
+  @XmlElement
+  public String getFilter() {
+    return filter;
+  }
+
+  /**
+   * @param startRow start row
+   */
+  public void setStartRow(byte[] startRow) {
+    this.startRow = startRow;
+  }
+
+  /**
+   * @param endRow end row
+   */
+  public void setEndRow(byte[] endRow) {
+    this.endRow = endRow;
+  }
+
+  /**
+   * @param columns list of columns of interest in column:qualifier format, or empty for all
+   */
+  public void setColumns(List<byte[]> columns) {
+    this.columns = columns;
+  }
+
+  /**
+   * @param batch the number of cells to return in batch
+   */
+  public void setBatch(int batch) {
+    this.batch = batch;
+  }
+
+  /**
+   * @param caching the number of rows to fetch at once
+   */
+  public void setCaching(int caching) {
+    this.caching = caching;
+  }
+
+  /**
+   * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise
+   */
+  public void setCacheBlocks(boolean value) {
+    this.cacheBlocks = value;
+  }
+
+  /**
+   * @param maxVersions maximum number of versions to return
+   */
+  public void setMaxVersions(int maxVersions) {
+    this.maxVersions = maxVersions;
+  }
+
+  /**
+   * @param startTime the lower bound on timestamps of values of interest
+   */
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  /**
+   * @param endTime the upper bound on timestamps of values of interest
+   */
+  public void setEndTime(long endTime) {
+    this.endTime = endTime;
+  }
+
+  /**
+   * @param filter the filter specification
+   */
+  public void setFilter(String filter) {
+    this.filter = filter;
+  }
+
+  @Override
+  public byte[] createProtobufOutput() {
+    Scanner.Builder builder = Scanner.newBuilder();
+    if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) {
+      builder.setStartRow(ByteStringer.wrap(startRow));
+    }
+    if (!Bytes.equals(endRow, HConstants.EMPTY_END_ROW)) {
+      builder.setEndRow(ByteStringer.wrap(endRow));
+    }
+    for (byte[] column: columns) {
+      builder.addColumns(ByteStringer.wrap(column));
+    }
+    if (startTime != 0) {
+      builder.setStartTime(startTime);
+    }
+    if (endTime != 0) {
+      builder.setEndTime(endTime);
+    }
+    builder.setBatch(getBatch());
+    if (caching > 0) {
+      builder.setCaching(caching);
+    }
+    builder.setMaxVersions(maxVersions);
+    if (filter != null) {
+      builder.setFilter(filter);
+    }
+    if (labels != null && !labels.isEmpty()) {
+      for (String label : labels) {
+        builder.addLabels(label);
+      }
+    }
+    builder.setCacheBlocks(cacheBlocks);
+    return builder.build().toByteArray();
+  }
+
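+  // Round-trip sketch (illustrative only): bytes produced by
+  // createProtobufOutput() can be read back on a fresh instance via
+  // getObjectFromMessage().
+  //
+  //   ScannerModel model = new ScannerModel();
+  //   model.setBatch(100);
+  //   byte[] pb = model.createProtobufOutput();
+  //   ScannerModel copy =
+  //       (ScannerModel) new ScannerModel().getObjectFromMessage(pb);
+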
+  @Override
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+      throws IOException {
+    Scanner.Builder builder = Scanner.newBuilder();
+    builder.mergeFrom(message);
+    if (builder.hasStartRow()) {
+      startRow = builder.getStartRow().toByteArray();
+    }
+    if (builder.hasEndRow()) {
+      endRow = builder.getEndRow().toByteArray();
+    }
+    for (ByteString column: builder.getColumnsList()) {
+      addColumn(column.toByteArray());
+    }
+    if (builder.hasBatch()) {
+      batch = builder.getBatch();
+    }
+    if (builder.hasCaching()) {
+      caching = builder.getCaching();
+    }
+    if (builder.hasStartTime()) {
+      startTime = builder.getStartTime();
+    }
+    if (builder.hasEndTime()) {
+      endTime = builder.getEndTime();
+    }
+    if (builder.hasMaxVersions()) {
+      maxVersions = builder.getMaxVersions();
+    }
+    if (builder.hasFilter()) {
+      filter = builder.getFilter();
+    }
+    for (String label : builder.getLabelsList()) {
+      addLabel(label);
+    }
+    if (builder.hasCacheBlocks()) {
+      this.cacheBlocks = builder.getCacheBlocks();
+    }
+    return this;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
new file mode 100644
index 0000000..3b044e7
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
@@ -0,0 +1,790 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Representation of the status of a storage cluster:
+ * <p>
+ * <ul>
+ * <li>regions: the total number of regions served by the cluster</li>
+ * <li>requests: the total number of requests per second handled by the
+ * cluster in the last reporting interval</li>
+ * <li>averageLoad: the average load of the region servers in the cluster</li>
+ * <li>liveNodes: detailed status of the live region servers</li>
+ * <li>deadNodes: the names of region servers declared dead</li>
+ * </ul>
+ * 
+ * <pre>
+ * &lt;complexType name="StorageClusterStatus"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="liveNode" type="tns:Node"
+ *       maxOccurs="unbounded" minOccurs="0"&gt;
+ *     &lt;/element&gt;
+ *     &lt;element name="deadNode" type="string" maxOccurs="unbounded"
+ *       minOccurs="0"&gt;
+ *     &lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="regions" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="requests" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="averageLoad" type="float"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ *
+ * &lt;complexType name="Node"&gt;
+ *   &lt;sequence&gt;
+ *     &lt;element name="region" type="tns:Region" 
+ *       maxOccurs="unbounded" minOccurs="0"&gt;&lt;/element&gt;
+ *   &lt;/sequence&gt;
+ *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="startCode" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="requests" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="heapSizeMB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="maxHeapSizeMB" type="int"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ *
+ * &lt;complexType name="Region"&gt;
+ *   &lt;attribute name="name" type="base64Binary"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="stores" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="storefiles" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="storefileSizeMB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="memstoreSizeMB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="storefileIndexSizeMB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="readRequestsCount" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="writeRequestsCount" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="rootIndexSizeKB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="totalStaticIndexSizeKB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="totalStaticBloomSizeKB" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="totalCompactingKVs" type="int"&gt;&lt;/attribute&gt;
+ *   &lt;attribute name="currentCompactedKVs" type="int"&gt;&lt;/attribute&gt;
+ * &lt;/complexType&gt;
+ * </pre>
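+ *
+ * <p>A minimal population sketch (illustrative only; the server name, start
+ * code, and sizes are made-up values):
+ * <pre>
+ * StorageClusterStatusModel status = new StorageClusterStatusModel();
+ * status.setRegions(1);
+ * Node node = status.addLiveNode("rs1.example.com:60020",
+ *   1412957596000L, 128, 1024);
+ * node.setRequests(0);
+ * byte[] pb = status.createProtobufOutput();
+ * </pre>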
+ */
+@XmlRootElement(name="ClusterStatus")
+@InterfaceAudience.Private
+public class StorageClusterStatusModel 
+    implements Serializable, ProtobufMessageHandler {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * Represents a region server.
+   */
+  public static class Node implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Represents a region hosted on a region server.
+     */
+    public static class Region implements Serializable {
+      private static final long serialVersionUID = 1L;
+
+      private byte[] name;
+      private int stores;
+      private int storefiles;
+      private int storefileSizeMB;
+      private int memstoreSizeMB;
+      private int storefileIndexSizeMB;
+      private long readRequestsCount;
+      private long writeRequestsCount;
+      private int rootIndexSizeKB;
+      private int totalStaticIndexSizeKB;
+      private int totalStaticBloomSizeKB;
+      private long totalCompactingKVs;
+      private long currentCompactedKVs;
+
+      /**
+       * Default constructor
+       */
+      public Region() {
+      }
+
+      /**
+       * Constructor
+       * @param name the region name
+       */
+      public Region(byte[] name) {
+        this.name = name;
+      }
+
+      /**
+       * Constructor
+       * @param name the region name
+       * @param stores the number of stores
+       * @param storefiles the number of store files
+       * @param storefileSizeMB total size of store files, in MB
+       * @param memstoreSizeMB total size of memstore, in MB
+       * @param storefileIndexSizeMB total size of store file indexes, in MB
+       * @param readRequestsCount the current total read requests made to the region
+       * @param writeRequestsCount the current total write requests made to the region
+       * @param rootIndexSizeKB total size of root-level indexes, in KB
+       * @param totalStaticIndexSizeKB total size of all index blocks, in KB
+       * @param totalStaticBloomSizeKB total size of all Bloom filter blocks, in KB
+       * @param totalCompactingKVs total number of key-values in currently running compactions
+       * @param currentCompactedKVs number of key-values compacted so far in currently running compactions
+       */
+      public Region(byte[] name, int stores, int storefiles,
+          int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
+          long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
+          int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
+          long totalCompactingKVs, long currentCompactedKVs) {
+        this.name = name;
+        this.stores = stores;
+        this.storefiles = storefiles;
+        this.storefileSizeMB = storefileSizeMB;
+        this.memstoreSizeMB = memstoreSizeMB;
+        this.storefileIndexSizeMB = storefileIndexSizeMB;
+        this.readRequestsCount = readRequestsCount;
+        this.writeRequestsCount = writeRequestsCount;
+        this.rootIndexSizeKB = rootIndexSizeKB;
+        this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
+        this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
+        this.totalCompactingKVs = totalCompactingKVs;
+        this.currentCompactedKVs = currentCompactedKVs;
+      }
+
+      /**
+       * @return the region name
+       */
+      @XmlAttribute
+      public byte[] getName() {
+        return name;
+      }
+
+      /**
+       * @return the number of stores
+       */
+      @XmlAttribute
+      public int getStores() {
+        return stores;
+      }
+
+      /**
+       * @return the number of store files 
+       */
+      @XmlAttribute
+      public int getStorefiles() {
+        return storefiles;
+      }
+
+      /**
+       * @return the total size of store files, in MB
+       */
+      @XmlAttribute
+      public int getStorefileSizeMB() {
+        return storefileSizeMB;
+      }
+
+      /**
+       * @return memstore size, in MB
+       */
+      @XmlAttribute
+      public int getMemstoreSizeMB() {
+        return memstoreSizeMB;
+      }
+
+      /**
+       * @return the total size of store file indexes, in MB
+       */
+      @XmlAttribute
+      public int getStorefileIndexSizeMB() {
+        return storefileIndexSizeMB;
+      }
+
+      /**
+       * @return the current total read requests made to region
+       */
+      @XmlAttribute
+      public long getReadRequestsCount() {
+        return readRequestsCount;
+      }
+
+      /**
+       * @return the current total write requests made to region
+       */
+      @XmlAttribute
+      public long getWriteRequestsCount() {
+        return writeRequestsCount;
+      }
+
+      /**
+       * @return The current total size of root-level indexes for the region, in KB.
+       */
+      @XmlAttribute
+      public int getRootIndexSizeKB() {
+        return rootIndexSizeKB;
+      }
+
+      /**
+       * @return The total size of static index, in KB
+       */
+      @XmlAttribute
+      public int getTotalStaticIndexSizeKB() {
+        return totalStaticIndexSizeKB;
+      }
+
+      /**
+       * @return The total size of static bloom, in KB
+       */
+      @XmlAttribute
+      public int getTotalStaticBloomSizeKB() {
+        return totalStaticBloomSizeKB;
+      }
+
+      /**
+       * @return The total number of compacting key-values 
+       */
+      @XmlAttribute
+      public long getTotalCompactingKVs() {
+        return totalCompactingKVs;
+      }
+
+      /**
+       * @return The number of current compacted key-values
+       */
+      @XmlAttribute
+      public long getCurrentCompactedKVs() {
+        return currentCompactedKVs;
+      }
+
+      /**
+       * @param readRequestsCount The current total read requests made to region
+       */
+      public void setReadRequestsCount(long readRequestsCount) {
+        this.readRequestsCount = readRequestsCount;
+      }
+
+      /**
+       * @param rootIndexSizeKB The current total size of root-level indexes
+       *                        for the region, in KB
+       */
+      public void setRootIndexSizeKB(int rootIndexSizeKB) {
+        this.rootIndexSizeKB = rootIndexSizeKB;
+      }
+
+      /**
+       * @param writeRequestsCount The current total write requests made to region
+       */
+      public void setWriteRequestsCount(long writeRequestsCount) {
+        this.writeRequestsCount = writeRequestsCount;
+      }
+
+      /**
+       * @param currentCompactedKVs The completed count of key values
+       *                            in currently running compaction
+       */
+      public void setCurrentCompactedKVs(long currentCompactedKVs) {
+        this.currentCompactedKVs = currentCompactedKVs;
+      }
+
+      /**
+       * @param totalCompactingKVs The total compacting key values
+       *                           in currently running compaction
+       */
+      public void setTotalCompactingKVs(long totalCompactingKVs) {
+        this.totalCompactingKVs = totalCompactingKVs;
+      }
+
+      /**
+       * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks,
+       *                               not just loaded into the block cache, in KB.
+       */
+      public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) {
+        this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
+      }
+
+      /**
+       * @param totalStaticIndexSizeKB The total size of all index blocks,
+       *                               not just the root level, in KB.
+       */
+      public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) {
+        this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
+      }
+
+      /**
+       * @param name the region name
+       */
+      public void setName(byte[] name) {
+        this.name = name;
+      }
+
+      /**
+       * @param stores the number of stores
+       */
+      public void setStores(int stores) {
+        this.stores = stores;
+      }
+
+      /**
+       * @param storefiles the number of store files
+       */
+      public void setStorefiles(int storefiles) {
+        this.storefiles = storefiles;
+      }
+
+      /**
+       * @param storefileSizeMB total size of store files, in MB
+       */
+      public void setStorefileSizeMB(int storefileSizeMB) {
+        this.storefileSizeMB = storefileSizeMB;
+      }
+
+      /**
+       * @param memstoreSizeMB memstore size, in MB
+       */
+      public void setMemstoreSizeMB(int memstoreSizeMB) {
+        this.memstoreSizeMB = memstoreSizeMB;
+      }
+
+      /**
+       * @param storefileIndexSizeMB total size of store file indexes, in MB
+       */
+      public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
+        this.storefileIndexSizeMB = storefileIndexSizeMB;
+      }
+    }
+
+    private String name;
+    private long startCode;
+    private int requests;
+    private int heapSizeMB;
+    private int maxHeapSizeMB;
+    private List<Region> regions = new ArrayList<Region>();
+
+    /**
+     * Add a region to the list of regions served by this node. The
+     * remaining parameters carry the region's load metrics and mirror
+     * the {@link Region} constructor.
+     * @param name the region name
+     */
+    public void addRegion(byte[] name, int stores, int storefiles,
+        int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
+        long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
+        int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
+        long totalCompactingKVs, long currentCompactedKVs) { 
+      regions.add(new Region(name, stores, storefiles, storefileSizeMB,
+        memstoreSizeMB, storefileIndexSizeMB, readRequestsCount,
+        writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
+        totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
+    }
+
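+    // An illustrative call (all numbers made up): one region with one store,
+    // two store files, and zeroed request/compaction counters.
+    //
+    //   addRegion(Bytes.toBytes("t1,,1234567890"), 1, 2, 8, 4, 1,
+    //       0L, 0L, 0, 0, 0, 0L, 0L);
+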
+    /**
+     * @param index the index
+     * @return the region model at the given index
+     */
+    public Region getRegion(int index) {
+      return regions.get(index);
+    }
+
+    /**
+     * Default constructor
+     */
+    public Node() {}
+
+    /**
+     * Constructor
+     * @param name the region server name
+     * @param startCode the region server's start code
+     */
+    public Node(String name, long startCode) {
+      this.name = name;
+      this.startCode = startCode;
+    }
+
+    /**
+     * @return the region server's name
+     */
+    @XmlAttribute
+    public String getName() {
+      return name;
+    }
+
+    /**
+     * @return the region server's start code
+     */
+    @XmlAttribute
+    public long getStartCode() {
+      return startCode;
+    }
+
+    /**
+     * @return the current heap size, in MB
+     */
+    @XmlAttribute
+    public int getHeapSizeMB() {
+      return heapSizeMB;
+    }
+
+    /**
+     * @return the maximum heap size, in MB
+     */
+    @XmlAttribute
+    public int getMaxHeapSizeMB() {
+      return maxHeapSizeMB;
+    }
+
+    /**
+     * @return the list of regions served by the region server
+     */
+    @XmlElement(name="Region")
+    public List<Region> getRegions() {
+      return regions;
+    }
+
+    /**
+     * @return the number of requests per second processed by the region server
+     */
+    @XmlAttribute
+    public int getRequests() {
+      return requests;
+    }
+
+    /**
+     * @param name the region server's name
+     */
+    public void setName(String name) {
+      this.name = name;
+    }
+
+    /**
+     * @param startCode the region server's start code
+     */
+    public void setStartCode(long startCode) {
+      this.startCode = startCode;
+    }
+
+    /**
+     * @param heapSizeMB the current heap size, in MB
+     */
+    public void setHeapSizeMB(int heapSizeMB) {
+      this.heapSizeMB = heapSizeMB;
+    }
+
+    /**
+     * @param maxHeapSizeMB the maximum heap size, in MB
+     */
+    public void setMaxHeapSizeMB(int maxHeapSizeMB) {
+      this.maxHeapSizeMB = maxHeapSizeMB;
+    }
+
+    /**
+     * @param regions a list of regions served by the region server
+     */
+    public void setRegions(List<Region> regions) {
+      this.regions = regions;
+    }
+
+    /**
+     * @param requests the number of requests per second processed by the
+     * region server
+     */
+    public void setRequests(int requests) {
+      this.requests = requests;
+    }
+  }
+
+  private List<Node> liveNodes = new ArrayList<Node>();
+  private List<String> deadNodes = new ArrayList<String>();
+  private int regions;
+  private int requests;
+  private double averageLoad;
+
+  /**
+   * Add a live node to the cluster representation.
+   * @param name the region server name
+   * @param startCode the region server's start code
+   * @param heapSizeMB the current heap size, in MB
+   * @param maxHeapSizeMB the maximum heap size, in MB
+   * @return the added node model
+   */
+  public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) {
+    Node node = new Node(name, startCode);
+    node.setHeapSizeMB(heapSizeMB);
+    node.setMaxHeapSizeMB(maxHeapSizeMB);
+    liveNodes.add(node);
+    return node;
+  }
+
+  /**
+   * @param index the index
+   * @return the region server model
+   */
+  public Node getLiveNode(int index) {
+    return liveNodes.get(index);
+  }
+
+  /**
+   * Add a dead node to the cluster representation.
+   * @param node the dead region server's name
+   */
+  public void addDeadNode(String node) {
+    deadNodes.add(node);
+  }
+
+  /**
+   * @param index the index
+   * @return the dead region server's name
+   */
+  public String getDeadNode(int index) {
+    return deadNodes.get(index);
+  }
+
+  /**
+   * Default constructor
+   */
+  public StorageClusterStatusModel() {
+  }
+
+  /**
+   * @return the list of live nodes
+   */
+  @XmlElement(name = "Node")
+  @XmlElementWrapper(name = "LiveNodes")
+  public List<Node> getLiveNodes() {
+    return liveNodes;
+  }
+
+  /**
+   * @return the list of dead nodes
+   */
+  @XmlElement(name = "Node")
+  @XmlElementWrapper(name = "DeadNodes")
+  public List<String> getDeadNodes() {
+    return deadNodes;
+  }
+
+  /**
+   * @return the total number of regions served by the cluster
+   */
+  @XmlAttribute
+  public int getRegions() {
+    return regions;
+  }
+
+  /**
+   * @return the total number of requests per second handled by the cluster in
+   * the last reporting interval
+   */
+  @XmlAttribute
+  public int getRequests() {
+    return requests;
+  }
+
+  /**
+   * @return the average load of the region servers in the cluster
+   */
+  @XmlAttribute
+  public double getAverageLoad() {
+    return averageLoad;
+  }
+
+  /**
+   * @param nodes the list of live node models
+   */
+  public void setLiveNodes(List<Node> nodes) {
+    this.liveNodes = nodes;
+  }
+
+  /**
+   * @param nodes the list of dead node names
+   */
+  public void setDeadNodes(List<String> nodes) {
+    this.deadNodes = nodes;
+  }
+
+  /**
+   * @param regions the total number of regions served by the cluster
+   */
+  public void setRegions(int regions) {
+    this.regions = regions;
+  }
+
+  /**
+   * @param requests the total number of requests per second handled by the
+   * cluster
+   */
+  public void setRequests(int requests) {
+    this.requests = requests;
+  }
+
+  /**
+   * @param averageLoad the average load of region servers in the cluster
+   */
+  public void setAverageLoad(double averageLoad) {
+    this.averageLoad = averageLoad;
+  }
+
+  /*
+   * (non-Javadoc)
+   * @see java.lang.Object#toString()
+   */
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(String.format("%d live servers, %d dead servers, " + 
+      "%.4f average load%n%n", liveNodes.size(), deadNodes.size(),
+      averageLoad));
+    if (!liveNodes.isEmpty()) {
+      sb.append(liveNodes.size());
+      sb.append(" live servers\n");
+      for (Node node: liveNodes) {
+        sb.append("    ");
+        sb.append(node.name);
+        sb.append(' ');
+        sb.append(node.startCode);
+        sb.append("\n        requests=");
+        sb.append(node.requests);
+        sb.append(", regions=");
+        sb.append(node.regions.size());
+        sb.append("\n        heapSizeMB=");
+        sb.append(node.heapSizeMB);
+        sb.append("\n        maxHeapSizeMB=");
+        sb.append(node.maxHeapSizeMB);
+        sb.append("\n\n");
+        for (Node.Region region: node.regions) {
+          sb.append("        ");
+          sb.append(Bytes.toString(region.name));
+          sb.append("\n            stores=");
+          sb.append(region.stores);
+          sb.append("\n            storefiless=");
+          sb.append(region.storefiles);
+          sb.append("\n            storefileSizeMB=");
+          sb.append(region.storefileSizeMB);
+          sb.append("\n            memstoreSizeMB=");
+          sb.append(region.memstoreSizeMB);
+          sb.append("\n            storefileIndexSizeMB=");
+          sb.append(region.storefileIndexSizeMB);
+          sb.append("\n            readRequestsCount=");
+          sb.append(region.readRequestsCount);
+          sb.append("\n            writeRequestsCount=");
+          sb.append(region.writeRequestsCount);
+          sb.append("\n            rootIndexSizeKB=");
+          sb.append(region.rootIndexSizeKB);
+          sb.append("\n            totalStaticIndexSizeKB=");
+          sb.append(region.totalStaticIndexSizeKB);
+          sb.append("\n            totalStaticBloomSizeKB=");
+          sb.append(region.totalStaticBloomSizeKB);
+          sb.append("\n            totalCompactingKVs=");
+          sb.append(region.totalCompactingKVs);
+          sb.append("\n            currentCompactedKVs=");
+          sb.append(region.currentCompactedKVs);
+          sb.append('\n');
+        }
+        sb.append('\n');
+      }
+    }
+    if (!deadNodes.isEmpty()) {
+      sb.append('\n');
+      sb.append(deadNodes.size());
+      sb.append(" dead servers\n");
+      for (String node: deadNodes) {
+        sb.append("    ");
+        sb.append(node);
+        sb.append('\n');
+      }
+    }
+    return sb.toString();
+  }
+
+  @Override
+  public byte[] createProtobufOutput() {
+    StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+    builder.setRegions(regions);
+    builder.setRequests(requests);
+    builder.setAverageLoad(averageLoad);
+    for (Node node: liveNodes) {
+      StorageClusterStatus.Node.Builder nodeBuilder = 
+        StorageClusterStatus.Node.newBuilder();
+      nodeBuilder.setName(node.name);
+      nodeBuilder.setStartCode(node.startCode);
+      nodeBuilder.setRequests(node.requests);
+      nodeBuilder.setHeapSizeMB(node.heapSizeMB);
+      nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB);
+      for (Node.Region region: node.regions) {
+        StorageClusterStatus.Region.Builder regionBuilder =
+          StorageClusterStatus.Region.newBuilder();
+        regionBuilder.setName(ByteStringer.wrap(region.name));
+        regionBuilder.setStores(region.stores);
+        regionBuilder.setStorefiles(region.storefiles);
+        regionBuilder.setStorefileSizeMB(region.storefileSizeMB);
+        regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB);
+        regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB);
+        regionBuilder.setReadRequestsCount(region.readRequestsCount);
+        regionBuilder.setWriteRequestsCount(region.writeRequestsCount);
+        regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB);
+        regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB);
+        regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB);
+        regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs);
+        regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs);
+        nodeBuilder.addRegions(regionBuilder);
+      }
+      builder.addLiveNodes(nodeBuilder);
+    }
+    for (String node: deadNodes) {
+      builder.addDeadNodes(node);
+    }
+    return builder.build().toByteArray();
+  }
+
+  @Override
+  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+      throws IOException {
+    StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+    builder.mergeFrom(message);
+    if (builder.hasRegions()) {
+      regions = builder.getRegions();
+    }
+    if (builder.hasRequests()) {
+      requests = builder.getRequests();
+    }
+    if (builder.hasAverageLoad()) {
+      averageLoad = builder.getAverageLoad();
+    }
+    for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
+      long startCode = node.hasStartCode() ? node.getStartCode() : -1;
+      StorageClusterStatusModel.Node nodeModel = 
+        addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
+          node.getMaxHeapSizeMB());
+      int requests = node.hasRequests() ? node.getRequests() : 0;
+      nodeModel.setRequests(requests);
+      for (StorageClusterStatus.Region region: node.getRegionsList()) {
+        nodeModel.addRegion(
+          region.getName().toByteArray(),
+          region.getStores(),
+          region.getStorefiles(),
+          region.getStorefileSizeMB(),
+          region.getMemstoreSizeMB(),
+          region.getStorefileIndexSizeMB(),
+          region.getReadRequestsCount(),
+          region.getWriteRequestsCount(),
+          region.getRootIndexSizeKB(),
+          region.getTotalStaticIndexSizeKB(),
+          region.getTotalStaticBloomSizeKB(),
+          region.getTotalCompactingKVs(),
+          region.getCurrentCompactedKVs());
+      }
+    }
+    for (String node: builder.getDeadNodesList()) {
+      addDeadNode(node);
+    }
+    return this;
+  }
+}