Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2016/03/09 19:20:35 UTC

[2/2] hadoop git commit: HDFS-9916. OzoneHandler : Add Key handler. Contributed by Anu Engineer.

HDFS-9916. OzoneHandler : Add Key handler. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28f770d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28f770d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28f770d8

Branch: refs/heads/HDFS-7240
Commit: 28f770d8b0ead12d0d80bc4dfcb29d5e5eb5f380
Parents: ea0bb4a
Author: Chris Nauroth <cn...@apache.org>
Authored: Wed Mar 9 10:20:24 2016 -0800
Committer: Chris Nauroth <cn...@apache.org>
Committed: Wed Mar 9 10:20:24 2016 -0800

----------------------------------------------------------------------
 .../hadoop/ozone/web/handlers/KeyArgs.java      | 125 +++++++++
 .../hadoop/ozone/web/handlers/KeyHandler.java   | 212 +++++++++++++++
 .../ozone/web/handlers/KeyProcessTemplate.java  | 208 +++++++++++++++
 .../hadoop/ozone/web/handlers/ListArgs.java     | 106 ++++++++
 .../hadoop/ozone/web/interfaces/Accounting.java |  57 ++++
 .../hadoop/ozone/web/interfaces/Keys.java       | 118 +++++++++
 .../LengthInputStreamMessageBodyWriter.java     |  59 +++++
 .../hadoop/ozone/web/response/KeyInfo.java      | 261 +++++++++++++++++++
 .../hadoop/ozone/web/response/ListKeys.java     | 183 +++++++++++++
 9 files changed, 1329 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java
new file mode 100644
index 0000000..754c333
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.handlers;
+
+/**
+ * Class that packages all key Arguments.
+ */
+public class KeyArgs extends BucketArgs {
+  private String key;
+  private boolean delete;
+  private String hash;
+  private long size;
+
+  /**
+   * Constructor for Key Args.
+   *
+   * @param volumeName - Volume Name
+   * @param bucketName - Bucket Name
+   * @param objectName - Key
+   * @param args - User Args
+   */
+  public KeyArgs(String volumeName, String bucketName,
+                 String objectName, UserArgs args) {
+    super(volumeName, bucketName, args);
+    this.key = objectName;
+  }
+
+  /**
+   * Get Key Name.
+   *
+   * @return String
+   */
+  public String getKeyName() {
+    return this.key;
+  }
+
+  /**
+   * Checks if this request is for a Delete key.
+   *
+   * @return boolean
+   */
+  public boolean isDelete() {
+    return delete;
+  }
+
+  /**
+   * Sets the key request as a Delete Request.
+   *
+   * @param delete bool, indicating if this is a delete request
+   */
+  public void setDelete(boolean delete) {
+    this.delete = delete;
+  }
+
+  /**
+   * Computed File hash.
+   *
+   * @return String
+   */
+  public String getHash() {
+    return hash;
+  }
+
+  /**
+   * Sets the hash String.
+   *
+   * @param hash String
+   */
+  public void setHash(String hash) {
+    this.hash = hash;
+  }
+
+  /**
+   * Returns the file size.
+   *
+   * @return long - file size
+   */
+  public long getSize() {
+    return size;
+  }
+
+  /**
+   * Set Size.
+   *
+   * @param size Size of the file
+   */
+  public void setSize(long size) {
+    this.size = size;
+  }
+
+  /**
+   * Returns the name of the resource.
+   *
+   * @return String
+   */
+  @Override
+  public String getResourceName() {
+    return super.getResourceName() + "/" + getKeyName();
+  }
+
+  /**
+   * Parent name of this resource.
+   *
+   * @return String.
+   */
+  @Override
+  public String getParentName() {
+    return super.getResourceName();
+  }
+}
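
As a usage sketch (not part of the patch), the snippet below shows how KeyArgs
composes the hierarchical resource name. It is assumed to run inside a handler
where the JAX-RS context objects and request metadata are in scope, as in
KeyProcessTemplate.handleCall later in this commit; the exact resource string
depends on BucketArgs.getResourceName(), which is not shown in this diff.

    UserArgs userArgs = new UserArgs(reqID, hostName, request, info, headers);
    KeyArgs keyArgs = new KeyArgs("vol1", "bucket1", "key1", userArgs);
    keyArgs.setSize(1024);
    String resource = keyArgs.getResourceName(); // e.g. ".../vol1/bucket1/key1"
    String parent = keyArgs.getParentName();     // the enclosing bucket path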

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
new file mode 100644
index 0000000..bf0643e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.handlers;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.interfaces.Keys;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Request;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+import static java.net.HttpURLConnection.HTTP_CREATED;
+import static java.net.HttpURLConnection.HTTP_OK;
+
+/**
+ * KeyHandler deals with basic Key Operations.
+ */
+public class KeyHandler implements Keys {
+
+  /**
+   * Gets the Key if it exists.
+   *
+   * @param volume  Storage Volume
+   * @param bucket  Name of the bucket
+   * @param key     Name of the key
+   * @param req     Request
+   * @param info    - UriInfo
+   * @param headers Http Header
+   * @return Response
+   * @throws OzoneException
+   */
+  @Override
+  public Response getKey(String volume, String bucket, String key,
+                         Request req, UriInfo info, HttpHeaders headers)
+      throws OzoneException {
+    return new KeyProcessTemplate() {
+      /**
+       * Abstract function that gets implemented in the KeyHandler functions.
+       * This function will just deal with the core file system related logic
+       * and will rely on handleCall function for repetitive error checks
+       *
+       * @param args - parsed bucket args, name, userName, ACLs etc
+       * @param input - The body as an Input Stream
+       * @param request - Http request
+       * @param headers - Parsed http Headers.
+       * @param info - UriInfo
+       *
+       * @return Response
+       *
+       * @throws IOException - From the file system operations
+       */
+      @Override
+      public Response doProcess(KeyArgs args, InputStream input,
+                                Request request, HttpHeaders headers,
+                                UriInfo info)
+          throws IOException, OzoneException, NoSuchAlgorithmException {
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+        LengthInputStream stream = fs.newKeyReader(args);
+        return OzoneUtils.getResponse(args, HTTP_OK, stream);
+      }
+    }.handleCall(volume, bucket, key, req, headers, info, null);
+  }
+
+  /**
+   * Adds a key to an existing bucket. If the object already exists, this
+   * call overwrites it, or adds a new version if bucket versioning is
+   * turned on.
+   *
+   * @param volume  Storage Volume Name
+   * @param bucket  Name of the bucket
+   * @param keys    Name of the Object
+   * @param is      InputStream or File Data
+   * @param req     Request
+   * @param info    - UriInfo
+   * @param headers http headers
+   * @return Response
+   * @throws OzoneException
+   */
+  @Override
+  public Response putKey(String volume, String bucket, String keys,
+                         InputStream is, Request req, UriInfo info,
+                         HttpHeaders headers) throws OzoneException {
+
+    return new KeyProcessTemplate() {
+      /**
+       * Abstract function that gets implemented in the KeyHandler functions.
+       * This function will just deal with the core file system related logic
+       * and will rely on handleCall function for repetitive error checks
+       *
+       * @param args - parsed bucket args, name, userName, ACLs etc
+       * @param input - The body as an Input Stream
+       * @param request - Http request
+       * @param headers - Parsed http Headers.
+       * @param info - UriInfo
+       *
+       * @return Response
+       *
+       * @throws IOException - From the file system operations
+       */
+      @Override
+      public Response doProcess(KeyArgs args, InputStream input,
+                                Request request, HttpHeaders headers,
+                                UriInfo info)
+          throws IOException, OzoneException, NoSuchAlgorithmException {
+        final int eof = -1;
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+
+        byte[] buffer = new byte[4 * 1024];
+        String contentLenString = getContentLength(headers, args);
+        String newLen = contentLenString.replaceAll("\"", "");
+        int contentLen = Integer.parseInt(newLen);
+
+        MessageDigest md5 = MessageDigest.getInstance("MD5");
+        int bytesRead = 0;
+        int len = 0;
+        OutputStream stream = fs.newKeyWriter(args);
+        while ((bytesRead < contentLen) && (len != eof)) {
+          int readSize =
+              (contentLen - bytesRead > buffer.length) ? buffer.length :
+                  contentLen - bytesRead;
+          len = input.read(buffer, 0, readSize);
+          if (len != eof) {
+            stream.write(buffer, 0, len);
+            md5.update(buffer, 0, len);
+            bytesRead += len;
+
+          }
+        }
+
+        checkFileLengthMatch(args, fs, contentLen, bytesRead);
+
+        String hashString = Hex.encodeHexString(md5.digest());
+// TODO : Enable hash value checking.
+//          String contentHash = getContentMD5(headers, args);
+//          checkFileHashMatch(args, hashString, fs, contentHash);
+        args.setHash(hashString);
+        args.setSize(bytesRead);
+        fs.commitKey(args, stream);
+        return OzoneUtils.getResponse(args, HTTP_CREATED, "");
+      }
+    }.handleCall(volume, bucket, keys, req, headers, info, is);
+  }
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param volume  Storage Volume Name
+   * @param bucket  Name of the bucket
+   * @param keys    Name of the Object
+   * @param req     http Request
+   * @param info    - UriInfo
+   * @param headers HttpHeaders
+   * @return Response
+   * @throws OzoneException
+   */
+  @Override
+  public Response deleteKey(String volume, String bucket, String keys,
+                            Request req, UriInfo info, HttpHeaders headers)
+      throws OzoneException {
+    return new KeyProcessTemplate() {
+      /**
+       * Abstract function that gets implemented in the KeyHandler functions.
+       * This function will just deal with the core file system related logic
+       * and will rely on handleCall function for repetitive error checks
+       *
+       * @param args - parsed bucket args, name, userName, ACLs etc
+       * @param input - The body as an Input Stream
+       * @param request - Http request
+       * @param headers - Parsed http Headers.
+       * @param info - UriInfo
+       *
+       * @return Response
+       *
+       * @throws IOException - From the file system operations
+       */
+      @Override
+      public Response doProcess(KeyArgs args, InputStream input,
+                                Request request, HttpHeaders headers,
+                                UriInfo info)
+          throws IOException, OzoneException, NoSuchAlgorithmException {
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+        fs.deleteKey(args);
+        return OzoneUtils.getResponse(args, HTTP_OK, "");
+      }
+    }.handleCall(volume, bucket, keys, req, headers, info, null);
+  }
+}
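
The heart of putKey is the bounded read loop above. Extracted as a standalone
sketch (same logic, with the ternary replaced by the equivalent Math.min), it
copies exactly contentLen bytes or stops at EOF, updating an MD5 digest as it
goes; the caller compares the return value against contentLen via
checkFileLengthMatch:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.security.MessageDigest;

    final class BoundedCopy {
      static int copyWithDigest(InputStream in, OutputStream out,
                                int contentLen, MessageDigest md5)
          throws IOException {
        final int eof = -1;
        byte[] buffer = new byte[4 * 1024];
        int bytesRead = 0;
        int len = 0;
        while ((bytesRead < contentLen) && (len != eof)) {
          // Never read past the declared Content-Length.
          int readSize = Math.min(contentLen - bytesRead, buffer.length);
          len = in.read(buffer, 0, readSize);
          if (len != eof) {
            out.write(buffer, 0, len);
            md5.update(buffer, 0, len);
            bytesRead += len;
          }
        }
        return bytesRead;
      }
    }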

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
new file mode 100644
index 0000000..7607434
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.handlers;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.headers.Header;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.interfaces.UserAuth;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Request;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.BAD_DIGEST;
+import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INCOMPLETE_BODY;
+import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_BUCKET_NAME;
+import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_REQUEST;
+import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.SERVER_ERROR;
+import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.newError;
+
+/**
+ * This class abstracts away the repetitive tasks in Key handling code.
+ */
+public abstract class KeyProcessTemplate {
+
+  /**
+   * This function serves as the common error handling function for all Key
+   * related operations.
+   *
+   * @param volume  volume Name
+   * @param bucket  bucket Name
+   * @param key     the object name
+   * @param request Http request
+   * @param headers Http headers
+   * @param info    UriInfo
+   * @param is      Input stream carrying the request body
+   * @return Response
+   * @throws OzoneException
+   */
+  public Response handleCall(String volume, String bucket, String key,
+                             Request request, HttpHeaders headers, UriInfo info,
+                             InputStream is) throws OzoneException {
+
+    String reqID = OzoneUtils.getRequestID();
+    String hostName = OzoneUtils.getHostName();
+    UserArgs userArgs = null;
+    try {
+      OzoneUtils.validate(request, headers, reqID, bucket, hostName);
+      OzoneUtils.verifyBucketName(bucket);
+
+      UserAuth auth = UserHandlerBuilder.getAuthHandler();
+      userArgs = new UserArgs(reqID, hostName, request, info, headers);
+      userArgs.setUserName(auth.getUser(userArgs));
+
+      KeyArgs args = new KeyArgs(volume, bucket, key, userArgs);
+      return doProcess(args, is, request, headers, info);
+    } catch (IllegalArgumentException argExp) {
+      OzoneException ex =
+          newError(INVALID_BUCKET_NAME, reqID, bucket, hostName);
+      ex.setMessage(argExp.getMessage());
+      throw ex;
+    } catch (IOException fsExp) {
+      // TODO : Handle errors from the FileSystem , let us map to server error
+      // for now.
+      throw ErrorTable.newError(ErrorTable.SERVER_ERROR, userArgs, fsExp);
+    } catch (NoSuchAlgorithmException algoEx) {
+      OzoneException ex =
+          ErrorTable.newError(SERVER_ERROR, reqID, key, hostName);
+      ex.setMessage(algoEx.getMessage());
+      throw ex;
+    }
+  }
+
+  /**
+   * Abstract function that gets implemented in the KeyHandler functions. This
+   * function will just deal with the core file system related logic and will
+   * rely on handleCall function for repetitive error checks
+   *
+   * @param args    - parsed bucket args, name, userName, ACLs etc
+   * @param input   - The body as an Input Stream
+   * @param request - Http request
+   * @param headers - Parsed http Headers.
+   * @param info    - UriInfo
+   * @return Response
+   * @throws IOException - From the file system operations
+   */
+  public abstract Response doProcess(KeyArgs args, InputStream input,
+                                     Request request, HttpHeaders headers,
+                                     UriInfo info)
+      throws IOException, OzoneException, NoSuchAlgorithmException;
+
+  /**
+   * Checks if the Content-MD5 the client sent matches the hash we computed
+   * from the stream. If it does not match, we delete the file and throw an
+   * exception to let the user know that we have a hash mismatch.
+   *
+   * @param args           Object Args
+   * @param computedString MD5 hash value
+   * @param fs             Pointer to File System so we can delete the file
+   * @param contentHash    User Specified hash string
+   * @throws IOException
+   * @throws OzoneException
+   */
+  public void checkFileHashMatch(KeyArgs args, String computedString,
+                                 StorageHandler fs, String contentHash)
+      throws IOException, OzoneException {
+    if (contentHash != null) {
+      String contentString =
+          new String(Base64.decodeBase64(contentHash), OzoneUtils.ENCODING)
+              .trim();
+
+      if (!contentString.equals(computedString)) {
+        fs.deleteKey(args);
+        OzoneException ex = ErrorTable.newError(BAD_DIGEST, args.getRequestID(),
+            args.getKeyName(), args.getHostName());
+        ex.setMessage(String.format("MD5 Digest mismatch. Expected %s Found " +
+            "%s", contentString, computedString));
+        throw ex;
+      }
+    }
+  }
+
+  /**
+   * Checks if the Content-Length matches the actual stream length. If we find
+   * a mismatch, we delete the file and throw an exception to let the user
+   * know that a length mismatch was detected.
+   *
+   * @param args       Object Args
+   * @param fs         Pointer to File System Object, to delete the file that we
+   *                   wrote
+   * @param contentLen Http Content-Length Header
+   * @param bytesRead  Actual Bytes we read from the stream
+   * @throws IOException
+   * @throws OzoneException
+   */
+  public void checkFileLengthMatch(KeyArgs args, StorageHandler fs,
+                                   int contentLen, int bytesRead)
+      throws IOException, OzoneException {
+    if (bytesRead != contentLen) {
+      fs.deleteKey(args);
+      OzoneException ex = ErrorTable.newError(INCOMPLETE_BODY,
+          args.getRequestID(), args.getKeyName(), args.getHostName());
+      ex.setMessage(String.format("Body length mismatch. Expected length : %d" +
+          " Found %d", contentLen, bytesRead));
+      throw ex;
+    }
+  }
+
+  /**
+   * Returns the Content-Length header value. The header is required for
+   * putting a key, so a missing header raises an error.
+   *
+   * @param headers - Http Headers
+   * @param args - Key Args
+   * @return - String
+   * @throws OzoneException
+   */
+  public String getContentLength(HttpHeaders headers, KeyArgs args)
+      throws OzoneException {
+    List<String> contentLengthList =
+        headers.getRequestHeader(HttpHeaders.CONTENT_LENGTH);
+    if ((contentLengthList != null) && (contentLengthList.size() > 0)) {
+      return contentLengthList.get(0);
+    }
+
+    OzoneException ex = ErrorTable.newError(INVALID_REQUEST, args);
+    ex.setMessage("Content-Length is a required header for putting a key.");
+    throw ex;
+
+  }
+
+  /**
+   * Returns the Content-MD5 header value if available.
+   *
+   * @param headers - Http Headers
+   * @param args - Key Args
+   * @return - String, empty if the header is absent
+   */
+  public String getContentMD5(HttpHeaders headers, KeyArgs args) {
+    List<String> contentLengthList =
+        headers.getRequestHeader(Header.CONTENT_MD5);
+    if ((contentLengthList != null) && (contentLengthList.size() > 0)) {
+      return contentLengthList.get(0);
+    }
+// TODO : Should we make this compulsory ?
+//    OzoneException ex = ErrorTable.newError(ErrorTable.invalidRequest, args);
+//    ex.setMessage("Content-MD5 is a required header for putting a key");
+//    throw ex;
+    return "";
+  }
+}
+
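
KeyProcessTemplate is a classic template method: handleCall performs
validation, authentication, and error mapping once, and each REST operation
supplies only doProcess. A hypothetical additional operation (not in this
patch; it reuses the deleteKey storage call purely for illustration) would
need nothing more than:

    // Imports as in KeyHandler above.
    public class ExampleKeyHandler extends KeyProcessTemplate {
      @Override
      public Response doProcess(KeyArgs args, InputStream input,
                                Request request, HttpHeaders headers,
                                UriInfo info)
          throws IOException, OzoneException, NoSuchAlgorithmException {
        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
        fs.deleteKey(args); // the single storage operation for this handler
        return OzoneUtils.getResponse(args, HTTP_OK, "");
      }
    }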

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java
new file mode 100644
index 0000000..892b97a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.handlers;
+
+/**
+ * Supports listing keys with pagination.
+ */
+public class ListArgs extends BucketArgs {
+  private String startPage;
+  private String prefix;
+  private int maxKeys;
+
+  /**
+   * Constructor for ListArgs.
+   *
+   * @param args      - BucketArgs
+   * @param prefix    Prefix to start Query from
+   * @param maxKeys   Max result set
+   * @param startPage - Page token
+   */
+  public ListArgs(BucketArgs args, String prefix, int maxKeys,
+                  String startPage) {
+    super(args);
+    setPrefix(prefix);
+    setMaxKeys(maxKeys);
+    setStartPage(startPage);
+  }
+
+  /**
+   * Copy Constructor for ListArgs.
+   *
+   * @param args - List Args
+   */
+  public ListArgs(ListArgs args) {
+    this(args, args.getPrefix(), args.getMaxKeys(), args.getStartPage());
+  }
+
+  /**
+   * Returns page token.
+   *
+   * @return String
+   */
+  public String getStartPage() {
+    return startPage;
+  }
+
+  /**
+   * Sets page token.
+   *
+   * @param startPage - Page token
+   */
+  public void setStartPage(String startPage) {
+    this.startPage = startPage;
+  }
+
+  /**
+   * Gets max keys.
+   *
+   * @return int
+   */
+  public int getMaxKeys() {
+    return maxKeys;
+  }
+
+  /**
+   * Sets max keys.
+   *
+   * @param maxKeys - Maximum keys to return
+   */
+  public void setMaxKeys(int maxKeys) {
+    this.maxKeys = maxKeys;
+  }
+
+  /**
+   * Gets prefix.
+   *
+   * @return String
+   */
+  public String getPrefix() {
+    return prefix;
+  }
+
+  /**
+   * Sets prefix.
+   *
+   * @param prefix - The prefix that we are looking for
+   */
+  public void setPrefix(String prefix) {
+    this.prefix = prefix;
+  }
+}
\ No newline at end of file
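
A pagination sketch, assuming the caller already holds a BucketArgs and a page
token returned by a previous response (the storage-side list call itself is
not part of this patch):

    // First page: up to 1000 keys under the "photos/" prefix, no token.
    ListArgs firstPage = new ListArgs(bucketArgs, "photos/", 1000, null);
    // Later pages reuse the same prefix and maxKeys via the copy constructor,
    // feeding back the opaque token from the previous response.
    ListArgs nextPage = new ListArgs(firstPage);
    nextPage.setStartPage(tokenFromPreviousResponse);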

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java
new file mode 100644
index 0000000..f03276c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Accounting.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.interfaces;
+
+/**
+ * This is the accounting interface. The Ozone REST interface will call into
+ * this interface whenever a put or delete key happens.
+ * <p>
+ * TODO : Technically we need to report bucket creation and deletion too
+ * since the bucket names and metadata consume storage.
+ * <p>
+ * TODO : We should separate out reporting of metadata and data --
+ * <p>
+ * In some cases end users will only want to account for the data they are
+ * storing since metadata is mostly a cost of doing business.
+ */
+public interface Accounting {
+  /**
+   * This call is made whenever a put key call is made.
+   * <p>
+   * In case of a put which causes an overwrite of a key, the accounting
+   * system will see two calls: a removeBytes call followed by an addBytes
+   * call.
+   *
+   * @param owner  - Volume Owner
+   * @param volume - Name of the Volume
+   * @param bucket - Name of the bucket
+   * @param bytes  - How many bytes are put
+   */
+  void addBytes(String owner, String volume, String bucket, int bytes);
+
+  /**
+   * This call is made whenever a delete call is made.
+   *
+   * @param owner  - Volume Owner
+   * @param volume - Name of the Volume
+   * @param bucket - Name of the bucket
+   * @param bytes  - How many bytes are deleted
+   */
+  void removeBytes(String owner, String volume, String bucket, int bytes);
+
+}
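
A toy in-memory implementation, just to illustrate the contract (note that an
overwrite shows up as a removeBytes call followed by an addBytes call):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class InMemoryAccounting implements Accounting {
      private final ConcurrentHashMap<String, AtomicLong> usage =
          new ConcurrentHashMap<>();

      private static String id(String owner, String volume, String bucket) {
        return owner + "/" + volume + "/" + bucket;
      }

      @Override
      public void addBytes(String owner, String volume, String bucket,
                           int bytes) {
        usage.computeIfAbsent(id(owner, volume, bucket),
            k -> new AtomicLong()).addAndGet(bytes);
      }

      @Override
      public void removeBytes(String owner, String volume, String bucket,
                              int bytes) {
        usage.computeIfAbsent(id(owner, volume, bucket),
            k -> new AtomicLong()).addAndGet(-bytes);
      }
    }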

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
new file mode 100644
index 0000000..644ba62
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.interfaces;
+
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Request;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.InputStream;
+
+/**
+ * This interface defines operations permitted on a key.
+ */
+
+@Path("/{volume}/{bucket}/{keys}")
+public interface Keys {
+
+  /**
+   * Adds a key to an existing bucket. If the object already exists, this
+   * call overwrites it, or adds a new version if bucket versioning is
+   * turned on.
+   *
+   * @param volume Storage Volume Name
+   * @param bucket Name of the bucket
+   * @param keys Name of the Object
+   * @param is InputStream or File Data
+   * @param req Request
+   * @param headers http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @PUT
+  @Consumes(MediaType.WILDCARD)
+  Response putKey(@PathParam("volume") String volume,
+                  @PathParam("bucket") String bucket,
+                  @PathParam("keys") String keys,
+                  InputStream is,
+                  @Context Request req,
+                  @Context UriInfo info,
+                  @Context HttpHeaders headers)
+    throws OzoneException;
+
+
+  /**
+   * Gets the Key if it exists.
+   *
+   * @param volume Storage Volume
+   * @param bucket Name of the bucket
+   * @param keys Object Name
+   * @param req Request
+   * @param headers Http Header
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_OCTET_STREAM)
+  Response getKey(@PathParam("volume") String volume,
+                  @PathParam("bucket") String bucket,
+                  @PathParam("keys") String keys,
+                  @Context Request req,
+                  @Context UriInfo info,
+                  @Context HttpHeaders headers)
+    throws OzoneException;
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param volume Storage Volume Name
+   * @param bucket Name of the bucket
+   * @param keys Name of the Object
+   * @param req http Request
+   * @param headers HttpHeaders
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @DELETE
+  Response deleteKey(@PathParam("volume") String volume,
+                     @PathParam("bucket") String bucket,
+                     @PathParam("keys") String keys,
+                     @Context Request req,
+                     @Context UriInfo info,
+                     @Context HttpHeaders headers)
+    throws OzoneException;
+}
+
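
The @Path template means the three methods map directly onto PUT, GET and
DELETE of /{volume}/{bucket}/{keys}. A bare-bones client-side sketch using
only the JDK (host and port are placeholders, not defined by this patch):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class PutKeyClient {
      public static void main(String[] args) throws Exception {
        byte[] data = "hello ozone".getBytes("UTF-8");
        URL url = new URL("http://host.example.com:9864/vol1/bucket1/key1");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);
        // Sets the Content-Length header that KeyHandler.putKey requires.
        conn.setFixedLengthStreamingMode(data.length);
        try (OutputStream os = conn.getOutputStream()) {
          os.write(data);
        }
        System.out.println("HTTP " + conn.getResponseCode()); // 201 on success
      }
    }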

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java
new file mode 100644
index 0000000..502ccd5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/messages/LengthInputStreamMessageBodyWriter.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.messages;
+
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.io.IOUtils;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+/**
+ * Writes outbound HTTP response object bytes.  The content length is determined
+ * from the {@link LengthInputStream}.
+ */
+public final class LengthInputStreamMessageBodyWriter
+    implements MessageBodyWriter<LengthInputStream> {
+  private static final int CHUNK_SIZE = 8192;
+
+  @Override
+  public long getSize(LengthInputStream lis, Class<?> type, Type genericType,
+                      Annotation[] annotations, MediaType mediaType) {
+    return lis.getLength();
+  }
+
+  @Override
+  public boolean isWriteable(Class<?> type, Type genericType,
+                             Annotation[] annotations, MediaType mediaType) {
+    return LengthInputStream.class.isAssignableFrom(type);
+  }
+
+  @Override
+  public void writeTo(LengthInputStream lis, Class<?> type, Type genericType,
+                      Annotation[] annotations, MediaType mediaType,
+                      MultivaluedMap<String, Object> httpHeaders,
+                      OutputStream out) throws IOException {
+    IOUtils.copyBytes(lis, out, CHUNK_SIZE);
+  }
+}
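
The writer relies on the length being known up front, so the JAX-RS runtime
can set Content-Length from getSize without buffering the body. Wrapping a
stream is a one-liner (a fragment; the payload is illustrative):

    byte[] payload = "key data".getBytes("UTF-8");
    LengthInputStream lis =
        new LengthInputStream(new ByteArrayInputStream(payload),
            payload.length);
    // getKey returns this via OzoneUtils.getResponse(args, HTTP_OK, lis);
    // isWriteable matches the type and getSize supplies the Content-Length.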

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
new file mode 100644
index 0000000..7e9e00f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.response;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.codehaus.jackson.annotate.JsonAutoDetect;
+import org.codehaus.jackson.annotate.JsonMethod;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectWriter;
+import org.codehaus.jackson.map.annotate.JsonFilter;
+import org.codehaus.jackson.map.ser.FilterProvider;
+import org.codehaus.jackson.map.ser.impl.SimpleBeanPropertyFilter;
+import org.codehaus.jackson.map.ser.impl.SimpleFilterProvider;
+
+import java.io.IOException;
+
+/**
+ * Represents an Ozone key Object.
+ */
+public class KeyInfo implements Comparable<KeyInfo> {
+  static final String OBJECT_INFO = "OBJECT_INFO_FILTER";
+  /**
+   * This class allows us to create custom filters
+   * for the Json serialization.
+   */
+  @JsonFilter(OBJECT_INFO)
+  class MixIn {
+
+  }
+  private long version;
+  private String md5hash;
+  private String createdOn;
+  private long size;
+  private String keyName;
+
+  private String dataFileName;
+
+  /**
+   * When this key was created.
+   *
+   * @return Date String
+   */
+  public String getCreatedOn() {
+    return createdOn;
+  }
+
+  /**
+   * When this key was created.
+   *
+   * @param createdOn - Date String
+   */
+  public void setCreatedOn(String createdOn) {
+    this.createdOn = createdOn;
+  }
+
+  /**
+   * Full path to where the actual data for this key is stored.
+   *
+   * @return String
+   */
+  public String getDataFileName() {
+    return dataFileName;
+  }
+
+  /**
+   * Sets the path where the actual data for this key is stored.
+   *
+   * @param dataFileName - Data File Name
+   */
+  public void setDataFileName(String dataFileName) {
+    this.dataFileName = dataFileName;
+  }
+
+  /**
+   * Gets the Keyname of this object.
+   *
+   * @return String
+   */
+  public String getKeyName() {
+    return keyName;
+  }
+
+  /**
+   * Sets the Key name of this object.
+   *
+   * @param keyName - String
+   */
+  public void setKeyName(String keyName) {
+    this.keyName = keyName;
+  }
+
+  /**
+   * Returns the MD5 Hash for the data of this key.
+   *
+   * @return String MD5
+   */
+  public String getMd5hash() {
+    return md5hash;
+  }
+
+  /**
+   * Sets the MD5 of this file.
+   *
+   * @param md5hash - Md5 of this file
+   */
+  public void setMd5hash(String md5hash) {
+    this.md5hash = md5hash;
+  }
+
+  /**
+   * Number of bytes stored in the data part of this key.
+   *
+   * @return long size of the data file
+   */
+  public long getSize() {
+    return size;
+  }
+
+  /**
+   * Sets the size of the Data part of this key.
+   *
+   * @param size - Size in long
+   */
+  public void setSize(long size) {
+    this.size = size;
+  }
+
+  /**
+   * Version of this key.
+   *
+   * @return - returns the version of this key.
+   */
+  public long getVersion() {
+    return version;
+  }
+
+  /**
+   * Sets the version of this key.
+   *
+   * @param version - Version number
+   */
+  public void setVersion(long version) {
+    this.version = version;
+  }
+
+  /**
+   * Compares this object with the specified object for order.  Returns a
+   * negative integer, zero, or a positive integer as this object is less
+   * than, equal to, or greater than the specified object.
+   *
+   * @param o the object to be compared.
+   *
+   * @return a negative integer, zero, or a positive integer as this object
+   * is less than, equal to, or greater than the specified object.
+   *
+   * @throws NullPointerException if the specified object is null
+   * @throws ClassCastException if the specified object's type prevents it
+   * from being compared to this object.
+   */
+  @Override
+  public int compareTo(KeyInfo o) {
+    if (this.keyName.compareTo(o.getKeyName()) != 0) {
+      return this.keyName.compareTo(o.getKeyName());
+    }
+
+    if (this.getVersion() == o.getVersion()) {
+      return 0;
+    }
+    if (this.getVersion() < o.getVersion()) {
+      return -1;
+    }
+    return 1;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    KeyInfo keyInfo = (KeyInfo) o;
+
+    return new EqualsBuilder()
+        .append(version, keyInfo.version)
+        .append(keyName, keyInfo.keyName)
+        .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 37)
+        .append(version)
+        .append(keyName)
+        .toHashCode();
+  }
+
+  /**
+   * Parses a string to return a KeyInfo object.
+   *
+   * @param jsonString - Json String
+   *
+   * @return - KeyInfo
+   *
+   * @throws IOException
+   */
+  public static KeyInfo parse(String jsonString) throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.readValue(jsonString, KeyInfo.class);
+  }
+
+
+  /**
+   * Returns a JSON string of this object,
+   * after stripping out the dataFileName field.
+   *
+   * @return String
+   */
+  public String toJsonString() throws IOException {
+    String[] ignorableFieldNames = {"dataFileName"};
+
+    FilterProvider filters = new SimpleFilterProvider()
+        .addFilter(OBJECT_INFO, SimpleBeanPropertyFilter
+            .serializeAllExcept(ignorableFieldNames));
+
+    ObjectMapper mapper = new ObjectMapper()
+        .setVisibility(JsonMethod.FIELD, JsonAutoDetect.Visibility.ANY);
+    mapper.getSerializationConfig()
+        .addMixInAnnotations(Object.class, MixIn.class);
+    ObjectWriter writer = mapper.writer(filters);
+    return writer.writeValueAsString(this);
+  }
+
+  /**
+   * Returns the Object as a Json String.
+   */
+  public String toDBString() throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(this);
+  }
+}
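
A round-trip sketch: toJsonString applies the OBJECT_INFO filter so the
server-local dataFileName never leaves the process, while toDBString keeps
every field for the local store (all values below are illustrative):

    KeyInfo info = new KeyInfo();
    info.setKeyName("key1");
    info.setSize(1024);
    info.setVersion(0);
    info.setCreatedOn("Wed, 09 Mar 2016 10:20:24 GMT");
    info.setMd5hash("d41d8cd98f00b204e9800998ecf8427e");
    info.setDataFileName("/data/vol1/bucket1/key1");
    String wire = info.toJsonString();  // dataFileName filtered out
    String stored = info.toDBString();  // full object
    KeyInfo copy = KeyInfo.parse(stored);
    assert copy.equals(info);           // equality uses keyName and version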

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f770d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
new file mode 100644
index 0000000..8b0d9cc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.response;
+
+import org.apache.hadoop.ozone.web.handlers.ListArgs;
+import org.codehaus.jackson.annotate.JsonAutoDetect;
+import org.codehaus.jackson.annotate.JsonMethod;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectWriter;
+import org.codehaus.jackson.map.annotate.JsonFilter;
+import org.codehaus.jackson.map.ser.FilterProvider;
+import org.codehaus.jackson.map.ser.impl.SimpleBeanPropertyFilter;
+import org.codehaus.jackson.map.ser.impl.SimpleFilterProvider;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * This class represents the list of keys (objects) in a bucket.
+ */
+public class ListKeys {
+  static final String OBJECT_LIST = "OBJECT_LIST_FILTER";
+  private String name;
+  private String prefix;
+  private long maxKeys;
+  private boolean truncated;
+  private List<KeyInfo> objectList;
+
+  /**
+   * Default constructor needed for json serialization.
+   */
+  public ListKeys() {
+    this.objectList = new LinkedList<>();
+  }
+
+  /**
+   * Constructor for ListKeys.
+   *
+   * @param args      ListArgs
+   * @param truncated is truncated
+   */
+  public ListKeys(ListArgs args, boolean truncated) {
+    this.name = args.getBucketName();
+    this.prefix = args.getPrefix();
+    this.maxKeys = args.getMaxKeys();
+    this.truncated = truncated;
+  }
+
+  /**
+   * Converts a Json string to a ListKeys POJO.
+   * @param jsonString - Json String
+   * @return ListKeys
+   * @throws IOException
+   */
+  public static ListKeys parse(String jsonString) throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.readValue(jsonString, ListKeys.class);
+  }
+
+  /**
+   * Returns a list of Objects.
+   *
+   * @return List of KeyInfo Objects.
+   */
+  public List<KeyInfo> getObjectList() {
+    return objectList;
+  }
+
+  /**
+   * Sets the list of Objects.
+   *
+   * @param objectList
+   */
+  public void setObjectList(List<KeyInfo> objectList) {
+    this.objectList = objectList;
+  }
+
+  /**
+   * Gets the Max Key Count.
+   *
+   * @return long
+   */
+  public long getMaxKeys() {
+    return maxKeys;
+  }
+
+  /**
+   * Gets bucket Name.
+   *
+   * @return String
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Gets Prefix.
+   *
+   * @return String
+   */
+  public String getPrefix() {
+    return prefix;
+  }
+
+  /**
+   * Gets truncated Status.
+   *
+   * @return Boolean
+   */
+  public boolean isTruncated() {
+    return truncated;
+  }
+
+  /**
+   * Sets the value of truncated.
+   *
+   * @param value - Boolean
+   */
+  public void setTruncated(boolean value) {
+    this.truncated = value;
+  }
+
+  /**
+   * Returns a JSON string of this object, after stripping out the
+   * dataFileName field of each key.
+   *
+   * @return String
+   */
+  public String toJsonString() throws IOException {
+    String[] ignorableFieldNames = {"dataFileName"};
+
+    FilterProvider filters = new SimpleFilterProvider().addFilter(OBJECT_LIST,
+        SimpleBeanPropertyFilter.serializeAllExcept(ignorableFieldNames));
+
+    ObjectMapper mapper = new ObjectMapper()
+        .setVisibility(JsonMethod.FIELD, JsonAutoDetect.Visibility.ANY);
+    mapper.getSerializationConfig()
+        .addMixInAnnotations(Object.class, MixIn.class);
+    ObjectWriter writer = mapper.writer(filters);
+    return writer.writeValueAsString(this);
+  }
+
+  /**
+   * Returns the Object as a Json String.
+   */
+  public String toDBString() throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(this);
+  }
+
+  /**
+   * Sorts the keys based on name and version. This is useful when we return the
+   * list of keys.
+   */
+  public void sort() {
+    Collections.sort(objectList);
+  }
+
+  /**
+   * This class allows us to create custom filters for the Json serialization.
+   */
+  @JsonFilter(OBJECT_LIST)
+  class MixIn {
+
+  }
+}
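
Finally, a sketch of assembling a list response; listArgs and the KeyInfo
entries would come from the storage layer, which this patch does not yet wire
up:

    ListKeys result = new ListKeys(listArgs, false /* not truncated */);
    List<KeyInfo> keys = new LinkedList<>();
    // ... populate keys from the storage layer ...
    result.setObjectList(keys);
    result.sort();                        // by key name, then version
    String json = result.toJsonString();  // dataFileName stripped per key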