Posted to commits@aurora.apache.org by wf...@apache.org on 2014/07/29 02:55:33 UTC

[2/2] git commit: Add a human-friendly HTTP/JSON API with auto-generated documentation.

Add a human-friendly HTTP/JSON API with auto-generated documentation.

Bugs closed: AURORA-369

Reviewed at https://reviews.apache.org/r/23741/
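
For illustration (not part of this commit): the new endpoint is driven by POSTing a
JSON object keyed by thrift parameter names to /apibeta/<method>, and the reply is a
gson-encoded thrift Response struct. A minimal Java client sketch, assuming a
scheduler listening on localhost:8081 and a hypothetical single-parameter method
getQuota(ownerRole):

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    import com.google.common.base.Charsets;
    import com.google.common.io.ByteStreams;

    public final class ApiBetaClientSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8081/apibeta/getQuota");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        // Parameters are sent as a JSON object keyed by the thrift parameter names.
        try (OutputStream out = conn.getOutputStream()) {
          out.write("{\"ownerRole\": \"mesos\"}".getBytes(Charsets.UTF_8));
        }
        try (InputStream in = conn.getInputStream()) {
          // The body is a gson-encoded thrift Response struct.
          System.out.println(new String(ByteStreams.toByteArray(in), Charsets.UTF_8));
        }
      }
    }

A GET of /apibeta redirects to the generated help index at /apibeta/help/index.html.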


Project: http://git-wip-us.apache.org/repos/asf/incubator-aurora/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-aurora/commit/b8dd4a1d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-aurora/tree/b8dd4a1d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-aurora/diff/b8dd4a1d

Branch: refs/heads/master
Commit: b8dd4a1d685f0c989ae94be70f842eecac6dd36e
Parents: fa2aa99
Author: Bill Farner <wf...@apache.org>
Authored: Mon Jul 28 17:50:15 2014 -0700
Committer: Bill Farner <wf...@apache.org>
Committed: Mon Jul 28 17:50:15 2014 -0700

----------------------------------------------------------------------
 build.gradle                                    |  19 +-
 .../aurora/scheduler/http/ServletModule.java    |   4 +
 .../aurora/scheduler/http/api/ApiBeta.java      | 200 +++++++
 .../http/api/GsonMessageBodyHandler.java        | 254 +++++++++
 .../aurora/tools/java/thrift_wrapper_codegen.py | 374 +++++++++++--
 .../thrift/org/apache/aurora/gen/api.thrift     | 520 ++++++++++++-------
 .../scheduler/http/ServletFilterTest.java       |  97 ++++
 .../scheduler/http/ServletModuleTest.java       | 127 ++---
 .../aurora/scheduler/http/api/ApiBetaTest.java  | 232 +++++++++
 9 files changed, 1505 insertions(+), 322 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 5919a98..650338f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -42,6 +42,7 @@ buildDir = 'dist'
 def generatedDir = "$buildDir/generated-src"
 def generatedJavaDir = "$generatedDir/gen-java"
 def generatedJSDir = "$generatedDir/gen-js"
+def generatedResourceDir = "$generatedDir/resources"
 
 def thriftVersion = '0.9.1'
 
@@ -116,6 +117,7 @@ sourceSets {
     resources {
       srcDir '3rdparty/javascript'
       srcDir generatedJSDir
+      srcDir generatedResourceDir
     }
   }
   test {
@@ -144,11 +146,13 @@ dependencies {
   def slf4jRev = '1.6.1'
   def junitRev = '4.11'
 
+  def gsonDep = 'com.google.code.gson:gson:2.2.4'
   def guavaDep = 'com.google.guava:guava:16.0'
   def thriftLib = "org.apache.thrift:libthrift:${thriftVersion}"
 
   compile 'aopalliance:aopalliance:1.0'
   compile 'com.google.code.findbugs:jsr305:2.0.1'
+  compile gsonDep
   compile guavaDep
   compile 'com.google.inject:guice:3.0'
   compile 'com.google.protobuf:protobuf-java:2.5.0'
@@ -212,6 +216,7 @@ dependencies {
   testCompile "com.sun.jersey:jersey-client:${jerseyRev}"
   testCompile "junit:junit:${junitRev}"
 
+  generatedCompile gsonDep
   generatedCompile guavaDep
   generatedCompile thriftLib
 
@@ -224,6 +229,7 @@ dependencies {
 
       def forceDepVersions = [
         // Force versions based on the dependencies we use from above
+        'com.google.code.gson:gson': '2.2.4',
         'org.slf4j:slf4j-api' : slf4jRev,
         'log4j:log4j' : log4jRev,
         'org.apache.thrift:libthrift' : thriftVersion,
@@ -391,13 +397,15 @@ task bootstrapThrift {
 }
 
 task generateSources(dependsOn: ['bootstrapThrift', 'checkPython']) {
-  ext.inputFiles = fileTree(dir: 'src/main/thrift').matching { include '**/*.thrift' }
+  def codeGenerator = 'src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py'
+  def thriftFiles = fileTree(dir: 'src/main/thrift').matching { include '**/*.thrift' }
+  ext.inputFiles = thriftFiles + files(codeGenerator)
   ext.outputDir = file(generatedDir)
   inputs.file inputFiles
   outputs.dir outputDir
   doLast {
     outputDir.exists() || outputDir.mkdirs()
-    inputFiles.each { File file ->
+    thriftFiles.each { File file ->
       exec {
         executable = project.thrift
         args = ['--gen', 'java:hashcode', '--gen', 'js:jquery', '-o', outputDir, file]
@@ -405,9 +413,10 @@ task generateSources(dependsOn: ['bootstrapThrift', 'checkPython']) {
     }
     exec {
       executable = project.py
-      args = ['src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py',
+      args = [codeGenerator,
               'src/main/thrift/org/apache/aurora/gen/api.thrift',
-              generatedJavaDir]
+              generatedJavaDir,
+              generatedResourceDir]
     }
   }
 }
@@ -426,7 +435,7 @@ ideaProject.dependsOn generateSources
 
 idea {
   module {
-    def codegenDirs = [file(generatedJavaDir), file(generatedJSDir)]
+    def codegenDirs = [file(generatedJavaDir)]
 
     // These directories must exist, else the plugin omits them from the
     // generated project. Since this is executed during the configuration

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/main/java/org/apache/aurora/scheduler/http/ServletModule.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/aurora/scheduler/http/ServletModule.java b/src/main/java/org/apache/aurora/scheduler/http/ServletModule.java
index 729e0ab..5c0fc2c 100644
--- a/src/main/java/org/apache/aurora/scheduler/http/ServletModule.java
+++ b/src/main/java/org/apache/aurora/scheduler/http/ServletModule.java
@@ -39,6 +39,7 @@ import com.twitter.common.net.pool.DynamicHostSet;
 import com.twitter.common.net.pool.DynamicHostSet.MonitorException;
 import com.twitter.thrift.ServiceInstance;
 
+import org.apache.aurora.scheduler.http.api.ApiBeta;
 import org.mortbay.servlet.GzipFilter;
 
 import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
@@ -71,6 +72,8 @@ public class ServletModule extends AbstractModule {
     // NOTE: GzipFilter is applied only to /api instead of globally because the Jersey-managed
     // servlets have a conflicting filter applied to them.
     Registration.registerServletFilter(binder(), GzipFilter.class, "/api/*");
+    // TODO(wfarner): Add a unit test to validate gzip behavior.
+    Registration.registerServletFilter(binder(), GzipFilter.class, "/apibeta");
 
     // Bindings required for the leader redirector.
     requireBinding(LocalServiceRegistry.class);
@@ -99,6 +102,7 @@ public class ServletModule extends AbstractModule {
           filter("/api*").through(CorsFilter.class);
         }
 
+        registerJerseyEndpoint("/apibeta", ApiBeta.class);
         registerJerseyEndpoint("/cron", Cron.class);
         registerJerseyEndpoint("/locks", Locks.class);
         registerJerseyEndpoint("/maintenance", Maintenance.class);

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/main/java/org/apache/aurora/scheduler/http/api/ApiBeta.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/aurora/scheduler/http/api/ApiBeta.java b/src/main/java/org/apache/aurora/scheduler/http/api/ApiBeta.java
new file mode 100644
index 0000000..2b96427
--- /dev/null
+++ b/src/main/java/org/apache/aurora/scheduler/http/api/ApiBeta.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.aurora.scheduler.http.api;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Type;
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+import javax.ws.rs.core.StreamingOutput;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Lists;
+import com.google.common.io.ByteSource;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.Resources;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSyntaxException;
+
+import org.apache.aurora.gen.AuroraAdmin;
+import org.apache.aurora.gen.AuroraAdmin.Iface;
+import org.apache.aurora.gen.ResponseCode;
+import org.apache.aurora.scheduler.storage.entities.AuroraAdminMetadata;
+import org.apache.aurora.scheduler.storage.entities.ITaskConfig;
+import org.apache.aurora.scheduler.thrift.Util;
+
+import static org.apache.aurora.scheduler.http.api.GsonMessageBodyHandler.GSON;
+
+/**
+ * A scheduler interface that allows interaction with the thrift API via traditional JSON,
+ * rather than thrift's preferred means which uses field IDs.
+ */
+@Path("/apibeta")
+public class ApiBeta {
+
+  private static final Logger LOG = Logger.getLogger(ApiBeta.class.getName());
+
+  private final Iface api;
+
+  @Inject
+  ApiBeta(AuroraAdmin.Iface api) {
+    this.api = Objects.requireNonNull(api);
+  }
+
+  private JsonElement getJsonMember(JsonObject json, String memberName) {
+    return (json == null) ? null : json.get(memberName);
+  }
+
+  private static Response errorResponse(Status status, String message) {
+    return Response.status(status)
+        .entity(Util.addMessage(Util.emptyResponse(), message).setResponseCode(ResponseCode.ERROR))
+        .build();
+  }
+
+  private static Response badRequest(String message) {
+    return errorResponse(Status.BAD_REQUEST, message);
+  }
+
+  /**
+   * Parses method parameters into the appropriate types.  For a method call to be successful,
+   * the elements supplied in the request must match the names of those specified in the thrift
+   * method definition.  If a method parameter does not exist in the request object, {@code null}
+   * will be substituted.
+   *
+   * @param json Incoming request data, to translate into method parameters.
+   * @param fields Field metadata map. Map <strong>iteration order must match</strong> the order
+   *               defined in the thrift method.
+   * @return Parsed method parameters.
+   * @throws WebApplicationException If a parameter could not be parsed.
+   */
+  private Object[] readParams(JsonObject json, Map<String, Type> fields)
+      throws WebApplicationException {
+
+    List<Object> params = Lists.newArrayList();
+    for (Map.Entry<String, Type> entry : fields.entrySet()) {
+      try {
+        params.add(GSON.fromJson(getJsonMember(json, entry.getKey()), entry.getValue()));
+      } catch (JsonParseException e) {
+        throw new WebApplicationException(
+            e,
+            badRequest("Failed to parse parameter " + entry.getKey() + ": " + e.getMessage()));
+      }
+    }
+    return params.toArray();
+  }
+
+  private Method getApiMethod(String name, Map<String, Type> metadata) {
+    try {
+      return Iface.class.getMethod(name, metadata.values().toArray(new Class<?>[0]));
+    } catch (NoSuchMethodException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @POST
+  @Path("{method}")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response invoke(@PathParam("method") String methodName, String postData) {
+    if (LOG.isLoggable(Level.FINE)) {
+      LOG.info("Call to " + methodName + " with data: " + postData);
+    }
+
+    // First, verify that this is a valid method on the interface.
+    Map<String, Type> methodMetadata = AuroraAdminMetadata.METHODS.get(methodName);
+    if (methodMetadata == null) {
+      return errorResponse(Status.NOT_FOUND, "Method " + methodName + " does not exist.");
+    }
+
+    JsonObject parameters;
+    try {
+      JsonElement json = GSON.fromJson(postData, JsonElement.class);
+      // The parsed object will be null if there was no post data.  This is okay, since that is
+      // expected for a zero-parameter method.
+      if (json != null && !(json instanceof JsonObject)) {
+        throw new WebApplicationException(
+            badRequest("Request data must be a JSON object of method parameters."));
+      }
+      parameters = (JsonObject) json;
+    } catch (JsonSyntaxException e) {
+      throw new WebApplicationException(e, badRequest("Request must be valid JSON"));
+    }
+
+    final Method method = getApiMethod(methodName, methodMetadata);
+    final Object[] params = readParams(parameters, methodMetadata);
+    return Response.ok(new StreamingOutput() {
+      @Override
+      public void write(OutputStream output) throws IOException {
+        try {
+          Object response = method.invoke(api, params);
+          try (OutputStreamWriter out = new OutputStreamWriter(output, Charsets.UTF_8)) {
+            GSON.toJson(response, out);
+          }
+        } catch (IllegalAccessException | InvocationTargetException e) {
+          throw Throwables.propagate(e);
+        }
+      }
+    }).build();
+  }
+
+  @GET
+  @Produces(MediaType.TEXT_HTML)
+  public Response getIndex() {
+    return Response.seeOther(URI.create("/apibeta/help/index.html")).build();
+  }
+
+  @GET
+  @Path("help/{resourcePath: .*\\.html}")
+  @Produces(MediaType.TEXT_HTML)
+  public Response getApiHelp(@PathParam("resourcePath") final String resourcePath) {
+    final ByteSource data;
+    try {
+      data = Resources.asByteSource(Resources.getResource(ITaskConfig.class, resourcePath));
+    } catch (IllegalArgumentException e) {
+      return Response.status(Status.NOT_FOUND).entity("Page does not exist.").build();
+    }
+
+    return Response.ok(new StreamingOutput() {
+      @Override
+      public void write(OutputStream output) throws IOException {
+        try {
+          ByteStreams.copy(data.openStream(), output);
+        } finally {
+          output.close();
+        }
+      }
+    }).build();
+  }
+}
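
The reflective dispatch above leans on the AuroraAdminMetadata class emitted by the
codegen changes further down: an insertion-ordered map from method name to parameter
names and reflective types, with iteration order matching the thrift declaration. A
hedged sketch of the generated shape, based on SERVICE_METADATA_TEMPLATE below
(method and parameter names are illustrative, not verbatim output):

    package org.apache.aurora.scheduler.storage.entities;

    import java.lang.reflect.Type;
    import java.util.Map;
    import java.util.Set;

    import com.google.common.collect.ImmutableMap;
    import com.google.gson.reflect.TypeToken;

    import org.apache.aurora.gen.*;

    public final class AuroraAdminMetadata {
      public static final Map<String, Map<String, Type>> METHODS =
          ImmutableMap.<String, Map<String, Type>>builder()
              // Plain parameters are captured as classes, in thrift order.
              .put("setQuota", ImmutableMap.<String, Type>builder()
                  .put("ownerRole", String.class)
                  .put("quota", ResourceAggregate.class)
                  .build())
              // Generic parameters are captured via gson TypeTokens.
              .put("exampleMethod", ImmutableMap.<String, Type>builder()
                  .put("taskIds", new TypeToken<Set<String>>() { }.getType())
                  .build())
              .build();

      private AuroraAdminMetadata() {
        // Utility class
      }
    }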

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/main/java/org/apache/aurora/scheduler/http/api/GsonMessageBodyHandler.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/aurora/scheduler/http/api/GsonMessageBodyHandler.java b/src/main/java/org/apache/aurora/scheduler/http/api/GsonMessageBodyHandler.java
new file mode 100644
index 0000000..826c0ad
--- /dev/null
+++ b/src/main/java/org/apache/aurora/scheduler/http/api/GsonMessageBodyHandler.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.aurora.scheduler.http.api;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Field;
+import java.lang.reflect.Type;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.io.BaseEncoding;
+import com.google.gson.ExclusionStrategy;
+import com.google.gson.FieldAttributes;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+import com.google.gson.TypeAdapter;
+import com.google.gson.stream.JsonReader;
+import com.google.gson.stream.JsonWriter;
+
+import org.apache.thrift.TFieldIdEnum;
+import org.apache.thrift.TUnion;
+import org.apache.thrift.meta_data.FieldMetaData;
+import org.apache.thrift.meta_data.StructMetaData;
+
+/**
+ * A message body reader/writer that uses Gson to translate JSON to and from Java objects produced
+ * by the thrift compiler.
+ * <p>
+ * This is used because Jackson doesn't provide target type information to custom deserializer
+ * implementations, so it is apparently not possible to implement a generic deserializer for
+ * subclasses of {@link TUnion}.
+ */
+@Provider
+@Produces(MediaType.APPLICATION_JSON)
+@Consumes(MediaType.APPLICATION_JSON)
+public class GsonMessageBodyHandler
+    implements MessageBodyReader<Object>, MessageBodyWriter<Object> {
+
+  @Override
+  public Object readFrom(
+      Class<Object> type,
+      Type genericType,
+      Annotation[] annotations,
+      MediaType mediaType,
+      MultivaluedMap<String, String> httpHeaders,
+      InputStream entityStream) throws IOException {
+
+    // For some reason try-with-resources syntax trips a findbugs error here.
+    InputStreamReader streamReader = null;
+    try {
+      streamReader = new InputStreamReader(entityStream, Charsets.UTF_8);
+      Type jsonType;
+      if (type.equals(genericType)) {
+        jsonType = type;
+      } else {
+        jsonType = genericType;
+      }
+      return GSON.fromJson(streamReader, jsonType);
+    } finally {
+      if (streamReader != null) {
+        streamReader.close();
+      }
+    }
+  }
+
+  @Override
+  public void writeTo(
+      Object o,
+      Class<?> type,
+      Type genericType, Annotation[] annotations,
+      MediaType mediaType,
+      MultivaluedMap<String, Object> httpHeaders,
+      OutputStream entityStream) throws IOException, WebApplicationException {
+
+    try (OutputStreamWriter writer = new OutputStreamWriter(entityStream, Charsets.UTF_8)) {
+      Type jsonType;
+      if (type.equals(genericType)) {
+        jsonType = type;
+      } else {
+        jsonType = genericType;
+      }
+      GSON.toJson(o, jsonType, writer);
+    }
+  }
+
+  @Override
+  public boolean isReadable(
+      Class<?> type,
+      Type genericType,
+      Annotation[] annotations,
+      MediaType mediaType) {
+
+    return true;
+  }
+
+  @Override
+  public boolean isWriteable(
+      Class<?> type,
+      Type genericType,
+      Annotation[] annotations,
+      MediaType mediaType) {
+
+    return true;
+  }
+
+  @Override
+  public long getSize(
+      Object o,
+      Class<?> type,
+      Type genericType,
+      Annotation[] annotations,
+      MediaType mediaType) {
+
+    return -1;
+  }
+
+  private static final Set<String> THRIFT_CONTROL_FIELDS = ImmutableSet.of(
+      "__isset_bitfield",
+      "optionals");
+
+  private static final ExclusionStrategy EXCLUDE_THRIFT_FIELDS = new ExclusionStrategy() {
+    @Override
+    public boolean shouldSkipField(FieldAttributes f) {
+      return THRIFT_CONTROL_FIELDS.contains(f.getName());
+    }
+
+    @Override
+    public boolean shouldSkipClass(Class<?> clazz) {
+      return false;
+    }
+  };
+
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  private static TUnion<?, ?> createUnion(
+      Class<?> unionType,
+      TFieldIdEnum setField,
+      Object fieldValue) throws IllegalAccessException, InstantiationException {
+
+    TUnion union = (TUnion) unionType.newInstance();
+    union.setFieldValue(setField, fieldValue);
+    return union;
+  }
+
+  public static final Gson GSON = new GsonBuilder()
+      .addSerializationExclusionStrategy(EXCLUDE_THRIFT_FIELDS)
+      .registerTypeHierarchyAdapter(TUnion.class, new JsonSerializer<TUnion<?, ?>>() {
+        @Override
+        public JsonElement serialize(
+            TUnion<?, ?> src,
+            Type typeOfSrc,
+            JsonSerializationContext context) {
+
+          return context.serialize(
+              ImmutableMap.of(src.getSetField().getFieldName(), src.getFieldValue()));
+        }
+      })
+      .registerTypeHierarchyAdapter(TUnion.class, new JsonDeserializer<TUnion<?, ?>>() {
+        @Override
+        public TUnion<?, ?> deserialize(
+            JsonElement json,
+            Type typeOfT,
+            JsonDeserializationContext context) throws JsonParseException {
+
+          JsonObject jsonObject = json.getAsJsonObject();
+          if (jsonObject.entrySet().size() != 1) {
+            throw new JsonParseException(
+                typeOfT + " must have exactly one element");
+          }
+
+          if (typeOfT instanceof Class) {
+            Class<?> clazz = (Class<?>) typeOfT;
+            Entry<String, JsonElement> item = Iterables.getOnlyElement(jsonObject.entrySet());
+
+            try {
+              Field metaDataMapField = clazz.getField("metaDataMap");
+              @SuppressWarnings("unchecked")
+              Map<TFieldIdEnum, FieldMetaData> metaDataMap =
+                  (Map<TFieldIdEnum, FieldMetaData>) metaDataMapField.get(null);
+
+              for (Map.Entry<TFieldIdEnum, FieldMetaData> entry : metaDataMap.entrySet()) {
+                if (entry.getKey().getFieldName().equals(item.getKey())) {
+                  StructMetaData valueMetaData = (StructMetaData) entry.getValue().valueMetaData;
+                  Object result = context.deserialize(item.getValue(), valueMetaData.structClass);
+                  return createUnion(clazz, entry.getKey(), result);
+                }
+              }
+
+              throw new RuntimeException("Failed to deserialize " + typeOfT);
+            } catch (NoSuchFieldException | IllegalAccessException | InstantiationException e) {
+              throw Throwables.propagate(e);
+            }
+          } else {
+            throw new RuntimeException("Unable to deserialize " + typeOfT);
+          }
+        }
+      })
+      .registerTypeAdapter(ByteBuffer.class, new TypeAdapter<ByteBuffer>() {
+        @Override
+        public void write(JsonWriter out, ByteBuffer value) throws IOException {
+          out.value(BaseEncoding.base64().encode(value.array()));
+        }
+
+        @Override
+        public ByteBuffer read(JsonReader in) throws IOException {
+          try {
+            return ByteBuffer.wrap(BaseEncoding.base64().decode(in.nextString()));
+          } catch (UnsupportedOperationException e) {
+            throw new JsonParseException("Byte array element must be a JSON string.", e);
+          } catch (IllegalArgumentException e) {
+            throw new JsonParseException("Unable to parse base64-encoded string.", e);
+          }
+        }
+      })
+      .create();
+}
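
The net effect of the adapters above: thrift unions serialize as a single-element
JSON object keyed by the set field, and binary fields round-trip as base64 strings.
A small round-trip sketch, assuming the thrift-generated all-field JobKey
constructor and the LockKey.job union factory method:

    import org.apache.aurora.gen.JobKey;
    import org.apache.aurora.gen.LockKey;

    import static org.apache.aurora.scheduler.http.api.GsonMessageBodyHandler.GSON;

    public final class UnionJsonSketch {
      public static void main(String[] args) {
        LockKey key = LockKey.job(new JobKey("mesos", "devel", "labrat"));
        String json = GSON.toJson(key);
        // Expected shape: {"job":{"role":"mesos","environment":"devel","name":"labrat"}}
        System.out.println(json);
        // Deserialization picks the struct class from the union's metaDataMap.
        LockKey parsed = GSON.fromJson(json, LockKey.class);
        System.out.println(key.equals(parsed));
      }
    }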

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py
----------------------------------------------------------------------
diff --git a/src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py b/src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py
index 9d500ba..26dee08 100644
--- a/src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py
+++ b/src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py
@@ -64,22 +64,33 @@ class ParameterizedType(Type):
 class StructType(Type):
   '''A thrift-defined type, which composes other types as fields.'''
 
-  def __init__(self, name, package, kind, fields):
+  def __init__(self, name, package, kind, fields, doc):
     Type.__init__(self, name, package, kind == 'enum')
     self.codegen_name = 'I%s' % name
     self.kind = kind
     self.fields = fields
+    self.doc = doc
 
   def __str__(self):
     return '%s %s { %s }' % (self.kind, self.name, ', '.join(map(str, self.fields)))
 
+class EnumType(StructType):
+  '''A thrift-defined value enumeration.'''
+
+  def __init__(self, name, package, values, doc):
+    StructType.__init__(self, name, package, 'enum', [], doc)
+    self.values = values
+
+  def __str__(self):
+    return '%s (%s)' % (self.name, ', '.join(self.values))
 
 class Field(object):
   '''A field within a thrift structure.'''
 
-  def __init__(self, ttype, name):
+  def __init__(self, ttype, name, doc):
     self.ttype = ttype
     self.name = name
+    self.doc = doc
 
   def accessor_method(self):
     return '%s%s' % (
@@ -126,20 +137,9 @@ STRUCT_COLLECTION_FIELD_ASSIGNMENT = '''this.%(field)s = !wrapped.%(isset)s()
 
 PACKAGE_NAME = 'org.apache.aurora.scheduler.storage.entities'
 
-CLASS_TEMPLATE = '''/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package %(package)s;
+
+
+CLASS_TEMPLATE = '''package %(package)s;
 
 %(imports)s
 
@@ -283,6 +283,11 @@ class GeneratedCode(object):
 #    namespace java org.apache.aurora.gen
 NAMESPACE_RE = 'namespace\s+(?P<lang>\w+)\s+(?P<namespace>[^\s]+)'
 
+DOC_RE = '(\/\*+(?P<doc>[^\/]+)\*\/\s*)?'
+
+# Matches a complete struct definition, capturing the type and body.
+STRUCT_RE = DOC_RE + '(?P<kind>enum|struct|union)\s+(?P<name>\w+)\s+{(?P<body>[^}]+)}'
+
 # A possibly-parameterized type name, e.g.:
 #    int
 #    TaskConfig
@@ -290,15 +295,148 @@ NAMESPACE_RE = 'namespace\s+(?P<lang>\w+)\s+(?P<namespace>[^\s]+)'
 #    Map<String, TaskConfig>
 TYPE_PATTERN = '(?P<type>\w+)(?:<(?P<params>[^>]+)>)?'
 
-
-# Matches a complete struct definnition, capturing the type and body.
-STRUCT_RE = '(?P<kind>enum|struct|union)\s+(?P<name>\w+)\s+{(?P<body>[^}]+)}'
-
-
 # A field definition within a struct, e.g.:
 #     1: string name
 #     15: Map<String, TaskConfig> configs  # Configs mapped by name.
-FIELD_RE = '\s*\d+:\s+(?:(?:required|optional)\s+)?(%s)\s+(?P<name>\w+).*' % TYPE_PATTERN
+FIELD_RE = DOC_RE + '\s*\d+:\s+(?:(?:required|optional)\s+)?(%s)\s+(?P<name>\w+).*' % TYPE_PATTERN
+
+# An enum value definition, e.g.:
+#    INVALID_REQUEST = 0,
+ENUM_VALUE_RE = DOC_RE + '\s*(?P<name>\w+)\s*=\s*\d+,?'
+
+
+class Service(object):
+  def __init__(self, name, parent, methods):
+    self.name = name
+    self.parent = parent
+    self.methods = methods
+
+  def __str__(self):
+    return ''.join([self.name, self.parent or '', '  ' + '\n  '.join(map(str, self.methods))])
+
+class Method(object):
+  def __init__(self, name, parameters, return_type, doc):
+    self.name = name
+    self.parameters = parameters
+    self.return_type = return_type
+    self.doc = doc
+
+  def __str__(self):
+    return '%s(%s)' % (self.name, ', '.join(map(str, self.parameters)))
+
+class Parameter(object):
+  def __init__(self, name, type_name):
+    self.name = name
+    self.type_name = type_name
+
+  def __str__(self):
+    return '%s %s' % (self.type_name, self.name)
+
+class GenericParameter(Parameter):
+  def __init__(self, name, type_name, parameters):
+    Parameter.__init__(self, name, type_name)
+    self.parameters = parameters
+
+GET_SUPER_METHODS = '.putAll(%(super)sMetadata.METHODS)'
+
+PARAM_METADATA_TEMPLATE = '.put("%(name)s", %(type)s.class)'
+
+GENERIC_PARAM_METADATA_TEMPLATE = (
+    '.put("%(name)s", new TypeToken<%(type)s<%(params)s>>() {}.getType())')
+
+METHOD_METADATA_TEMPLATE = '''.put(
+              "%(name)s",
+              ImmutableMap.<String, Type>builder()%(params)s
+                  .build())'''
+
+SERVICE_METADATA_TEMPLATE = '''package %(package)s;
+
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.gson.reflect.TypeToken;
+
+import org.apache.aurora.gen.*;
+
+public final class %(name)sMetadata {
+  public static final Map<String, Map<String, Type>> METHODS =
+      ImmutableMap.<String, Map<String, Type>>builder()
+          %(methods)s
+          .build();
+
+  private %(name)sMetadata() {
+    // Utility class
+  }
+}
+'''
+
+HTML_TEMPLATE = '''<html>
+  <head>
+    <title>Aurora Beta API Help</title>
+  </head>
+  <body>
+    %s
+  </body>
+</html>
+'''
+
+
+METHOD_ENTRY = '<li><a href="method/%(name)s.html">%(name)s</a></li>'
+
+
+SERVICE_DOC_PAGE_TEMPLATE = HTML_TEMPLATE % '''<h2>Available Methods</h2>
+    <ul>
+      %(methods)s
+    </ul>'''
+
+TYPE_LINK = '<a href="../type/%(name)s.html">%(name)s</a>'
+
+PARAM_ENTRY = '<li>%(type)s <code>%(name)s</code>: %(doc)s</li>'
+
+PARAMETERS_LIST = '''<ul>
+  %(params)s
+  </ul>'''
+
+METHOD_DOC_PAGE_TEMPLATE = HTML_TEMPLATE % '''<h2>POST /apibeta/%(method)s</h2>
+    <a href="../index.html">home</a>
+    <h4>Description</h4>
+    %(doc)s
+    <h4>Return Type</h4>
+    %(return)s
+    <h4>Parameters</h4>
+    %(parameters)s
+'''
+
+TYPE_DOC_PAGE_TEMPLATE = HTML_TEMPLATE % '''<h2>object %(name)s</h2>
+    <a href="../index.html">home</a>
+    <h4>Description</h4>
+    %(doc)s
+    <h4>Fields</h4>
+    <ul>
+    %(fields)s
+    </ul>
+'''
+
+ENUM_DOC_PAGE_TEMPLATE = HTML_TEMPLATE % '''<h2>enum %(name)s</h2>
+    <a href="../index.html">home</a>
+    <h4>Description</h4>
+    %(doc)s
+    <h4>Values</h4>
+    <ul>
+    %(values)s
+    </ul>
+'''
+
+ENUM_VALUE_ENTRY = '<li><code>%(value)s</code>: %(doc)s</li>'
+
+SERVICE_RE = 'service (?P<name>\w+)\s+(extends\s+(?P<super>\w+)\s+)?{(?P<body>[^}]+)}'
+
+METHOD_RE = DOC_RE + '\s*(?P<return>\w+)\s+(?P<name>\w+)\((?P<params>[^\)]*)\)'
+
+PARAM_RE = '\d+\:\s+%s\s+(?P<name>\w+)' % TYPE_PATTERN
 
 THRIFT_TYPES = {
   'bool': PrimitiveType('boolean', 'Boolean'),
@@ -340,18 +478,56 @@ def parse_structs(thrift_defs):
       ttype = ParameterizedType(type_name.title(), params)
     else:
       ttype = make_type(type_name)
-    return Field(ttype, field.group('name'))
+    return Field(ttype, field.group('name'), field.group('doc'))
 
   def parse_fields(field_str):
     return map(parse_field, re.finditer(FIELD_RE, field_str))
 
+  def parse_values(enum_str):
+    return [(m.group('name'), m.group('doc')) for m in re.finditer(ENUM_VALUE_RE, enum_str)]
+
   for s in re.finditer(STRUCT_RE, thrift_defs, flags=re.MULTILINE):
-    structs.append(StructType(s.group('name'),
-                              namespaces['java'],
-                              s.group('kind'),
-                              parse_fields(s.group('body'))))
+    if s.group('kind') == 'enum':
+      struct = EnumType(s.group('name'),
+                        namespaces['java'],
+                        parse_values(s.group('body')),
+                        s.group('doc'))
+    else:
+      struct = StructType(s.group('name'),
+                          namespaces['java'],
+                          s.group('kind'),
+                          parse_fields(s.group('body')),
+                          s.group('doc'))
+    structs.append(struct)
+
   return structs
 
+def parse_services(service_defs):
+  services = []
+
+  for s in re.finditer(SERVICE_RE, service_defs, flags=re.MULTILINE):
+    methods = []
+    for method in re.finditer(METHOD_RE, s.group('body'), flags=re.MULTILINE):
+      params = []
+      for param in re.finditer(PARAM_RE, method.group('params'), flags=re.MULTILINE):
+        if param.group('params'):
+          params.append(GenericParameter(
+              param.group('name'),
+              param.group('type'),
+              param.group('params').replace(' ', '').split(',')))
+        else:
+          params.append(Parameter(param.group('name'), param.group('type')))
+      methods.append(Method(method.group('name'),
+                            params,
+                            method.group('return'),
+                            method.group('doc')))
+    services.append(Service(s.group('name'), s.group('super'), methods))
+  return services
+
+def trim_doc_text(doc):
+  # Remove multiline comment text from doc strings
+  return re.sub('^\s*\*\s*', '', doc, flags=re.MULTILINE) if doc else None
+
 
 def generate_java(struct):
   code = GeneratedCode(struct.codegen_name, struct.name)
@@ -449,16 +625,17 @@ if __name__ == '__main__':
     if options.verbose:
       print(value)
 
-  if len(args) != 2:
-    print('usage: %s thrift_file output_directory' % sys.argv[0])
+  if len(args) != 3:
+    print('usage: %s thrift_file code_output_dir resource_output_dir' % sys.argv[0])
     sys.exit(1)
 
-  thrift_file, output_directory = args
+  thrift_file, code_output_dir, resource_output_dir = args
   with open(thrift_file) as f:
     # Load all structs found in the thrift file.
-    structs = parse_structs(f.read())
+    file_contents = f.read()
+    structs = parse_structs(file_contents)
 
-    package_dir = os.path.join(output_directory, PACKAGE_NAME.replace('.', os.path.sep))
+    package_dir = os.path.join(code_output_dir, PACKAGE_NAME.replace('.', os.path.sep))
     if not os.path.isdir(package_dir):
       os.makedirs(package_dir)
     for struct in structs:
@@ -470,3 +647,136 @@ if __name__ == '__main__':
       with open(gen_file, 'w') as f:
         code = generate_java(struct)
         code.dump(f)
+
+    services = parse_services(file_contents)
+    resource_dir = os.path.join(resource_output_dir, PACKAGE_NAME.replace('.', os.path.sep))
+    if not os.path.isdir(resource_dir):
+      os.makedirs(resource_dir)
+
+    methods_dir = os.path.join(resource_dir, 'method')
+    if not os.path.isdir(methods_dir):
+      os.makedirs(methods_dir)
+    types_dir = os.path.join(resource_dir, 'type')
+    if not os.path.isdir(types_dir):
+      os.makedirs(types_dir)
+
+    def get_service(name):
+      return [s for s in services if s.name == name][0]
+
+    service = get_service('AuroraAdmin')
+
+    all_methods = [] + service.methods
+    cur_service = service
+    while cur_service.parent:
+      cur_service = get_service(cur_service.parent)
+      all_methods += cur_service.methods
+
+    api_help_page = os.path.join(resource_dir, 'index.html')
+    log('Generating service help file %s' % api_help_page)
+    with open(api_help_page, 'w') as f:
+      method_entries = '\n  '.join([METHOD_ENTRY % {'name': m.name} for m in all_methods])
+      print(SERVICE_DOC_PAGE_TEMPLATE % {'methods': method_entries}, file=f)
+
+    def get_type_name(name):
+      if name in THRIFT_TYPES:
+        thrift_type = THRIFT_TYPES[name]
+        if isinstance(thrift_type, PrimitiveType):
+          return thrift_type.boxed_name
+        else:
+          return name
+      return name
+
+    def ttype_text(ttype):
+      if isinstance(ttype, PrimitiveType):
+        return ttype.name
+      elif isinstance(ttype, ParameterizedType):
+        return '%s&lt;%s&gt;' % (ttype.name, ', '.join(map(ttype_text, ttype.params)))
+      else:
+        return TYPE_LINK % {'name': ttype.name}
+
+    for struct in structs:
+      type_help_page = os.path.join(types_dir, '%s.html' % struct.name)
+      log('Generating type help file %s' % type_help_page)
+      with open(type_help_page, 'w') as f:
+        def make_field_help(field):
+          return PARAM_ENTRY % {'type': ttype_text(field.ttype),
+                                'name': field.name,
+                                'doc': trim_doc_text(field.doc)}
+
+        def value_entries(values):
+          return '\n      '.join([ENUM_VALUE_ENTRY % {'value': v[0],
+                                                      'doc': trim_doc_text(v[1])} for v in values])
+
+        if struct.kind == 'enum':
+          print(ENUM_DOC_PAGE_TEMPLATE % {'name': struct.name,
+                                          'values': value_entries(struct.values),
+                                          'doc': trim_doc_text(struct.doc)}, file=f)
+        else:
+          fields_text = '\n  '.join(make_field_help(f) for f in struct.fields)
+          print(TYPE_DOC_PAGE_TEMPLATE % {'name': struct.name,
+                                          'fields': fields_text,
+                                          'doc': trim_doc_text(struct.doc)}, file=f)
+
+    def type_text(type_name):
+      type_text = type_name
+      if type_name in THRIFT_TYPES:
+        thrift_type = THRIFT_TYPES[type_name]
+        if isinstance(thrift_type, PrimitiveType):
+          type_text = thrift_type.boxed_name
+      else:
+        type_text = TYPE_LINK % {'name': type_name}
+      return type_text
+
+    for method in all_methods:
+      method_help_page = os.path.join(methods_dir, '%s.html' % method.name)
+      log('Generating method help file %s' % method_help_page)
+      with open(method_help_page, 'w') as f:
+        def make_param_help(parameter):
+          return PARAM_ENTRY % {'type': type_text(parameter.type_name),
+                                'name': parameter.name,
+                                'doc': None}
+
+        def make_params_help(parameters):
+          if parameters:
+            return PARAMETERS_LIST % {'params': '\n  '.join([make_param_help(p) for p in parameters])}
+          else:
+            return 'This method has no parameters.'
+
+        print(METHOD_DOC_PAGE_TEMPLATE % {'method': method.name,
+                                          'parameters': make_params_help(method.parameters),
+                                          'return': type_text(method.return_type),
+                                          'doc': trim_doc_text(method.doc)},
+              file=f)
+
+    def add_param(param):
+      if param.type_name in THRIFT_TYPES:
+        thrift_type = THRIFT_TYPES[param.type_name]
+        if not isinstance(thrift_type, PrimitiveType):
+          return GENERIC_PARAM_METADATA_TEMPLATE % {
+            'name': param.name,
+            'type': thrift_type.name,
+            'params': ', '.join(map(get_type_name, param.parameters))
+          }
+      return PARAM_METADATA_TEMPLATE % {
+        'name': param.name,
+        'type': get_type_name(param.type_name)
+      }
+
+    def add_method(method):
+      spacing = '\n                  '
+      return METHOD_METADATA_TEMPLATE % {
+        'name': method.name,
+        'params': (spacing if method.parameters else '') + spacing.join(map(add_param, method.parameters))
+      }
+
+    method_metadata = '\n          '.join(map(add_method, all_methods))
+
+    service_metadata = SERVICE_METADATA_TEMPLATE % {
+            'package': PACKAGE_NAME,
+            'methods': method_metadata,
+            'name': service.name
+          }
+    gen_file = os.path.join(package_dir, '%sMetadata.java' % service.name)
+    log('Generating service metadata file %s' % gen_file)
+    with open(gen_file, 'w') as f:
+      print(service_metadata, file=f)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/main/thrift/org/apache/aurora/gen/api.thrift
----------------------------------------------------------------------
diff --git a/src/main/thrift/org/apache/aurora/gen/api.thrift b/src/main/thrift/org/apache/aurora/gen/api.thrift
index 978525e..54b8985 100644
--- a/src/main/thrift/org/apache/aurora/gen/api.thrift
+++ b/src/main/thrift/org/apache/aurora/gen/api.thrift
@@ -17,13 +17,18 @@ namespace py gen.apache.aurora.api
 
 // Thrift interface definition for the aurora scheduler.
 
+/*
+ * TODO(wfarner): It would be nice if we could put some HTML tags here, but the regex doesn't handle them.
+ * The result of an API operation.  A result may only be specified when the response code is OK.
+ */
 enum ResponseCode {
   INVALID_REQUEST = 0,
   OK              = 1,
   ERROR           = 2,
   WARNING         = 3,
   AUTH_FAILED     = 4,
-  LOCK_ERROR      = 5  // Raised when a Lock-protected operation failed due to lock validation.
+  /** Raised when a Lock-protected operation failed due to lock validation. */
+  LOCK_ERROR      = 5
 }
 
 const i32 THRIFT_API_VERSION = 3
@@ -44,18 +49,25 @@ struct Identity {
 }
 
 struct SessionKey {
-  4: optional string mechanism  // The name of the authentication mechanism, which
-                                // instructs the server how to interpret the data field.
-  5: optional binary data       // A blob of data that the server may use for authentication.
+  /**
+   * The name of the authentication mechanism, which instructs the server how to interpret the data
+   * field.
+   */
+  4: optional string mechanism
+  /** A blob of data that the server may use for authentication. */
+  5: optional binary data
 }
 
 struct ResourceAggregate {
-  1: double numCpus  // Number of CPU cores allotted.
-  2: i64 ramMb       // Megabytes of RAM allotted.
-  3: i64 diskMb      // Megabytes of disk space allotted.
+  /** Number of CPU cores allotted. */
+  1: double numCpus
+  /** Megabytes of RAM allotted. */
+  2: i64 ramMb
+  /** Megabytes of disk space allotted. */
+  3: i64 diskMb
 }
 
-// A single host attribute.
+/** A single host attribute. */
 struct Attribute {
   1: string name
   2: set<string> values
@@ -68,7 +80,7 @@ enum MaintenanceMode {
   DRAINED   = 4
 }
 
-// The attributes assigned to a host.
+/** The attributes assigned to a host. */
 struct HostAttributes {
   1: string          host
   2: set<Attribute>  attributes
@@ -76,27 +88,33 @@ struct HostAttributes {
   4: optional string slaveId
 }
 
-// A constraint that specifies an explicit set of values, at least one of which must be present
-// on a host for a task to be scheduled there.
+/**
+ * A constraint that specifies an explicit set of values, at least one of which must be present
+ * on a host for a task to be scheduled there.
+ */
 struct ValueConstraint {
-  1: bool negated       // If true, treat this as a 'not' - to avoid specific values.
+  /** If true, treat this as a 'not' - to avoid specific values. */
+  1: bool negated
   2: set<string> values
 }
 
-// A constraint the specifies the maximum number of active tasks on a host with a matching
-// attribute that may be scheduled simultaneously.
+/**
+ * A constraint that specifies the maximum number of active tasks on a host with a matching
+ * attribute that may be scheduled simultaneously.
+ */
 struct LimitConstraint {
   1: i32 limit
 }
 
-// Types of constraints that may be applied to a task.
+/** Types of constraints that may be applied to a task. */
 union TaskConstraint {
   1: ValueConstraint value
   2: LimitConstraint limit
 }
 
-// A constraint that defines whether a task may be scheduled on a host.
+/** A constraint that defines whether a task may be scheduled on a host. */
 struct Constraint {
+  /** Mesos slave attribute that the constraint is matched against. */
   1: string name
   2: TaskConstraint constraint
 }
@@ -107,119 +125,159 @@ struct Package {
   3: i32 version
 }
 
-// Arbitrary key-value metadata to be included into TaskConfig.
+/** Arbitrary key-value metadata to be included into TaskConfig. */
 struct Metadata {
   1: string key
   2: string value
 }
 
-// A unique identifier for a Job.
+/** A unique identifier for a Job. */
 struct JobKey {
-  1: string role        // Mesos role (Unix service account), for example "mesos"
-  2: string environment // Environment, for example "devel"
-  3: string name        // Name, for example "labrat"
+  /** User role (Unix service account), for example "mesos" */
+  1: string role
+  /** Environment, for example "devel" */
+  2: string environment
+  /** Name, for example "labrat" */
+  3: string name
 }
 
-// A unique lock key.
+/** A unique lock key. */
 union LockKey {
   1: JobKey job
 }
 
-// A generic lock struct to facilitate context specific resource/operation serialization.
+/** A generic lock struct to facilitate context specific resource/operation serialization. */
 struct Lock {
-  1: LockKey key                // ID of the lock - unique per storage
-  2: string token               // UUID - facilitating soft lock authorization
-  3: string user                // Lock owner
-  4: i64 timestampMs            // Lock creation timestamp in milliseconds
-  5: optional string message    // Optional message to record with the lock
-}
-
-// Defines the required lock validation level.
+  /** ID of the lock - unique per storage */
+  1: LockKey key
+  /** UUID - facilitating soft lock authorization */
+  2: string token
+  /** Lock creator */
+  3: string user
+  /** Lock creation timestamp in milliseconds */
+  4: i64 timestampMs
+  /** Optional message to record with the lock */
+  5: optional string message
+}
+
+/** Defines the required lock validation level. */
 enum LockValidation {
-  CHECKED   = 0   // The lock must be valid in order to be released.
-  UNCHECKED = 1   // The lock will be released without validation (aka "force release").
+  /** The lock must be valid in order to be released. */
+  CHECKED   = 0
+  /** The lock will be released without validation (aka "force release"). */
+  UNCHECKED = 1
 }
 
-// A unique identifier for the active task within a job.
+/** A unique identifier for the active task within a job. */
 struct InstanceKey {
-  1: JobKey jobKey   // Key identifying the job.
-  2: i32 instanceId  // Unique instance ID for the active task in a job.
+  /** Key identifying the job. */
+  1: JobKey jobKey
+  /** Unique instance ID for the active task in a job. */
+  2: i32 instanceId
 }
 
 struct ExecutorConfig {
-  1: string name    // Name identifying the Executor.
-  2: string data    // Executor configuration data.
+  /** Name identifying the Executor. */
+  1: string name
+  /** Executor configuration data. */
+  2: string data
 }
 
-// Description of the tasks contained within a job.
+/** Description of the tasks contained within a job. */
 struct TaskConfig {
-                                             // TODO(William Farner): Store a JobKey instead.
- 17: Identity owner                          // contains the role component of JobKey
- 26: string environment                      // contains the environment component of JobKey
-  3: string jobName                          // contains the name component of JobKey
+ // TODO(William Farner): Store a JobKey instead.
+ /** contains the role component of JobKey */
+ 17: Identity owner
+ /** contains the environment component of JobKey */
+ 26: string environment
+ /** contains the name component of JobKey */
+  3: string jobName
   7: bool isService
   8: double numCpus
   9: i64 ramMb
  10: i64 diskMb
  11: i32 priority
  13: i32 maxTaskFailures
- 18: optional bool production                // Whether this is a production task, which can preempt
-                                             // non-production tasks.
+ /** Whether this is a production task, which can preempt non-production tasks. */
+ 18: optional bool production
+
  20: set<Constraint> constraints
- 21: set<string> requestedPorts              // a list of named ports this task requests
- 22: optional map<string, string> taskLinks  // Custom links to include when displaying this task
-                                             // on the scheduler dashboard.  Keys are anchor text,
-                                             // values are URLs.
-                                             // Wildcards are supported for dynamic link
-                                             // crafting based on host, ports, instance, etc.
+ /** a list of named ports this task requests */
+ 21: set<string> requestedPorts
+
+ /**
+  * Custom links to include when displaying this task on the scheduler dashboard. Keys are anchor
+  * text, values are URLs. Wildcards are supported for dynamic link crafting based on host, ports,
+  * instance, etc.
+  */
+ 22: optional map<string, string> taskLinks
  23: optional string contactEmail
- 25: optional ExecutorConfig executorConfig  // Executor configuration
- 27: optional set<Metadata> metadata         // Used to display additional details in the UI.
+ /** Executor configuration */
+ 25: optional ExecutorConfig executorConfig
+ /** Used to display additional details in the UI. */
+ 27: optional set<Metadata> metadata
 }
 
-// Defines the policy for launching a new cron job when one is already running.
+/** Defines the policy for launching a new cron job when one is already running. */
 enum CronCollisionPolicy {
-  KILL_EXISTING = 0,  // Kills the existing job with the colliding name, and runs the new cron job.
-  CANCEL_NEW    = 1,  // Cancels execution of the new job, leaving the running job in tact.
-  RUN_OVERLAP   = 2   // DEPRECATED. For existing jobs, treated the same as CANCEL_NEW. createJob
-                      // will reject jobs with this policy.
-}
-
-// Description of an aurora job.
-// A list of task descriptions must be specified, which may be
-// heterogeneous.  One task will be scheduled for each task description.
-// The tuple (name, environment, owner.role) must be unique.
+  /** Kills the existing job with the colliding name, and runs the new cron job. */
+  KILL_EXISTING = 0,
+  /** Cancels execution of the new job, leaving the running job intact. */
+  CANCEL_NEW    = 1,
+  /**
+   * DEPRECATED. For existing jobs, treated the same as CANCEL_NEW.
+   * createJob will reject jobs with this policy.
+   */
+  RUN_OVERLAP   = 2
+}
+
+/**
+ * Description of an Aurora job. One task will be scheduled for each instance within the job.
+ */
 struct JobConfiguration {
-  9: JobKey key                               // Key for this job. If not specified
-                                              // name, owner.role, and a reasonable default
-                                              // environment are used to construct it server-side.
-                                              // TODO(William Farner): Deprecate Identity and
-                                              // use JobKey instead (MESOS-4006).
-  7: Identity owner                           // Owner of this job.
-  4: string cronSchedule                      // If present, the job will be handled as a cron job
-                                              // with this crontab-syntax schedule.
-  5: CronCollisionPolicy cronCollisionPolicy  // Collision policy to use when handling overlapping
-                                              // cron runs.  Default is KILL_EXISTING.
-  6: TaskConfig taskConfig                    // Task configuration for this job.
-  8: i32 instanceCount                        // The number of instances in the job.  Generated
-                                              // instance IDs for tasks will be in the range
-                                              // [0, instances).
+  /**
+   * Key for this job. If not specified name, owner.role, and a reasonable default environment are
+   * used to construct it server-side.
+   */
+  9: JobKey key
+  // TODO(William Farner): Deprecate Identity and
+  // use JobKey instead (MESOS-4006).
+  /** Owner of this job. */
+  7: Identity owner
+  /**
+   * If present, the job will be handled as a cron job with this crontab-syntax schedule.
+   */
+  4: string cronSchedule
+  /** Collision policy to use when handling overlapping cron runs.  Default is KILL_EXISTING. */
+  5: CronCollisionPolicy cronCollisionPolicy
+  /** Task configuration for this job. */
+  6: TaskConfig taskConfig
+  /**
+   * The number of instances in the job. Generated instance IDs for tasks will be in the range
+   * [0, instances).
+   */
+  8: i32 instanceCount
 }
 
 struct JobStats {
- 1: i32 activeTaskCount    // Number of tasks in active state for this job.
- 2: i32 finishedTaskCount  // Number of tasks in finished state for this job.
- 3: i32 failedTaskCount    // Number of failed tasks for this job.
- 4: i32 pendingTaskCount   // Number of tasks in pending state for this job.
+  /** Number of tasks in active state for this job. */
+  1: i32 activeTaskCount
+  /** Number of tasks in finished state for this job. */
+  2: i32 finishedTaskCount
+  /** Number of failed tasks for this job. */
+  3: i32 failedTaskCount
+  /** Number of tasks in pending state for this job. */
+  4: i32 pendingTaskCount
 }
 
 struct JobSummary {
   1: JobConfiguration job
   2: JobStats stats
-  3: optional i64 nextCronRunMs  // Timestamp of next cron run in ms since epoch, for a cron job
+  /** Timestamp of next cron run in ms since epoch, for a cron job */
+  3: optional i64 nextCronRunMs
 }
 
-// A request to add the following instances to an existing job. Used by addInstances.
+/** A request to add the following instances to an existing job. Used by addInstances. */
 struct AddInstancesConfig {
   1: JobKey key
   2: TaskConfig taskConfig
@@ -241,49 +299,54 @@ struct PopulateJobResult {
 }
 
 struct GetQuotaResult {
-  1: ResourceAggregate quota                       // Total allocated resource quota.
-  2: optional ResourceAggregate prodConsumption    // Resources consumed by production jobs.
-  3: optional ResourceAggregate nonProdConsumption // Resources consumed by non-production jobs.
+  /** Total allocated resource quota. */
+  1: ResourceAggregate quota
+  /** Resources consumed by production jobs. */
+  2: optional ResourceAggregate prodConsumption
+  /** Resources consumed by non-production jobs. */
+  3: optional ResourceAggregate nonProdConsumption
 }
 
-// Wraps return results for the acquireLock API.
+/** Wraps return results for the acquireLock API. */
 struct AcquireLockResult {
-  1: Lock lock			// Acquired Lock instance.
+  /** Acquired Lock instance. */
+  1: Lock lock
 }
 
-// States that a task may be in.
+/** States that a task may be in. */
 enum ScheduleStatus {
   // TODO(maxim): This state does not add much value. Consider dropping it completely.
-  // Initial state for a task.  A task will remain in this state until it has been persisted.
+  /** Initial state for a task.  A task will remain in this state until it has been persisted. */
   INIT             = 11,
-  // The task will be rescheduled, but is being throttled for restarting too frequently.
+  /** The task will be rescheduled, but is being throttled for restarting too frequently. */
   THROTTLED        = 16,
-  // Task is awaiting assignment to a slave.
+  /** Task is awaiting assignment to a slave. */
   PENDING          = 0,
-  // Task has been assigned to a slave.
+  /** Task has been assigned to a slave. */
   ASSIGNED         = 9,
-  // Slave has acknowledged receipt of task and is bootstrapping the task.
+  /** Slave has acknowledged receipt of task and is bootstrapping the task. */
   STARTING         = 1,
-  // The task is running on the slave.
+  /** The task is running on the slave. */
   RUNNING          = 2,
-  // The task terminated with an exit code of zero.
+  /** The task terminated with an exit code of zero. */
   FINISHED         = 3,
-  // The task is being preempted by another task.
+  /** The task is being preempted by another task. */
   PREEMPTING       = 13,
-  // The task is being restarted in response to a user request.
+  /** The task is being restarted in response to a user request. */
   RESTARTING       = 12,
-  // The task is being restarted in response to a host maintenance request.
+  /** The task is being restarted in response to a host maintenance request. */
   DRAINING         = 17,
-  // The task terminated with a non-zero exit code.
+  /** The task terminated with a non-zero exit code. */
   FAILED           = 4,
-  // Execution of the task was terminated by the system.
+  /** Execution of the task was terminated by the system. */
   KILLED           = 5,
-  // The task is being forcibly killed.
+  /** The task is being forcibly killed. */
   KILLING          = 6,
-  // A fault in the task environment has caused the system to believe the task no longer exists.
-  // This can happen, for example, when a slave process disappears.
+  /**
+   * A fault in the task environment has caused the system to believe the task no longer exists.
+   * This can happen, for example, when a slave process disappears.
+   */
   LOST             = 7,
-  // The task sandbox has been deleted by the executor.
+  /** The task sandbox has been deleted by the executor. */
   SANDBOX_DELETED  = 10
 }
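
For a sense of how callers typically consume this enum, the sketch below groups the states into
terminal and live sets. This is a minimal illustration, not code from this patch; the grouping
follows the comments above, and the ScheduleStatusUtil class name is invented for the example.

    import java.util.EnumSet;

    import org.apache.aurora.gen.ScheduleStatus;

    final class ScheduleStatusUtil {
      // End states per the comments above: a task generation will not
      // transition further once it reaches one of these.
      private static final EnumSet<ScheduleStatus> TERMINAL = EnumSet.of(
          ScheduleStatus.FINISHED,
          ScheduleStatus.FAILED,
          ScheduleStatus.KILLED,
          ScheduleStatus.LOST,
          ScheduleStatus.SANDBOX_DELETED);

      private ScheduleStatusUtil() { }

      static boolean isTerminal(ScheduleStatus status) {
        return TERMINAL.contains(status);
      }
    }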
 
@@ -332,44 +395,65 @@ const string GOOD_IDENTIFIER_PATTERN_JVM = GOOD_IDENTIFIER_PATTERN
 // Python: Use with re.compile
 const string GOOD_IDENTIFIER_PATTERN_PYTHON = GOOD_IDENTIFIER_PATTERN
 
-// Event marking a state transition within a task's lifecycle.
+/** Event marking a state transition within a task's lifecycle. */
 struct TaskEvent {
-  // Epoch timestamp in milliseconds.
+  /** Epoch timestamp in milliseconds. */
   1: i64 timestamp
-  // New status of the task.
+  /** New status of the task. */
   2: ScheduleStatus status
-  // Audit message that explains why a transition occurred.
+  /** Audit message that explains why a transition occurred. */
   3: optional string message
-  // Hostname of the scheduler machine that performed the event.
+  /** Hostname of the scheduler machine that performed the event. */
   4: optional string scheduler
 }
 
-// A task assignment that is provided to a slave.
+/** A task assignment that is provided to an executor. */
 struct AssignedTask {
-  1: string taskId                   // The mesos task ID for this task.  Guaranteed to be globally
-                                     // unique.
-  2: string slaveId                  // The mesos slave ID that this task has been assigned to.
-                                     // This will not be populated for a PENDING task.
-  3: string slaveHost                // The name of the machine that this task has been assigned to.
-                                     // This will not be populated for a PENDING task.
-  4: TaskConfig task                 // Information about how to run this task.
-  5: map<string, i32> assignedPorts  // Ports reserved on the machine while this task is running.
-  6: i32 instanceId                  // The instance ID assigned to this task.
-                                     // Instance IDs must be unique and contiguous within a
-                                     // job, and will be in the range [0, N-1] (inclusive)
-                                     // for a job that has N instances.
-}
-
-// A task that has been scheduled.
+  /** The mesos task ID for this task.  Guaranteed to be globally unique. */
+  1: string taskId
+
+  /**
+   * The mesos slave ID that this task has been assigned to.
+   * This will not be populated for a PENDING task.
+   */
+  2: string slaveId
+
+  /**
+   * The name of the machine that this task has been assigned to.
+   * This will not be populated for a PENDING task.
+   */
+  3: string slaveHost
+
+  /** Information about how to run this task. */
+  4: TaskConfig task
+  /** Ports reserved on the machine while this task is running. */
+  5: map<string, i32> assignedPorts
+
+  /**
+   * The instance ID assigned to this task. Instance IDs must be unique and contiguous within a
+   * job, and will be in the range [0, N-1] (inclusive) for a job that has N instances.
+   */
+  6: i32 instanceId
+}
+
+/** A task that has been scheduled. */
 struct ScheduledTask {
-  1: AssignedTask assignedTask   // The task that was scheduled.
-  2: ScheduleStatus status       // The current status of this task.
-  3: i32 failureCount            // The number of failures that this task has accumulated over the
-                                 // multi-generational history of this task.
-  4: list<TaskEvent> taskEvents  // State change history for this task.
-  5: string ancestorId           // The task ID of the previous generation of this task.  When a
-                                 // task is automatically rescheduled, a copy of the task is created
-                                 // and ancestor ID of the previous task's task ID.
+  /** The task that was scheduled. */
+  1: AssignedTask assignedTask
+  /** The current status of this task. */
+  2: ScheduleStatus status
+  /**
+   * The number of failures that this task has accumulated over the multi-generational history of
+   * this task.
+   */
+  3: i32 failureCount
+  /** State change history for this task. */
+  4: list<TaskEvent> taskEvents
+  /**
+   * The task ID of the previous generation of this task.  When a task is automatically
+   * rescheduled, a copy of the task is created, and its ancestor ID is set to the previous
+   * task's task ID.
+   */
+  5: string ancestorId
 }
 
 struct ScheduleStatusResult {
@@ -380,8 +464,10 @@ struct GetJobsResult {
   1: set<JobConfiguration> configs
 }
 
-// Contains a set of restrictions on matching tasks where all restrictions must be met (terms are
-// AND'ed together).
+/**
+ * Contains a set of restrictions on matching tasks where all restrictions must be met
+ * (terms are AND'ed together).
+ */
 struct TaskQuery {
   8: Identity owner               // TODO(wfarner): Deprecate Identity
   9: string environment
@@ -460,11 +546,12 @@ struct GetPendingReasonResult {
   1: set<PendingReason> reasons
 }
 
-// meta-data about the thrift server that is wrapped around every thrift response
+/** Information about the scheduler. */
 struct ServerInfo {
   1: string clusterName
   2: i32 thriftAPIVersion
-  3: string statsUrlPrefix  // A url prefix for job container stats.
+  /** A URL prefix for job container stats. */
+  3: string statsUrlPrefix
 }
 
 union Result {
@@ -485,7 +572,6 @@ union Result {
   19: GetLocksResult getLocksResult
   20: ConfigSummaryResult configSummaryResult
   21: GetPendingReasonResult getPendingReasonResult
-
 }
 
 struct ResponseDetail {
@@ -499,48 +585,61 @@ struct Response {
   // TODO(wfarner): Remove version field in 0.7.0. (AURORA-467)
   4: APIVersion DEPRECATEDversion
   5: ServerInfo serverInfo
+  /** Payload from the invoked RPC. */
   3: optional Result result
+  /**
+   * Messages from the server relevant to the request, such as warnings or use of deprecated
+   * features.
+   */
   6: list<ResponseDetail> details
 }
 
 // A service that provides all the read only calls to the Aurora scheduler.
 service ReadOnlyScheduler {
-  // Returns a summary of the jobs grouped by role.
+  /** Returns a summary of the jobs grouped by role. */
   Response getRoleSummary()
 
-  // Returns a summary of jobs, optionally only those owned by a specific role.
+  /** Returns a summary of jobs, optionally only those owned by a specific role. */
   Response getJobSummary(1: string role)
 
-  // Fetches the status of tasks.
+  /** Fetches the status of tasks. */
   Response getTasksStatus(1: TaskQuery query)
 
-  // Same as getTaskStatus but without the TaskConfig.ExecutorConfig data set.
-  // This is an interim solution until we have a better way to query TaskConfigs (AURORA-541).
+  /**
+   * Same as getTasksStatus but without the TaskConfig.ExecutorConfig data set.
+   * This is an interim solution until we have a better way to query TaskConfigs (AURORA-541).
+   */
   Response getTasksWithoutConfigs(1: TaskQuery query)
 
-  // Returns user-friendly reasons (if available) for tasks retained in PENDING state.
+  /** Returns user-friendly reasons (if available) for tasks retained in PENDING state. */
   Response getPendingReason(1: TaskQuery query)
 
-  // Fetches the configuration summary of active tasks for the specified job.
+  /** Fetches the configuration summary of active tasks for the specified job. */
   Response getConfigSummary(1: JobKey job)
 
-  // Fetches the status of jobs.
-  // ownerRole is optional, in which case all jobs are returned.
+  /**
+   * Fetches the status of jobs.
+   * ownerRole is optional; if omitted, all jobs are returned.
+   */
   Response getJobs(1: string ownerRole)
 
-  // Fetches the quota allocated for a user.
+  /** Fetches the quota allocated for a user. */
   Response getQuota(1: string ownerRole)
 
   // TODO(Suman Karumuri): Delete this API once it is no longer used.
-  // NOTE: This API is deprecated.
-  // Returns the current version of the API implementation
+  /**
+   * Returns the current version of the API implementation.
+   * NOTE: This method is deprecated.
+   */
   Response getVersion()
 
-  // Populates fields in a job configuration as though it were about to be run.
-  // This can be used to diff a configuration running tasks.
+  /**
+   * Populates fields in a job configuration as though it were about to be run.
+   * This can be used to diff a configuration against running tasks.
+   */
   Response populateJobConfig(1: JobConfiguration description)
 
-  // Returns all stored context specific resource/operation locks.
+  /** Returns all stored context specific resource/operation locks. */
   Response getLocks()
 }
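
To make the read-only surface concrete, here is a minimal sketch of calling getTasksStatus
through the generated interface. Everything except the method and struct names (which come from
this file) is illustrative; transport and client construction are elided, and the "devel"
environment value is a placeholder.

    import org.apache.thrift.TException;

    import org.apache.aurora.gen.ReadOnlyScheduler;
    import org.apache.aurora.gen.Response;
    import org.apache.aurora.gen.TaskQuery;

    final class QueryExample {
      // Fetches the status of all tasks in a hypothetical "devel" environment.
      static Response develTasks(ReadOnlyScheduler.Iface scheduler) throws TException {
        TaskQuery query = new TaskQuery().setEnvironment("devel");
        return scheduler.getTasksStatus(query);
      }
    }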
 
@@ -548,57 +647,76 @@ service ReadOnlyScheduler {
 // last argument. Note that the order in this file is what matters, and message numbers should still
 // never be reused.
 service AuroraSchedulerManager extends ReadOnlyScheduler {
-  // Creates a new job.  The request will be denied if a job with the provided
-  // name already exists in the cluster.
+  /**
+   * Creates a new job.  The request will be denied if a job with the provided name already exists
+   * in the cluster.
+   */
   Response createJob(1: JobConfiguration description, 3: Lock lock, 2: SessionKey session)
 
-  // Enters a job into the cron schedule, without actually starting the job.
-  // If the job is already present in the schedule, this will update the schedule
-  // entry with the new configuration.
+  /**
+   * Enters a job into the cron schedule, without actually starting the job.
+   * If the job is already present in the schedule, this will update the schedule entry with the new
+   * configuration.
+   */
   Response scheduleCronJob(1: JobConfiguration description, 3: Lock lock, 2: SessionKey session)
 
-  // Removes a job from the cron schedule. The request will be denied if the
-  // job was not previously scheduled with scheduleCronJob.
+  /**
+   * Removes a job from the cron schedule. The request will be denied if the job was not previously
+   * scheduled with scheduleCronJob.
+   */
   Response descheduleCronJob(4: JobKey job, 3: Lock lock, 2: SessionKey session)
 
-  // Starts a cron job immediately.  The request will be denied if the specified job does not
-  // exist for the role account, or the job is not a cron job.
+  /**
+   * Starts a cron job immediately.  The request will be denied if the specified job does not
+   * exist for the role account, or the job is not a cron job.
+   */
   Response startCronJob(4: JobKey job, 3: SessionKey session)
 
-  // Restarts a batch of shards.
+  /** Restarts a batch of shards. */
   Response restartShards(5: JobKey job, 3: set<i32> shardIds, 6: Lock lock 4: SessionKey session)
 
-  // Initiates a kill on tasks.
+  /** Initiates a kill on tasks. */
   Response killTasks(1: TaskQuery query, 3: Lock lock, 2: SessionKey session)
 
-  // Adds new instances specified by the AddInstancesConfig.
-  // A job represented by the JobKey must be protected by Lock.
+  /**
+   * Adds new instances specified by the AddInstancesConfig. The job represented by the JobKey
+   * must be protected by the given Lock.
+   */
   Response addInstances(
       1: AddInstancesConfig config,
       2: Lock lock,
       3: SessionKey session)
 
-  // Creates and saves a new Lock instance guarding against multiple
-  // mutating operations within the context defined by LockKey.
+  /**
+   * Creates and saves a new Lock instance guarding against multiple mutating operations within the
+   * context defined by LockKey.
+   */
   Response acquireLock(1: LockKey lockKey, 2: SessionKey session)
 
-  // Releases the lock acquired earlier in acquireLock call.
+  /** Releases the lock acquired earlier in the acquireLock call. */
   Response releaseLock(1: Lock lock, 2: LockValidation validation, 3: SessionKey session)
 
-  // Replaces the template (configuration) for the existing cron job.
-  // The cron job template (configuration) must exist for the call to succeed.
+  /**
+   * Replaces the template (configuration) for the existing cron job.
+   * The cron job template (configuration) must exist for the call to succeed.
+   */
   Response replaceCronTemplate(1: JobConfiguration config, 2: Lock lock, 3: SessionKey session)
 }
 
 struct InstanceConfigRewrite {
-  1: InstanceKey instanceKey   // Key for the task to rewrite.
-  2: TaskConfig oldTask        // The original configuration.
-  3: TaskConfig rewrittenTask  // The rewritten configuration.
+  /** Key for the task to rewrite. */
+  1: InstanceKey instanceKey
+  /** The original configuration. */
+  2: TaskConfig oldTask
+  /** The rewritten configuration. */
+  3: TaskConfig rewrittenTask
 }
 
 struct JobConfigRewrite {
-  1: JobConfiguration oldJob        // The original job configuration.
-  2: JobConfiguration rewrittenJob  // The rewritten job configuration.
+  /** The original job configuration. */
+  1: JobConfiguration oldJob
+  /** The rewritten job configuration. */
+  2: JobConfiguration rewrittenJob
 }
 
 union ConfigRewrite {
@@ -613,57 +731,61 @@ struct RewriteConfigsRequest {
 // It would be great to compose these services rather than extend, but that won't be possible until
 // https://issues.apache.org/jira/browse/THRIFT-66 is resolved.
 service AuroraAdmin extends AuroraSchedulerManager {
-  // Assign quota to a user.  This will overwrite any pre-existing quota for the user.
+  /** Assigns quota to a user.  This will overwrite any pre-existing quota for the user. */
   Response setQuota(1: string ownerRole, 2: ResourceAggregate quota, 3: SessionKey session)
 
-  // Forces a task into a specific state.  This does not guarantee the task will enter the given
-  // state, as the task must still transition within the bounds of the state machine.  However,
-  // it attempts to enter that state via the state machine.
+  /**
+   * Forces a task into a specific state.  This does not guarantee the task will enter the given
+   * state, as the task must still transition within the bounds of the state machine.  However,
+   * it attempts to enter that state via the state machine.
+   */
   Response forceTaskState(
       1: string taskId,
       2: ScheduleStatus status,
       3: SessionKey session)
 
-  // Immediately writes a storage snapshot to disk.
+  /** Immediately writes a storage snapshot to disk. */
   Response performBackup(1: SessionKey session)
 
-  // Lists backups that are available for recovery.
+  /** Lists backups that are available for recovery. */
   Response listBackups(1: SessionKey session)
 
-  // Loads a backup to an in-memory storage.  This must precede all other recovery operations.
+  /** Loads a backup into in-memory storage.  This must precede all other recovery operations. */
   Response stageRecovery(1: string backupId, 2: SessionKey session)
 
-  // Queries for tasks in a staged recovery.
+  /** Queries for tasks in a staged recovery. */
   Response queryRecovery(1: TaskQuery query, 2: SessionKey session)
 
-  // Deletes tasks from a staged recovery.
+  /** Deletes tasks from a staged recovery. */
   Response deleteRecoveryTasks(1: TaskQuery query, 2: SessionKey session)
 
-  // Commits a staged recovery, completely replacing the previous storage state.
+  /** Commits a staged recovery, completely replacing the previous storage state. */
   Response commitRecovery(1: SessionKey session)
 
-  // Unloads (aborts) a staged recovery.
+  /** Unloads (aborts) a staged recovery. */
   Response unloadRecovery(1: SessionKey session)
 
-  // Put the given hosts into maintenance mode.
+  /** Puts the given hosts into maintenance mode. */
   Response startMaintenance(1: Hosts hosts, 2: SessionKey session)
 
-  // Ask scheduler to begin moving tasks scheduled on given hosts.
+  /** Asks the scheduler to begin moving tasks scheduled on the given hosts. */
   Response drainHosts(1: Hosts hosts, 2: SessionKey session)
 
-  // Retrieve the current maintenance states for a group of hosts.
+  /** Retrieves the current maintenance states for a group of hosts. */
   Response maintenanceStatus(1: Hosts hosts, 2: SessionKey session)
 
-  // Set the given hosts back into serving mode.
+  /** Sets the given hosts back into serving mode. */
   Response endMaintenance(1: Hosts hosts, 2: SessionKey session)
 
-  // Start a storage snapshot and block until it completes.
+  /** Starts a storage snapshot and blocks until it completes. */
   Response snapshot(1: SessionKey session)
 
-  // Forcibly rewrites the stored definition of user configurations.  This is intended to be used
-  // in a controlled setting, primarily to migrate pieces of configurations that are opaque to the
-  // scheduler (e.g. executorConfig).
-  // The scheduler may do some validation of the rewritten configurations, but it is important
-  // that the caller take care to provide valid input and alter only necessary fields.
+  /**
+   * Forcibly rewrites the stored definition of user configurations.  This is intended to be used
+   * in a controlled setting, primarily to migrate pieces of configurations that are opaque to the
+   * scheduler (e.g. executorConfig).
+   * The scheduler may do some validation of the rewritten configurations, but it is important
+   * that the caller take care to provide valid input and alter only necessary fields.
+   */
   Response rewriteConfigs(1: RewriteConfigsRequest request, 2: SessionKey session)
 }
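
Since the point of this change is to expose the services above over HTTP/JSON, a sketch of a
client call follows. The Jersey client and GsonMessageBodyHandler usage mirrors the test setup
below; the /apibeta mount point and method-name-in-the-URL convention are assumptions for
illustration, so consult the servlet's generated help page for the authoritative layout.

    import javax.ws.rs.core.MediaType;

    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.ClientResponse;
    import com.sun.jersey.api.client.config.ClientConfig;
    import com.sun.jersey.api.client.config.DefaultClientConfig;

    import org.apache.aurora.scheduler.http.api.GsonMessageBodyHandler;

    final class ApiBetaClientSketch {
      // Invokes a scheduler RPC by name, posting its arguments as JSON.
      // The URL layout here is assumed, not guaranteed by this patch.
      static ClientResponse call(String schedulerUrl, String method, String jsonArgs) {
        ClientConfig config = new DefaultClientConfig();
        config.getClasses().add(GsonMessageBodyHandler.class);
        Client client = Client.create(config);
        return client.resource(schedulerUrl + "/apibeta/" + method)
            .type(MediaType.APPLICATION_JSON)
            .accept(MediaType.APPLICATION_JSON)
            .post(ClientResponse.class, jsonArgs);
      }
    }

For example, call("http://scheduler.example.com:8081", "getJobSummary", "{\"role\": \"www-data\"}")
would map to the getJobSummary(1: string role) RPC above; the host and role values are placeholders.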

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/test/java/org/apache/aurora/scheduler/http/ServletFilterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/aurora/scheduler/http/ServletFilterTest.java b/src/test/java/org/apache/aurora/scheduler/http/ServletFilterTest.java
new file mode 100644
index 0000000..0399ec0
--- /dev/null
+++ b/src/test/java/org/apache/aurora/scheduler/http/ServletFilterTest.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.aurora.scheduler.http;
+
+import javax.ws.rs.core.HttpHeaders;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.twitter.thrift.Endpoint;
+import com.twitter.thrift.ServiceInstance;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class ServletFilterTest extends ServletModuleTest {
+
+  protected ClientResponse get(String path) {
+    return getRequestBuilder(path)
+        .header(HttpHeaders.ACCEPT_ENCODING, "gzip")
+        .get(ClientResponse.class);
+  }
+
+  private void assertContentEncoding(String path, Optional<String> encoding) {
+    assertEquals(encoding.orNull(), get(path).getHeaders().getFirst(HttpHeaders.CONTENT_ENCODING));
+  }
+
+  private void assertGzipEncoded(String path) {
+    assertContentEncoding(path, Optional.of("gzip"));
+  }
+
+  @Test
+  public void testGzipEncoding() throws Exception {
+    replayAndStart();
+
+    assertContentEncoding("/", Optional.<String>absent());
+    assertGzipEncoded("/scheduler");
+    assertGzipEncoded("/scheduler/");
+    assertGzipEncoded("/scheduler/role");
+    assertGzipEncoded("/scheduler/role/");
+    assertGzipEncoded("/scheduler/role/env/");
+    assertGzipEncoded("/scheduler/role/env/job");
+    assertGzipEncoded("/scheduler/role/env/job/");
+  }
+
+  private void assertResponseStatus(String path, Status expectedStatus) {
+    ClientResponse response = get(path);
+    assertEquals(expectedStatus.getStatusCode(), response.getStatus());
+  }
+
+  private void setLeadingScheduler(String host, int port) {
+    ServiceInstance instance = new ServiceInstance()
+        .setAdditionalEndpoints(ImmutableMap.of("http", new Endpoint(host, port)));
+    schedulerWatcher.getValue().onChange(ImmutableSet.of(instance));
+  }
+
+  private void leaderRedirectSmokeTest(Status expectedStatus) {
+    assertResponseStatus("/scheduler", expectedStatus);
+    assertResponseStatus("/scheduler/", expectedStatus);
+    assertResponseStatus("/scheduler/role", expectedStatus);
+    assertResponseStatus("/scheduler/role/env", expectedStatus);
+    assertResponseStatus("/scheduler/role/env/job", expectedStatus);
+  }
+
+  @Test
+  public void testLeaderRedirect() throws Exception {
+    replayAndStart();
+
+    assertResponseStatus("/", Status.OK);
+
+    // Scheduler is assumed leader at this point, since no members are present in the service
+    // (not even this process).
+    leaderRedirectSmokeTest(Status.OK);
+
+    // This process is leading
+    setLeadingScheduler(httpServer.getHostName(), httpServer.getPort());
+    leaderRedirectSmokeTest(Status.OK);
+
+    setLeadingScheduler("otherHost", 1234);
+    leaderRedirectSmokeTest(Status.FOUND);
+    assertResponseStatus("/", Status.OK);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/b8dd4a1d/src/test/java/org/apache/aurora/scheduler/http/ServletModuleTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/aurora/scheduler/http/ServletModuleTest.java b/src/test/java/org/apache/aurora/scheduler/http/ServletModuleTest.java
index 90a001b..63c504e 100644
--- a/src/test/java/org/apache/aurora/scheduler/http/ServletModuleTest.java
+++ b/src/test/java/org/apache/aurora/scheduler/http/ServletModuleTest.java
@@ -15,11 +15,9 @@ package org.apache.aurora.scheduler.http;
 
 import java.net.InetSocketAddress;
 
-import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
 
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
+import com.google.common.base.Throwables;
 import com.google.common.testing.TearDown;
 import com.google.common.util.concurrent.RateLimiter;
 import com.google.inject.AbstractModule;
@@ -27,8 +25,11 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.TypeLiteral;
 import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.twitter.common.application.ShutdownRegistry.ShutdownRegistryImpl;
 import com.twitter.common.application.StartupRegistry;
 import com.twitter.common.application.modules.HttpModule;
 import com.twitter.common.application.modules.LifecycleModule;
@@ -37,17 +38,19 @@ import com.twitter.common.application.modules.LogModule;
 import com.twitter.common.application.modules.StatsModule;
 import com.twitter.common.base.Command;
 import com.twitter.common.net.pool.DynamicHostSet;
+import com.twitter.common.net.pool.DynamicHostSet.HostChangeMonitor;
 import com.twitter.common.testing.easymock.EasyMockTest;
 import com.twitter.common.util.BackoffStrategy;
-import com.twitter.thrift.Endpoint;
 import com.twitter.thrift.ServiceInstance;
 
 import org.apache.aurora.gen.AuroraAdmin;
 import org.apache.aurora.gen.ServerInfo;
 import org.apache.aurora.scheduler.async.OfferQueue;
 import org.apache.aurora.scheduler.async.RescheduleCalculator;
+import org.apache.aurora.scheduler.async.TaskGroups.TaskGroupsSettings;
 import org.apache.aurora.scheduler.async.TaskScheduler;
 import org.apache.aurora.scheduler.cron.CronJobManager;
+import org.apache.aurora.scheduler.http.api.GsonMessageBodyHandler;
 import org.apache.aurora.scheduler.quota.QuotaManager;
 import org.apache.aurora.scheduler.state.LockManager;
 import org.apache.aurora.scheduler.state.SchedulerCore;
@@ -56,34 +59,32 @@ import org.apache.aurora.scheduler.storage.entities.IServerInfo;
 import org.apache.aurora.scheduler.storage.testing.StorageTestUtil;
 import org.easymock.Capture;
 import org.junit.Before;
-import org.junit.Test;
 
-import static com.sun.jersey.api.client.ClientResponse.Status;
-import static com.twitter.common.application.ShutdownRegistry.ShutdownRegistryImpl;
-import static com.twitter.common.net.pool.DynamicHostSet.HostChangeMonitor;
-
-import static org.apache.aurora.scheduler.async.TaskGroups.TaskGroupsSettings;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.expect;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 
 /**
  * TODO(wfarner): Break apart ServletModule so test setup isn't so involved.
+ * TODO(wfarner): Come up with an approach for these tests that doesn't require starting an actual
+ * HTTP server for each test case.
  */
-public class ServletModuleTest extends EasyMockTest {
+public abstract class ServletModuleTest extends EasyMockTest {
 
-  private StorageTestUtil storage;
-  private InetSocketAddress httpServer;
-  private Capture<HostChangeMonitor<ServiceInstance>> schedulerWatcher;
+  private Injector injector;
+  protected StorageTestUtil storage;
+  protected InetSocketAddress httpServer;
+  protected Capture<HostChangeMonitor<ServiceInstance>> schedulerWatcher;
+  protected AuroraAdmin.Iface thrift;
 
   @Before
-  public void setUp() throws Exception {
+  public final void setUpServletModuleTest() throws Exception {
     storage = new StorageTestUtil(this);
     final DynamicHostSet<ServiceInstance> schedulers =
-        createMock(new Clazz<DynamicHostSet<ServiceInstance>>() {
-        });
+        createMock(new Clazz<DynamicHostSet<ServiceInstance>>() { });
 
-    Injector injector = Guice.createInjector(
+    injector = Guice.createInjector(
         new ServletModule(),
         new LogModule(),
         new StatsModule(),
@@ -107,7 +108,7 @@ public class ServletModuleTest extends EasyMockTest {
                 new TaskGroupsSettings(bindMock(BackoffStrategy.class), RateLimiter.create(1000)));
 
             bind(new TypeLiteral<DynamicHostSet<ServiceInstance>>() { }).toInstance(schedulers);
-            bindMock(AuroraAdmin.Iface.class);
+            thrift = bindMock(AuroraAdmin.Iface.class);
             bindMock(CronJobManager.class);
             bindMock(LockManager.class);
             bindMock(OfferQueue.class);
@@ -121,7 +122,9 @@ public class ServletModuleTest extends EasyMockTest {
 
     schedulerWatcher = createCapture();
     expect(schedulers.watch(capture(schedulerWatcher))).andReturn(createMock(Command.class));
+  }
 
+  protected void replayAndStart() {
     control.replay();
 
     final ShutdownRegistryImpl shutdownRegistry = injector.getInstance(ShutdownRegistryImpl.class);
@@ -131,75 +134,27 @@ public class ServletModuleTest extends EasyMockTest {
         shutdownRegistry.execute();
       }
     });
-    injector.getInstance(StartupRegistry.class).execute();
+    try {
+      injector.getInstance(StartupRegistry.class).execute();
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
     LocalServiceRegistry serviceRegistry = injector.getInstance(LocalServiceRegistry.class);
     httpServer = serviceRegistry.getAuxiliarySockets().get("http");
   }
 
-  private ClientResponse get(String path) {
-    Client client = Client.create();
-    // Disable redirects so we can unit test them.
-    client.setFollowRedirects(false);
-    WebResource resource = client.resource(
-        String.format("http://%s:%d%s", httpServer.getHostName(), httpServer.getPort(), path));
-    return resource.getRequestBuilder()
-        .header(HttpHeaders.ACCEPT_ENCODING, "gzip")
-        .get(ClientResponse.class);
-  }
-
-  private void assertContentEncoding(String path, Optional<String> encoding) {
-    assertEquals(encoding.orNull(), get(path).getHeaders().getFirst(HttpHeaders.CONTENT_ENCODING));
-  }
-
-  private void assertGzipEncoded(String path) {
-    assertContentEncoding(path, Optional.of("gzip"));
-  }
-
-  @Test
-  public void testGzipEncoding() throws Exception {
-    assertContentEncoding("/", Optional.<String>absent());
-    assertGzipEncoded("/scheduler");
-    assertGzipEncoded("/scheduler/");
-    assertGzipEncoded("/scheduler/role");
-    assertGzipEncoded("/scheduler/role/");
-    assertGzipEncoded("/scheduler/role/env/");
-    assertGzipEncoded("/scheduler/role/env/job");
-    assertGzipEncoded("/scheduler/role/env/job/");
+  protected String makeUrl(String path) {
+    return String.format("http://%s:%s%s", httpServer.getHostName(), httpServer.getPort(), path);
   }
 
-  private void assertResponseStatus(String path, Status expectedStatus) {
-    ClientResponse response = get(path);
-    assertEquals(expectedStatus.getStatusCode(), response.getStatus());
-  }
-
-  private void setLeadingScheduler(String host, int port) {
-    ServiceInstance instance = new ServiceInstance()
-        .setAdditionalEndpoints(ImmutableMap.of("http", new Endpoint(host, port)));
-    schedulerWatcher.getValue().onChange(ImmutableSet.of(instance));
-  }
-
-  private void leaderRedirectSmokeTest(Status expectedStatus) {
-    assertResponseStatus("/scheduler", expectedStatus);
-    assertResponseStatus("/scheduler/", expectedStatus);
-    assertResponseStatus("/scheduler/role", expectedStatus);
-    assertResponseStatus("/scheduler/role/env", expectedStatus);
-    assertResponseStatus("/scheduler/role/env/job", expectedStatus);
-  }
-
-  @Test
-  public void testLeaderRedirect() throws Exception {
-    assertResponseStatus("/", Status.OK);
-
-    // Scheduler is assumed leader at this point, since no members are present in the service
-    // (not even this process).
-    leaderRedirectSmokeTest(Status.OK);
-
-    // This process is leading
-    setLeadingScheduler(httpServer.getHostName(), httpServer.getPort());
-    leaderRedirectSmokeTest(Status.OK);
-
-    setLeadingScheduler("otherHost", 1234);
-    leaderRedirectSmokeTest(Status.FOUND);
-    assertResponseStatus("/", Status.OK);
+  protected WebResource.Builder getRequestBuilder(String path) {
+    assertNotNull("HTTP server must be started first", httpServer);
+    ClientConfig config = new DefaultClientConfig();
+    config.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE);
+    config.getClasses().add(GsonMessageBodyHandler.class);
+    Client client = Client.create(config);
+    // Disable redirects so we can unit test them.
+    client.setFollowRedirects(false);
+    return client.resource(makeUrl(path)).getRequestBuilder().accept(MediaType.APPLICATION_JSON);
   }
 }
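
With the base class refactored this way, a sibling test needs only to call replayAndStart() and
build requests through getRequestBuilder(). A minimal sketch follows; the test class and its
assertion are invented for illustration, under the assumption that the root page is served as in
the leader-redirect test above.

    import com.sun.jersey.api.client.ClientResponse;

    import org.junit.Test;

    import static org.junit.Assert.assertEquals;

    public class RootPageTest extends ServletModuleTest {

      // Smoke test: the root page should be served once the HTTP server is up.
      @Test
      public void testRootServesOk() throws Exception {
        replayAndStart();

        ClientResponse response = getRequestBuilder("/").get(ClientResponse.class);
        assertEquals(ClientResponse.Status.OK.getStatusCode(), response.getStatus());
      }
    }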