You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2016/12/07 21:10:05 UTC
[05/76] [abbrv] hadoop git commit: YARN-5461. Initial code ported
from slider-core module. (jianhe)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
new file mode 100644
index 0000000..a44448e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.management.resources;
+
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import javax.ws.rs.core.UriBuilder;
+import java.util.Map;
+
+/**
+ * REST representation of a single component: a map of its configuration
+ * properties plus a navigable href. Unknown JSON fields are ignored on
+ * input; null fields are omitted from serialized output.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class ComponentResource {
+ // component property map; may be null when default-constructed
+ private final Map<String, String> props;
+ // URL of this resource; mutable so it can be filled in after construction
+ private String href;
+
+ /** No-arg constructor required for JSON deserialization. */
+ public ComponentResource() {
+ this(null, null, null, null);
+ }
+
+ /**
+ * Build a component resource.
+ * @param name component name
+ * @param props component property map (retained as-is, not copied)
+ * @param uriBuilder builder for this resource's URI
+ * @param pathElems path elements for URI construction
+ */
+ // NOTE(review): name, uriBuilder and pathElems are accepted but unused —
+ // href is never derived from uriBuilder here; confirm this is intended
+ // or wire them up (compare ConfTreeResource, which does use its builder).
+ public ComponentResource(String name,
+ Map<String, String> props,
+ UriBuilder uriBuilder,
+ Map<String, Object> pathElems) {
+ this.props = props;
+ }
+
+ /** @return the component property map, or null if none was supplied */
+ public Map<String, String> getProps() {
+ return props;
+ }
+
+ /** @return the resource URL, or null if not yet set */
+ public String getHref() {
+ return href;
+ }
+
+ /** @param href the resource URL */
+ public void setHref(String href) {
+ this.href = href;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
new file mode 100644
index 0000000..407bab6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.management.resources;
+
+import org.apache.slider.core.conf.ConfTree;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import javax.ws.rs.core.UriBuilder;
+import java.util.Map;
+
+/**
+ * REST representation of a configuration tree: its metadata, global
+ * options and per-component option maps, plus the URI it was served from.
+ * Unknown JSON fields are ignored on input; null fields are omitted on
+ * output. All fields are final: either all are populated from the
+ * supplied ConfTree, or all are null.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class ConfTreeResource {
+
+ private final String href;
+ private final Map<String, Object> metadata;
+ private final Map<String, String> global;
+ private final Map<String, Map<String, String>> components;
+
+ /** No-arg constructor required for JSON deserialization. */
+ public ConfTreeResource() {
+ this(null, null);
+ }
+
+ /**
+ * Build from a configuration tree.
+ * @param confTree source configuration tree; its maps are referenced,
+ * not copied
+ * @param uriBuilder builder used to derive {@code href}; if either
+ * argument is null, every field of this resource is left null
+ */
+ public ConfTreeResource(ConfTree confTree,
+ UriBuilder uriBuilder) {
+ if (uriBuilder != null && confTree != null) {
+ metadata = confTree.metadata;
+ global = confTree.global;
+ components = confTree.components;
+ this.href = uriBuilder.build().toASCIIString();
+ } else {
+ this.href = null;
+ this.metadata = null;
+ this.global = null;
+ this.components = null;
+ }
+ }
+
+ /** @return tree metadata, or null */
+ public Map<String, Object> getMetadata() {
+ return metadata;
+ }
+
+ /** @return global options, or null */
+ public Map<String, String> getGlobal() {
+ return global;
+ }
+
+ /** @return per-component option maps, keyed by component name, or null */
+ public Map<String, Map<String, String>> getComponents() {
+ return components;
+ }
+
+ /** @return the URI this resource was served from, or null */
+ public String getHref() {
+ return href;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java
new file mode 100644
index 0000000..9876412
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.management.resources;
+
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import javax.ws.rs.core.UriBuilder;
+import java.util.Map;
+
+/**
+ * Static factory methods for building the management REST resource
+ * wrappers from the underlying configuration model objects.
+ */
+// NOTE(review): this is a pure static-factory class — the Jackson
+// annotations below have no effect here, and a private constructor
+// would prevent accidental instantiation (Effective Java, Item 4).
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class ResourceFactory {
+
+ /** Wrap an aggregate configuration as a REST resource. */
+ public static AggregateConfResource createAggregateConfResource(AggregateConf conf,
+ UriBuilder uriBuilder) {
+ return new AggregateConfResource(conf, uriBuilder);
+ }
+
+ /** Wrap a configuration tree as a REST resource. */
+ public static ConfTreeResource createConfTreeResource(ConfTree confTree,
+ UriBuilder uriBuilder) {
+ return new ConfTreeResource(confTree, uriBuilder);
+ }
+
+ /** Wrap a component's properties as a REST resource. */
+ public static ComponentResource createComponentResource(String name,
+ Map<String, String> props,
+ UriBuilder uriBuilder,
+ Map<String, Object> pathElems) {
+ return new ComponentResource(name, props, uriBuilder, pathElems);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
new file mode 100644
index 0000000..c727581
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.web.rest.publisher;
+
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.apache.slider.core.registry.docstore.ConfigFormat;
+import org.apache.slider.core.registry.docstore.PublishedConfigSet;
+import org.apache.slider.core.registry.docstore.PublishedConfiguration;
+import org.apache.slider.core.registry.docstore.PublishedConfigurationOutputter;
+import org.apache.slider.core.registry.docstore.PublishedExports;
+import org.apache.slider.core.registry.docstore.PublishedExportsSet;
+import org.apache.slider.core.registry.docstore.UriMap;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.rest.AbstractSliderResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
+
+/**
+ * REST resource publishing the application's configuration sets and
+ * exports over HTTP, with per-configuration rendering as JSON, XML or
+ * Java properties.
+ */
+public class PublisherResource extends AbstractSliderResource {
+ protected static final Logger log =
+ LoggerFactory.getLogger(PublisherResource.class);
+ public static final String EXPORTS_NAME = "exports";
+ public static final String EXPORTS_RESOURCES_PATH = "/" + EXPORTS_NAME;
+ public static final String EXPORT_RESOURCE_PATH = EXPORTS_RESOURCES_PATH + "/{exportname}" ;
+ public static final String SET_NAME =
+ "{setname: " + PUBLISHED_CONFIGURATION_SET_REGEXP + "}";
+ public static final String SETNAME = "setname";
+ public static final String CLASSPATH = "/classpath";
+ public static final String CONFIG = "config";
+
+ // JAX-RS path template: {setname: <regexp>}
+ public static final String SETNAME_PATTERN =
+ "{"+ SETNAME+": " + PUBLISHED_CONFIGURATION_SET_REGEXP + "}";
+ private static final String CONFIG_PATTERN =
+ SETNAME_PATTERN + "/{"+ CONFIG +": " + PUBLISHED_CONFIGURATION_REGEXP + "}";
+ // live view of application state from which config sets are served
+ private final StateAccessForProviders appState;
+
+ public PublisherResource(WebAppApi slider) {
+ super(slider);
+ appState = slider.getAppState();
+ }
+
+ /** Per-request setup: clear the content type and log the request URI. */
+ private void init(HttpServletResponse res, UriInfo uriInfo) {
+ res.setContentType(null);
+ log.debug(uriInfo.getRequestUri().toString());
+ }
+
+ /**
+ * Get a named config set
+ * @param setname name of the config set
+ * @return the config set
+ * @throws NotFoundException if there was no matching set
+ */
+ private PublishedConfigSet getConfigSet(String setname) {
+ PublishedConfigSet configSet =
+ appState.getPublishedConfigSet(setname);
+ if (configSet == null) {
+ throw new NotFoundException("Not found: " + setname);
+ }
+ return configSet;
+ }
+
+ /**
+ * Enumerate all published config sets (plus the "exports" entry) as a
+ * name-to-URL map rooted at the request URI.
+ */
+ @GET
+ @Path("/")
+ @Produces({MediaType.APPLICATION_JSON})
+ public UriMap enumConfigSets(
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) {
+ init(res, uriInfo);
+ String baseURL = uriInfo.getRequestUri().toString();
+ if (!baseURL.endsWith("/")) {
+ baseURL += "/";
+ }
+ UriMap uriMap = new UriMap();
+ for (String name : appState.listConfigSets()) {
+ uriMap.put(name, baseURL + name);
+ // NOTE(review): logs only the base URL, not baseURL + name as
+ // registered above — consider logging the full entry URL.
+ log.info("registering config set {} at {}", name, baseURL);
+ }
+ uriMap.put(EXPORTS_NAME, baseURL + EXPORTS_NAME);
+ return uriMap;
+ }
+
+ /**
+ * @return the URLs of the AM's classloader, in order.
+ * Assumes the context classloader is a URLClassLoader — TODO confirm
+ * this holds on all supported JVMs.
+ */
+ @GET
+ @Path(CLASSPATH)
+ @Produces({MediaType.APPLICATION_JSON})
+ public Set<URL> getAMClassPath() {
+ URL[] urls = ((URLClassLoader) getClass().getClassLoader()).getURLs();
+ return new LinkedHashSet<URL>(Arrays.asList(urls));
+ }
+
+ /** @return the full set of published exports. */
+ // NOTE(review): method name is missing a "t" — should be
+ // getPublishedExports. Renaming is invisible to REST clients (the
+ // @Path drives routing) but is a code change, so only flagged here.
+ @GET
+ @Path(EXPORTS_RESOURCES_PATH)
+ @Produces({MediaType.APPLICATION_JSON})
+ public PublishedExportsSet gePublishedExports() {
+
+ return appState.getPublishedExportsSet();
+ }
+
+ /**
+ * Get a single named export.
+ * @param exportname name of the export to look up
+ * @return the export; may be null if absent — TODO confirm whether
+ * set.get() can return null and whether a 404 would be more appropriate
+ */
+ @GET
+ @Path(EXPORT_RESOURCE_PATH)
+ @Produces({MediaType.APPLICATION_JSON})
+ public PublishedExports getAMExports2(@PathParam("exportname") String exportname,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) {
+ init(res, uriInfo);
+ PublishedExportsSet set = appState.getPublishedExportsSet();
+ return set.get(exportname);
+ }
+
+ /**
+ * Get a shallow copy of a named configuration set.
+ * @param setname name of the config set
+ * @throws NotFoundException if there is no matching set
+ */
+ @GET
+ @Path("/"+ SETNAME_PATTERN)
+ @Produces({MediaType.APPLICATION_JSON})
+ public PublishedConfigSet getPublishedConfiguration(
+ @PathParam(SETNAME) String setname,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) {
+ init(res, uriInfo);
+
+ logRequest(uriInfo);
+ PublishedConfigSet publishedConfigSet = getConfigSet(setname);
+ log.debug("Number of configurations: {}", publishedConfigSet.size());
+ return publishedConfigSet.shallowCopy();
+ }
+
+ /** Log the request URI at info level. */
+ private void logRequest(UriInfo uriInfo) {
+ log.info(uriInfo.getRequestUri().toString());
+ }
+
+ /**
+ * Get a single configuration from a set.
+ * @throws NotFoundException if the set or the configuration is missing
+ */
+ @GET
+ @Path("/" + CONFIG_PATTERN)
+ @Produces({MediaType.APPLICATION_JSON})
+ public PublishedConfiguration getConfigurationInstance(
+ @PathParam(SETNAME) String setname,
+ @PathParam(CONFIG) String config,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) {
+ init(res, uriInfo);
+
+ PublishedConfiguration publishedConfig =
+ getPublishedConfiguration(setname, config);
+ if (publishedConfig == null) {
+ log.info("Configuration {} not found", config);
+ throw new NotFoundException("Not found: " + uriInfo.getAbsolutePath());
+ }
+ return publishedConfig;
+ }
+
+ /**
+ * Get a configuration
+ * @param setname name of the config set
+ * @param config config
+ * @return null if there was a config, but not a set
+ * @throws NotFoundException if there was no matching set
+ */
+ public PublishedConfiguration getPublishedConfiguration(String setname,
+ String config) {
+ return getConfigSet(setname).get(config);
+ }
+
+ /** Render a configuration as a JSON document. */
+ @GET
+ @Path("/" + CONFIG_PATTERN + ".json")
+ @Produces({MediaType.APPLICATION_JSON})
+ public String getConfigurationContentJson(
+ @PathParam(SETNAME) String setname,
+
+ @PathParam(CONFIG) String config,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) throws IOException {
+ return getStringRepresentation(setname, config, uriInfo, res,
+ ConfigFormat.JSON);
+ }
+
+ /** Render a configuration as an XML document. */
+ @GET
+ @Path("/" + CONFIG_PATTERN + ".xml")
+ @Produces({MediaType.APPLICATION_XML})
+ public String getConfigurationContentXML(
+ @PathParam(SETNAME) String setname,
+ @PathParam(CONFIG) String config,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) throws IOException {
+ return getStringRepresentation(setname, config, uriInfo, res,
+ ConfigFormat.XML);
+ }
+
+ /** Render a configuration as a Java properties document. */
+ // NOTE(review): declares APPLICATION_XML although the payload is a
+ // properties file — TEXT_PLAIN looks like the intended media type;
+ // confirm before changing, as clients may depend on the current header.
+ @GET
+ @Path("/" + CONFIG_PATTERN + ".properties")
+ @Produces({MediaType.APPLICATION_XML})
+ public String getConfigurationContentProperties(
+ @PathParam(SETNAME) String setname,
+
+ @PathParam(CONFIG) String config,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) throws IOException {
+
+ return getStringRepresentation(setname, config, uriInfo, res,
+ ConfigFormat.PROPERTIES);
+ }
+
+ /**
+ * Look up a configuration and render it in the requested format.
+ * @param format output format (JSON/XML/properties)
+ * @throws NotFoundException if the set or configuration is missing
+ * @throws IOException on rendering failure
+ */
+ public String getStringRepresentation(String setname,
+ String config,
+ UriInfo uriInfo,
+ HttpServletResponse res, ConfigFormat format) throws IOException {
+ // delegate (including init)
+ PublishedConfiguration publishedConfig =
+ getConfigurationInstance(setname, config, uriInfo, res);
+ PublishedConfigurationOutputter outputter =
+ publishedConfig.createOutputter(format);
+ return outputter.asString();
+ }
+
+ /**
+ * Get a single property of a configuration, as a one-entry map of
+ * property name to value.
+ * @throws NotFoundException if the set, configuration or property
+ * is missing
+ */
+ @GET
+ @Path("/" + CONFIG_PATTERN +"/{propertyName}")
+ @Produces({MediaType.APPLICATION_JSON})
+ public Map<String,String> getConfigurationProperty(
+ @PathParam(SETNAME) String setname,
+ @PathParam(CONFIG) String config,
+ @PathParam("propertyName") String propertyName,
+ @Context UriInfo uriInfo,
+ @Context HttpServletResponse res) {
+ PublishedConfiguration publishedConfig =
+ getConfigurationInstance(setname, config, uriInfo, res);
+ String propVal = publishedConfig.entries.get(propertyName);
+ if (propVal == null) {
+ log.debug("Configuration property {} not found in configuration {}",
+ propertyName, config);
+ throw new NotFoundException("Property not found: " + propertyName);
+ }
+ Map<String, String> rtnVal = new HashMap<>();
+ rtnVal.put(propertyName, propVal);
+
+ return rtnVal;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryResource.java
new file mode 100644
index 0000000..efb09a8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryResource.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.web.rest.registry;
+
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.util.List;
+
+/**
+ * Representation of a registry path entry: the short names of its child
+ * nodes plus the service record, if any, resolvable at this path.
+ * Unknown JSON fields are ignored on input; null fields are omitted on
+ * output.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class PathEntryResource {
+
+ /**
+ * Child nodes: as the short path to each element
+ */
+ public List<String> nodes;
+
+ /**
+ * Service record: if null, there is no resolvable service
+ * record at this node.
+ */
+ public ServiceRecord service;
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/RegistryResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/RegistryResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/RegistryResource.java
new file mode 100644
index 0000000..c824848
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/registry/RegistryResource.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.web.rest.registry;
+
+import com.google.inject.Singleton;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.rest.AbstractSliderResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+
+/**
+ * This is the read-only view of the YARN registry.
+ *
+ * Model:
+ * <ol>
+ * <li>a tree of nodes</li>
+ * <li>Default view is of children + record</li>
+ * </ol>
+ *
+ */
+@Singleton
+public class RegistryResource extends AbstractSliderResource {
+ protected static final Logger log =
+ LoggerFactory.getLogger(RegistryResource.class);
+ // JAX-RS template capturing the full remaining path, slashes included
+ public static final String SERVICE_PATH =
+ "/{path:.*}";
+
+ // registry client used for all resolve/list operations
+ private final RegistryOperations registry;
+
+ /**
+ * Construct an instance bonded to a registry
+ * @param slider slider API
+ */
+ public RegistryResource(WebAppApi slider) {
+ super(slider);
+ this.registry = slider.getRegistryOperations();
+ }
+
+
+ /**
+ * Internal init code, per request
+ * @param request incoming request
+ * @param uriInfo URI details
+ */
+ private void init(HttpServletRequest request, UriInfo uriInfo) {
+ log.debug(uriInfo.getRequestUri().toString());
+ }
+
+ /** Resolve the registry root: equivalent to {@code lookup("/")}. */
+ @GET
+ @Produces({MediaType.APPLICATION_JSON})
+ public PathEntryResource getRoot(@Context HttpServletRequest request,
+ @Context UriInfo uriInfo) {
+ return lookup("/", request, uriInfo);
+ }
+
+// {path:.*}
+
+ /**
+ * Resolve an arbitrary registry path to its children and record.
+ * @param path registry path extracted from the request URI
+ */
+ @Path(SERVICE_PATH)
+ @GET
+ @Produces({MediaType.APPLICATION_JSON})
+ public PathEntryResource lookup(
+ @PathParam("path") String path,
+ @Context HttpServletRequest request,
+ @Context UriInfo uriInfo) {
+ init(request, uriInfo);
+ return resolvePath(path);
+ }
+
+ /**
+ * Do the actual processing of requests to responses; can be directly
+ * invoked for testing.
+ * @param path path to query
+ * @return the entry
+ * @throws WebApplicationException on any failure.
+ */
+ public PathEntryResource resolvePath(String path) throws
+ WebApplicationException {
+ try {
+ PathEntryResource pathEntry =
+ fromRegistry(path);
+ if (log.isDebugEnabled()) {
+ log.debug("Resolved:\n{}", pathEntry);
+ }
+ return pathEntry;
+
+ // broad catch is deliberate: every failure mode is mapped to a
+ // WebApplicationException by the superclass helper
+ } catch (Exception e) {
+ throw buildException(path, e);
+ }
+ }
+
+
+ /**
+ * Build from the registry, filling up the children and service records.
+ * If there is no service record at the end of the path, that entry is
+ * null
+ * @param path path to query
+ * @return the built up record
+ * @throws IOException problems
+ *
+ */
+ private PathEntryResource fromRegistry(String path) throws IOException {
+ PathEntryResource entry = new PathEntryResource();
+ try {
+ entry.service = registry.resolve(path);
+ } catch (NoRecordException e) {
+ // ignoring
+ log.debug("No record at {}", path);
+ } catch (InvalidRecordException e) {
+ // swallowing this exception, the sign of "no entry present"
+ // "nothing parseable"
+ log.warn("Failed to resolve {}: {}", path, e, e);
+ }
+ entry.nodes = registry.list(path);
+ return entry;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
new file mode 100644
index 0000000..2f02f27
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.inject.Inject;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+
+/**
+ * Web UI block that renders the cluster's JSON specification as a
+ * preformatted section of the AM web page.
+ */
+public class ClusterSpecificationBlock extends SliderHamletBlock {
+
+ @Inject
+ public ClusterSpecificationBlock(WebAppApi slider) {
+ super(slider);
+ }
+
+ @Override
+ protected void render(Block html) {
+ doRender(html);
+ }
+
+ // An extra method to make testing easier since you can't make an instance of Block
+ protected void doRender(Hamlet html) {
+ html.
+ div("cluster_json").
+ h2("JSON Cluster Specification").
+ pre().
+ _(getJson())._()._();
+ }
+
+ /**
+ * Get the current cluster status as a JSON string.
+ * No exceptions are caught here; any failure in serialization
+ * propagates to the caller.
+ * @return the cluster status rendered via toString()
+ */
+ private String getJson() {
+ return appState.getClusterStatus().toString();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
new file mode 100644
index 0000000..56285c2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.inject.Inject;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.types.ComponentInformation;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
/**
 * Web block rendering, for each component/role: the role statistics,
 * the live containers (each linked to its NodeManager container page
 * when the node address is known) and the role options.
 */
public class ContainerStatsBlock extends SliderHamletBlock {

  // CSS classes for alternating table rows and bold headings, plus the
  // scheme and path fragments used to build NodeManager container URLs.
  private static final String EVEN = "even", ODD = "odd", BOLD = "bold", SCHEME = "http://", PATH = "/node/container/";

  // Some functions that help transform the data into an object we can use to abstract presentation specifics
  protected static final Function<Entry<String,Integer>,Entry<TableContent,Integer>> stringIntPairFunc = toTableContentFunction();
  protected static final Function<Entry<String,Long>,Entry<TableContent,Long>> stringLongPairFunc = toTableContentFunction();
  protected static final Function<Entry<String,String>,Entry<TableContent,String>> stringStringPairFunc = toTableContentFunction();

  @Inject
  public ContainerStatsBlock(WebAppApi slider) {
    super(slider);
  }

  /**
   * Sort a collection of ClusterNodes by name. Null nodes and null names
   * order before non-null ones; two nulls compare equal.
   */
  protected static class ClusterNodeNameComparator implements Comparator<ClusterNode>,
      Serializable {

    @Override
    public int compare(ClusterNode node1, ClusterNode node2) {
      // Null-safe ordering of the node references themselves
      if (null == node1 && null != node2) {
        return -1;
      } else if (null != node1 && null == node2) {
        return 1;
      } else if (null == node1) {
        return 0;
      }

      // Null-safe ordering of the node names
      final String name1 = node1.name, name2 = node2.name;
      if (null == name1 && null != name2) {
        return -1;
      } else if (null != name1 && null == name2) {
        return 1;
      } else if (null == name1) {
        return 0;
      }

      return name1.compareTo(name2);
    }

  }

  @Override
  protected void render(Block html) {
    // containerId -> RoleInstance for every container the AM currently owns
    final Map<String,RoleInstance> containerInstances = getContainerInstances(
        appState.cloneOwnedContainerList());

    Map<String, Map<String, ClusterNode>> clusterNodeMap =
        appState.getRoleClusterNodeMapping();
    Map<String, ComponentInformation> componentInfoMap = appState.getComponentInfoSnapshot();

    // Emit one div per role: statistics table, containers table, options table
    for (Entry<String, Map<String, ClusterNode>> entry : clusterNodeMap.entrySet()) {
      final String name = entry.getKey();
      Map<String, ClusterNode> clusterNodesInRole = entry.getValue();
      //final RoleStatus roleStatus = entry.getValue();

      DIV<Hamlet> div = html.div("role-info ui-widget-content ui-corner-all");

      List<ClusterNode> nodesInRole =
          new ArrayList<>(clusterNodesInRole.values());

      div.h2(BOLD, StringUtils.capitalize(name));

      // Generate the details on this role
      ComponentInformation componentInfo = componentInfoMap.get(name);
      if (componentInfo != null) {
        Iterable<Entry<String,Integer>> stats = componentInfo.buildStatistics().entrySet();
        generateRoleDetails(div,"role-stats-wrap", "Specifications",
            Iterables.transform(stats, stringIntPairFunc));
      }

      // Sort the ClusterNodes by their name (containerid)
      Collections.sort(nodesInRole, new ClusterNodeNameComparator());

      // Generate the containers running this role; each cell becomes an
      // anchor to the NodeManager container page when the node HTTP
      // address is known, otherwise a plain text cell
      generateRoleDetails(div, "role-stats-containers", "Containers",
          Iterables.transform(nodesInRole, new Function<ClusterNode,Entry<TableContent,String>>() {

            @Override
            public Entry<TableContent,String> apply(ClusterNode input) {
              final String containerId = input.name;

              if (containerInstances.containsKey(containerId)) {
                RoleInstance roleInst = containerInstances.get(containerId);
                if (roleInst.container.getNodeHttpAddress() != null) {
                  return Maps.<TableContent,String> immutableEntry(
                      new TableAnchorContent(containerId,
                          buildNodeUrlForContainer(roleInst.container.getNodeHttpAddress(), containerId)), null);
                }
              }
              // No owned container / no node address: render as plain text
              return Maps.immutableEntry(new TableContent(input.name), null);
            }

          }));

      ClusterDescription desc = appState.getClusterStatus();
      Map<String, String> options = desc.getRole(name);
      Iterable<Entry<TableContent, String>> tableContent;

      // Generate the pairs of data in the expected form
      if (null != options) {
        tableContent = Iterables.transform(options.entrySet(), stringStringPairFunc);
      } else {
        // Or catch that we have no options and provide "empty"
        tableContent = Collections.emptySet();
      }

      // Generate the options used by this role
      generateRoleDetails(div, "role-options-wrap", "Role Options", tableContent);

      // Close the div for this role
      div._();
    }
  }

  /**
   * Build a function converting {@code (String, T)} map entries into
   * {@code (TableContent, T)} entries, leaving the value untouched.
   * @param <T> value type of the entries
   * @return the transforming function
   */
  protected static <T> Function<Entry<String,T>,Entry<TableContent,T>> toTableContentFunction() {
    return new Function<Entry<String,T>,Entry<TableContent,T>>() {
      @Override
      public Entry<TableContent,T> apply(Entry<String,T> input) {
        return Maps.immutableEntry(new TableContent(input.getKey()), input.getValue());
      }
    };
  }

  /**
   * Index a list of role instances by their id.
   * @param roleInstances instances to index
   * @return map of instance id (the container id) to instance
   */
  protected Map<String,RoleInstance> getContainerInstances(List<RoleInstance> roleInstances) {
    Map<String,RoleInstance> map = Maps.newHashMapWithExpectedSize(roleInstances.size());
    for (RoleInstance roleInstance : roleInstances) {
      // UUID is the containerId
      map.put(roleInstance.id, roleInstance);
    }
    return map;
  }

  /**
   * Given a div, a name for this data, and some pairs of data, generate a
   * nice HTML table. If contents is empty (of size zero), then a message
   * will be printed that there were no items instead of an empty table.
   *
   * @param parent div to render into (left open for the caller)
   * @param divSelector CSS selector for the wrapping div
   * @param detailsName heading text for this table
   * @param contents pairs of (cell content, optional second-column value)
   */
  protected <T1 extends TableContent,T2> void generateRoleDetails(DIV<Hamlet> parent, String divSelector, String detailsName, Iterable<Entry<T1,T2>> contents) {
    final DIV<DIV<Hamlet>> div = parent.div(divSelector).h3(BOLD, detailsName);

    // Table and body are created lazily so an empty iterable produces
    // a "None" message instead of an empty table
    int offset = 0;
    TABLE<DIV<DIV<Hamlet>>> table = null;
    TBODY<TABLE<DIV<DIV<Hamlet>>>> tbody = null;
    for (Entry<T1,T2> content : contents) {
      if (null == table) {
        table = div.table("ui-widget-content ui-corner-bottom");
        tbody = table.tbody();
      }

      // Alternate row styling for readability
      TR<TBODY<TABLE<DIV<DIV<Hamlet>>>>> row = tbody.tr(offset % 2 == 0 ? EVEN : ODD);

      // Defer to the implementation of the TableContent for what the cell should contain
      content.getKey().printCell(row);

      // Only add the second column if the element is non-null
      // This also lets us avoid making a second method if we're only making a one-column table
      if (null != content.getValue()) {
        row.td(content.getValue().toString());
      }

      row._();

      offset++;
    }

    // If we made a table, close it out
    if (null != table) {
      tbody._()._();
    } else {
      // Otherwise, throw in a nice "no content" message
      div.p("no-table-contents")._("None")._();
    }

    // Close out the initial div
    div._();
  }

  /**
   * Build a URL from the address:port and container ID directly to the NodeManager service
   * @param nodeAddress host:port of the NodeManager web service
   * @param containerId id of the container on that node
   * @return http URL of the NodeManager's page for that container
   */
  protected String buildNodeUrlForContainer(String nodeAddress, String containerId) {
    // Presize the builder to the exact final length
    StringBuilder sb = new StringBuilder(SCHEME.length() + nodeAddress.length() + PATH.length() + containerId.length());

    sb.append(SCHEME).append(nodeAddress).append(PATH).append(containerId);

    return sb.toString();
  }

  /**
   * Creates a table cell with the provided String as content.
   */
  protected static class TableContent {
    // Text content of the cell
    private String cell;

    public TableContent(String cell) {
      this.cell = cell;
    }

    public String getCell() {
      return cell;
    }

    /**
     * Adds a td to the given tr. The tr is not closed
     * @param tableRow row to add the cell to
     */
    public void printCell(TR<?> tableRow) {
      tableRow.td(this.cell);
    }
  }

  /**
   * Creates a table cell with an anchor to the given URL with the provided String as content.
   */
  protected static class TableAnchorContent extends TableContent {
    // Target URL of the anchor wrapped around the cell text
    private String anchorUrl;

    public TableAnchorContent(String cell, String anchorUrl) {
      super(cell);
      this.anchorUrl = anchorUrl;
    }

    /* (non-javadoc)
     * @see org.apache.slider.server.appmaster.web.view.ContainerStatsBlock$TableContent#printCell()
     */
    @Override
    public void printCell(TR<?> tableRow) {
      tableRow.td().a(anchorUrl, getCell())._();
    }
  }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
new file mode 100644
index 0000000..c3b9b6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.inject.Inject;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.api.types.RoleStatistics;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.registry.docstore.ExportEntry;
+import org.apache.slider.core.registry.docstore.PublishedExports;
+import org.apache.slider.core.registry.docstore.PublishedExportsSet;
+import org.apache.slider.providers.MonitorDetail;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.LIVE_COMPONENTS;
+
/**
 * The main content on the Slider AM web page: general application info,
 * a per-component instance table, diagnostics, provider details and
 * published exports.
 */
public class IndexBlock extends SliderHamletBlock {
  private static final Logger log = LoggerFactory.getLogger(IndexBlock.class);

  /**
   * Message printed when application is at full size.
   *
   * {@value}
   */
  public static final String ALL_CONTAINERS_ALLOCATED = "all containers allocated";

  @Inject
  public IndexBlock(WebAppApi slider) {
    super(slider);
  }

  @Override
  protected void render(Block html) {
    doIndex(html, getProviderName());
  }

  // An extra method to make testing easier since you can't make an instance of Block
  @VisibleForTesting
  protected void doIndex(Hamlet html, String providerName) {
    ClusterDescription clusterStatus = appState.getClusterStatus();
    String name = clusterStatus.name;
    // Quote the name so leading/trailing spaces are visible on the page
    if (name != null && (name.startsWith(" ") || name.endsWith(" "))) {
      name = "'" + name + "'";
    }
    DIV<Hamlet> div = html.div("general_info")
        .h1("index_header",
            "Application: " + name);

    // Summary table: liveness, container count and key timestamps/paths
    ApplicationLivenessInformation liveness =
        appState.getApplicationLivenessInformation();
    String livestatus = liveness.allRequestsSatisfied
        ? ALL_CONTAINERS_ALLOCATED
        : String.format("Awaiting %d containers", liveness.requestsOutstanding);
    Hamlet.TABLE<DIV<Hamlet>> table1 = div.table();
    table1.tr()
        .td("Status")
        .td(livestatus)
        ._();
    table1.tr()
        .td("Total number of containers")
        .td(Integer.toString(appState.getNumOwnedContainers()))
        ._();
    table1.tr()
        .td("Create time: ")
        .td(getInfoAvoidingNulls(StatusKeys.INFO_CREATE_TIME_HUMAN))
        ._();
    table1.tr()
        .td("Running since: ")
        .td(getInfoAvoidingNulls(StatusKeys.INFO_LIVE_TIME_HUMAN))
        ._();
    table1.tr()
        .td("Time last flexed: ")
        .td(getInfoAvoidingNulls(StatusKeys.INFO_FLEX_TIME_HUMAN))
        ._();
    table1.tr()
        .td("Application storage path: ")
        .td(clusterStatus.dataPath)
        ._();
    table1.tr()
        .td("Application configuration path: ")
        .td(clusterStatus.originConfigurationPath)
        ._();
    table1._();
    div._();
    div = null;

    // Component instance table: one row per role with its counts
    DIV<Hamlet> containers = html.div("container_instances")
        .h3("Component Instances");

    // Counters accumulated while rendering rows, used below for diagnostics
    int aaRoleWithNoSuitableLocations = 0;
    int aaRoleWithOpenRequest = 0;
    int roleWithOpenRequest = 0;

    Hamlet.TABLE<DIV<Hamlet>> table = containers.table();
    Hamlet.TR<Hamlet.THEAD<Hamlet.TABLE<DIV<Hamlet>>>> header = table.thead().tr();
    trb(header, "Component");
    trb(header, "Desired");
    trb(header, "Actual");
    trb(header, "Outstanding Requests");
    trb(header, "Failed");
    trb(header, "Failed to start");
    trb(header, "Placement");
    header._()._(); // tr & thead

    List<RoleStatus> roleStatuses = appState.cloneRoleStatusList();
    Collections.sort(roleStatuses, new RoleStatus.CompareByName());
    for (RoleStatus status : roleStatuses) {
      String roleName = status.getName();
      String nameUrl = apiPath(LIVE_COMPONENTS) + "/" + roleName;
      // Text of the "Placement" column: anti-affinity state and/or label
      String aatext;
      if (status.isAntiAffinePlacement()) {
        boolean aaRequestOutstanding = status.isAARequestOutstanding();
        int pending = (int)status.getPendingAntiAffineRequests();
        aatext = buildAADetails(aaRequestOutstanding, pending);
        if (SliderUtils.isSet(status.getLabelExpression())) {
          aatext += " (label: " + status.getLabelExpression() + ")";
        }
        // Pending requests but none outstanding: no suitable node exists
        if (pending > 0 && !aaRequestOutstanding) {
          aaRoleWithNoSuitableLocations ++;
        } else if (aaRequestOutstanding) {
          aaRoleWithOpenRequest++;
        }
      } else {
        if (SliderUtils.isSet(status.getLabelExpression())) {
          aatext = "label: " + status.getLabelExpression();
        } else {
          aatext = "";
        }
        if (status.getRequested() > 0) {
          roleWithOpenRequest ++;
        }
      }
      table.tr()
          .td().a(nameUrl, roleName)._()
          .td(String.format("%d", status.getDesired()))
          .td(String.format("%d", status.getActual()))
          .td(String.format("%d", status.getRequested()))
          .td(String.format("%d", status.getFailed()))
          .td(String.format("%d", status.getStartFailed()))
          .td(aatext)
          ._();
    }

    // empty row for some more spacing
    table.tr()._();
    // close table
    table._();

    containers._();
    containers = null;

    // some spacing
    html.div()._();
    html.div()._();

    // Diagnostics section: only rendered when something is unsatisfiable
    DIV<Hamlet> diagnostics = html.div("diagnostics");

    List<String> statusEntries = new ArrayList<>(0);
    if (roleWithOpenRequest > 0) {
      statusEntries.add(String.format("%d %s with requests unsatisfiable by cluster",
          roleWithOpenRequest, plural(roleWithOpenRequest, "component")));
    }
    if (aaRoleWithNoSuitableLocations > 0) {
      statusEntries.add(String.format("%d anti-affinity %s no suitable nodes in the cluster",
          aaRoleWithNoSuitableLocations,
          plural(aaRoleWithNoSuitableLocations, "component has", "components have")));
    }
    if (aaRoleWithOpenRequest > 0) {
      statusEntries.add(String.format("%d anti-affinity %s with requests unsatisfiable by cluster",
          aaRoleWithOpenRequest,
          plural(aaRoleWithOpenRequest, "component has", "components have")));

    }
    if (!statusEntries.isEmpty()) {
      diagnostics.h3("Diagnostics");
      Hamlet.TABLE<DIV<Hamlet>> diagnosticsTable = diagnostics.table();
      for (String entry : statusEntries) {
        diagnosticsTable.tr().td(entry)._();
      }
      diagnosticsTable._();
    }
    diagnostics._();

    // Provider-specific monitoring details
    // NOTE(review): the ul is attached to html, not nested inside the
    // provider_info div — confirm this layout is intended
    DIV<Hamlet> provider_info = html.div("provider_info");
    provider_info.h3(providerName + " information");
    UL<Hamlet> ul = html.ul();
    addProviderServiceOptions(providerService, ul, clusterStatus);
    ul._();
    provider_info._();

    // Published exports, one nested list per export group
    DIV<Hamlet> exports = html.div("exports");
    exports.h3("Exports");
    ul = html.ul();
    enumeratePublishedExports(appState.getPublishedExportsSet(), ul);
    ul._();
    exports._();
  }

  /**
   * Build the anti-affinity placement description for a role.
   * @param outstanding whether an AA request is currently outstanding
   * @param pending number of pending AA requests
   * @return human-readable summary text
   */
  @VisibleForTesting
  String buildAADetails(boolean outstanding, int pending) {
    return String.format("Anti-affinity:%s %d pending %s",
        (outstanding ? " 1 active request and" : ""),
        pending, plural(pending, "request"));
  }

  // Simple pluralization: append "s" unless n == 1
  private String plural(int n, String singular) {
    return plural(n, singular, singular + "s");
  }
  // Pluralization with an explicit plural form
  private String plural(int n, String singular, String plural) {
    return n == 1 ? singular : plural;
  }

  // Add a bold header cell to the given table row (row left open)
  private void trb(Hamlet.TR tr,
      String text) {
    tr.td().b(text)._();
  }

  private String getProviderName() {
    return providerService.getHumanName();
  }

  /**
   * Look up a cluster info value, substituting "N/A" for null.
   * @param key info key to look up
   * @return the value, or "N/A" when absent
   */
  private String getInfoAvoidingNulls(String key) {
    String createTime = appState.getClusterStatus().getInfo(key);

    return null == createTime ? "N/A" : createTime;
  }

  /**
   * Render the provider's monitoring details into the given list.
   * Entries with a URL value become anchors; entries with no value
   * are rendered as plain list items.
   * @param provider provider to query
   * @param ul list to render into (left open)
   * @param clusterStatus current cluster status passed to the provider
   */
  protected void addProviderServiceOptions(ProviderService provider,
      UL ul, ClusterDescription clusterStatus) {
    Map<String, MonitorDetail> details = provider.buildMonitorDetails(
        clusterStatus);
    if (null == details) {
      return;
    }
    // Loop over each entry, placing the text in the UL, adding an anchor when the URL is non-null/empty
    for (Entry<String, MonitorDetail> entry : details.entrySet()) {
      MonitorDetail detail = entry.getValue();
      if (SliderUtils.isSet(detail.getValue()) ) {
        LI item = ul.li();
        item.span().$class("bold")._(entry.getKey())._();
        item._(" - ");
        if (detail.isUrl()) {
          // Render an anchor if the value is a URL
          item.a(detail.getValue(), detail.getValue())._();
        } else {
          item._(detail.getValue())._();
        }
      } else {
        ul.li(entry.getKey());
      }
    }
  }

  /**
   * Render the published exports as a nested list: one item per export,
   * with a sub-list of its entries and their values.
   * @param exports the published exports set
   * @param ul list to render into (left open)
   */
  protected void enumeratePublishedExports(PublishedExportsSet exports, UL<Hamlet> ul) {
    for(String key : exports.keys()) {
      PublishedExports export = exports.get(key);
      LI<UL<Hamlet>> item = ul.li();
      item.span().$class("bold")._(export.description)._();
      UL sublist = item.ul();
      for (Entry<String, List<ExportEntry>> entry : export.entries.entrySet()) {
        LI sublistItem = sublist.li()._(entry.getKey());
        for (ExportEntry exportEntry : entry.getValue()) {
          sublistItem._(exportEntry.getValue());
        }
        sublistItem._();
      }
      sublist._();
      item._();
    }
  }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/NavBlock.java
new file mode 100644
index 0000000..069d386
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/NavBlock.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.inject.Inject;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+
+import static org.apache.slider.server.appmaster.web.SliderAMWebApp.*;
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
+
/**
 * Navigation sidebar of the AM web UI: links to the HTML views
 * followed by links to the REST API endpoints.
 */
public class NavBlock extends SliderHamletBlock {

  @Inject
  public NavBlock(WebAppApi slider) {
    super(slider);
  }

  @Override
  protected void render(Block html) {
    // First list: HTML pages and system servlets of this web app.
    // Second list: REST API endpoints under the application path.
    html.
      div("#nav").
        h3("Slider").
          ul().
            li().a(this.prefix(), "Overview")._().
            li().a(relPath(CONTAINER_STATS), "Statistics")._().
            li().a(relPath(CLUSTER_SPEC), "Specification")._().
            li().a(rootPath(SYSTEM_METRICS_JSON), "Metrics")._().
            li().a(rootPath(SYSTEM_HEALTHCHECK), "Health")._().
            li().a(rootPath(SYSTEM_THREADS), "Threads")._().
          _()
        .h3("REST API").
          ul().
            li().a(apiPath(MODEL_DESIRED), "Specified")._().
            li().a(apiPath(MODEL_RESOLVED), "Resolved")._().
            li().a(apiPath(LIVE_RESOURCES), "Resources")._().
            li().a(apiPath(LIVE_COMPONENTS), "Components")._().
            li().a(apiPath(LIVE_CONTAINERS), "Containers")._().
            li().a(apiPath(LIVE_NODES), "Nodes")._().
            li().a(apiPath(LIVE_STATISTICS), "Statistics")._().
            li().a(apiPath(LIVE_LIVENESS), "Liveness")._()
          ._()
        ._();
  }

}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
new file mode 100644
index 0000000..82d7c8f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.web.view;
+
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.rest.RestPaths;
+
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.SLIDER_PATH_APPLICATION;
+
/**
 * Anything we want to share across slider hamlet blocks:
 * the application state, the provider service and URL-building helpers.
 */
public abstract class SliderHamletBlock extends HtmlBlock {

  // Read-only view of the application state
  protected final StateAccessForProviders appState;
  // Provider service running the application's components
  protected final ProviderService providerService;
  // Shared REST path constants/helpers for subclasses
  protected final RestPaths restPaths = new RestPaths();

  public SliderHamletBlock(WebAppApi slider) {
    this.appState = slider.getAppState();
    this.providerService = slider.getProviderService();
  }

  /**
   * Build a URL relative to the web app root.
   * @param absolutePath path below the root
   * @return the root-relative URL
   */
  protected String rootPath(String absolutePath) {
    return root_url(absolutePath);
  }

  /**
   * Build a URL relative to this block's own URL prefix.
   * @param args path components to join onto the prefix
   * @return the joined URL
   */
  protected String relPath(String... args) {
    return ujoin(this.prefix(), args);
  }

  /**
   * Build a URL to an application REST API endpoint.
   * @param api API path below {@code SLIDER_PATH_APPLICATION}
   * @return the API URL
   */
  protected String apiPath(String api) {
    return root_url(SLIDER_PATH_APPLICATION, api);
  }

}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/LoadedRoleHistory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/LoadedRoleHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/LoadedRoleHistory.java
new file mode 100644
index 0000000..77408a5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/LoadedRoleHistory.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.avro;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.common.tools.SliderUtils;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The role history
+ */
+public class LoadedRoleHistory {
+
+ private RoleHistoryHeader header;
+
+ private Path path;
+
+ public final Map<String, Integer> roleMap = new HashMap<>();
+
+ public final List<NodeEntryRecord> records = new ArrayList<>();
+
+ /**
+ * Add a record
+ * @param record
+ */
+ public void add(NodeEntryRecord record) {
+ records.add(record);
+ }
+
+ /**
+ * Number of loaded records
+ * @return
+ */
+ public int size() {
+ return records.size();
+ }
+
+ public RoleHistoryHeader getHeader() {
+ return header;
+ }
+
+ public void setHeader(RoleHistoryHeader header) {
+ this.header = header;
+ }
+
+ public Path getPath() {
+ return path;
+ }
+
+ public void setPath(Path path) {
+ this.path = path;
+ }
+
+ public void buildMapping(Map<CharSequence, Integer> source) {
+ roleMap.clear();
+ for (Map.Entry<CharSequence, Integer> entry : source.entrySet()) {
+ roleMap.put(SliderUtils.sequenceToString(entry.getKey()),
+ entry.getValue());
+ }
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder(
+ "LoadedRoleHistory{");
+ sb.append("path=").append(path);
+ sb.append("; number of roles=").append(roleMap.size());
+ sb.append("; size=").append(size());
+ sb.append('}');
+ return sb.toString();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/NewerFilesFirst.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/NewerFilesFirst.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/NewerFilesFirst.java
new file mode 100644
index 0000000..2e049cb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/NewerFilesFirst.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.avro;
+
+import org.apache.hadoop.fs.Path;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
/**
 * Compare two paths by filename; the more recent one (the name that
 * sorts later in natural string order) comes first
 */
public class NewerFilesFirst implements Comparator<Path>, Serializable {

  /**
   * Takes the ordering of path names from the normal string comparison
   * and negates it, so that names that come after other names in
   * the string sort come before here
   * @param o1 leftmost
   * @param o2 rightmost
   * @return negative if o1's name sorts after o2's (so o1 comes first),
   * positive if it sorts before, 0 if the names are equal
   */
  @Override
  public int compare(Path o1, Path o2) {
    return (o2.getName().compareTo(o1.getName()));
  }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/OlderFilesFirst.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/OlderFilesFirst.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/OlderFilesFirst.java
new file mode 100644
index 0000000..407aaa6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/OlderFilesFirst.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.avro;
+
+import org.apache.hadoop.fs.Path;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+/**
+ * Compare two paths by filename; older (lexicographically earlier)
+ * names come first.
+ */
+public class OlderFilesFirst implements Comparator<Path>, Serializable {
+
+  /**
+   * Uses the natural string ordering of the path names, so names that
+   * come earlier in the string sort also come earlier here (this is the
+   * inverse of {@code NewerFilesFirst}; nothing is negated).
+   * @param o1 leftmost
+   * @param o2 rightmost
+   * @return positive if {@code o1}'s name sorts after {@code o2}'s name
+   */
+  @Override
+  public int compare(Path o1, Path o2) {
+    return o1.getName().compareTo(o2.getName());
+  }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8cab88d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java
new file mode 100644
index 0000000..49d8fb2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.avro;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.avro.AvroTypeException;
+import org.apache.avro.Schema;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.io.Decoder;
+import org.apache.avro.io.DecoderFactory;
+import org.apache.avro.io.Encoder;
+import org.apache.avro.io.EncoderFactory;
+import org.apache.avro.specific.SpecificDatumReader;
+import org.apache.avro.specific.SpecificDatumWriter;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.GlobFilter;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * Reads and writes role histories as a sequence of Avro records encoded as
+ * JSON: a version-carrying header, a role mapping, one record per
+ * (node, role) entry, and a footer carrying the entry count for validation.
+ */
+public class RoleHistoryWriter {
+  protected static final Logger log =
+    LoggerFactory.getLogger(RoleHistoryWriter.class);
+
+  /**
+   * Although Avro is designed to handle some changes, we still keep a version
+   * marker in the file to catch changes that are fundamentally incompatible
+   * at the semantic level -changes that require either a different
+   * parser or get rejected outright.
+   */
+  public static final int ROLE_HISTORY_VERSION = 0x01;
+
+  /**
+   * Write out the history; the stream is always closed on return.
+   * This does not update the history's dirty/savetime fields.
+   *
+   * @param out outstream
+   * @param history history
+   * @param savetime time in millis for the save time to go in as a record
+   * @return no of node entry records written (header/mapping/footer excluded)
+   * @throws IOException IO failures
+   */
+  public long write(OutputStream out, RoleHistory history, long savetime)
+      throws IOException {
+    try {
+      DatumWriter<RoleHistoryRecord> writer =
+          new SpecificDatumWriter<>(RoleHistoryRecord.class);
+
+      RoleHistoryRecord record = createHeaderRecord(savetime, history);
+      int roles = history.getRoleSize();
+      Schema schema = record.getSchema();
+      Encoder encoder = EncoderFactory.get().jsonEncoder(schema, out);
+      writer.write(record, encoder);
+      // now write the rolemap record
+      writer.write(createRolemapRecord(history), encoder);
+      long count = 0;
+      // now for every role history entry, write out its record
+      Collection<NodeInstance> instances = history.cloneNodemap().values();
+      for (NodeInstance instance : instances) {
+        for (int role = 0; role < roles; role++) {
+          NodeEntry nodeEntry = instance.get(role);
+
+          if (nodeEntry != null) {
+            NodeEntryRecord ner = build(nodeEntry, role, instance.hostname);
+            record = new RoleHistoryRecord(ner);
+            writer.write(record, encoder);
+            count++;
+          }
+        }
+      }
+      // footer: records the entry count so readers can validate the file
+      RoleHistoryFooter footer = new RoleHistoryFooter();
+      footer.setCount(count);
+      writer.write(new RoleHistoryRecord(footer), encoder);
+      encoder.flush();
+      return count;
+    } finally {
+      out.close();
+    }
+  }
+
+  /**
+   * Create the header record, including the version marker and the save
+   * time in three redundant representations (millis, hex, GMT string).
+   * @param savetime time of save
+   * @param history history
+   * @return a record to place at the head of the file
+   */
+  private RoleHistoryRecord createHeaderRecord(long savetime, RoleHistory history) {
+    RoleHistoryHeader header = new RoleHistoryHeader();
+    header.setVersion(ROLE_HISTORY_VERSION);
+    header.setSaved(savetime);
+    header.setSavedx(Long.toHexString(savetime));
+    header.setSavedate(SliderUtils.toGMTString(savetime));
+    header.setRoles(history.getRoleSize());
+    return new RoleHistoryRecord(header);
+  }
+
+  /**
+   * Create the rolemap record mapping role names to indices.
+   * @param history history
+   * @return a record to insert into the file
+   */
+  private RoleHistoryRecord createRolemapRecord(RoleHistory history) {
+    RoleHistoryMapping entry = new RoleHistoryMapping();
+    Map<CharSequence, Integer> mapping = history.buildMappingForHistoryFile();
+    entry.setRolemap(mapping);
+    return new RoleHistoryRecord(entry);
+  }
+
+  /**
+   * Write the history information to a file.
+   *
+   * @param fs filesystem
+   * @param path path
+   * @param overwrite overwrite flag
+   * @param history history
+   * @param savetime time in millis for the save time to go in as a record
+   * @return no of records written
+   * @throws IOException IO failures
+   */
+  public long write(FileSystem fs,
+      Path path,
+      boolean overwrite,
+      RoleHistory history,
+      long savetime)
+      throws IOException {
+    FSDataOutputStream out = fs.create(path, overwrite);
+    // write() closes the stream in all cases
+    return write(out, history, savetime);
+  }
+
+  /**
+   * Create the filename for a history file.
+   * @param historyPath directory for history files
+   * @param time time value embedded in the name
+   * @return a filename such that later filenames sort later in the directory
+   */
+  public Path createHistoryFilename(Path historyPath, long time) {
+    String filename = String.format(Locale.ENGLISH,
+        SliderKeys.HISTORY_FILENAME_CREATION_PATTERN,
+        time);
+    return new Path(historyPath, filename);
+  }
+
+  /**
+   * Build a {@link NodeEntryRecord} from a node entry; include whether
+   * the node is in use and when it was last used.
+   * @param entry entry count
+   * @param role role index
+   * @param hostname name
+   * @return the record
+   */
+  private NodeEntryRecord build(NodeEntry entry, int role, String hostname) {
+    return new NodeEntryRecord(
+        hostname, role, entry.getLive() > 0, entry.getLastUsed()
+    );
+  }
+
+  /**
+   * Read a history, returning one that is ready to have its onThaw()
+   * method called. The stream is always closed on return.
+   * @param in input source
+   * @return the loaded (not yet thawed) history
+   * @throws IOException problems, including a missing/duplicate header,
+   * version mismatch, or trailing data after the footer
+   * @throws BadConfigException if the role mapping cannot be rebuilt
+   */
+  public LoadedRoleHistory read(InputStream in) throws
+      IOException,
+      BadConfigException {
+    try {
+      LoadedRoleHistory loadedRoleHistory = new LoadedRoleHistory();
+      DatumReader<RoleHistoryRecord> reader =
+          new SpecificDatumReader<>(RoleHistoryRecord.class);
+      Decoder decoder =
+          DecoderFactory.get().jsonDecoder(RoleHistoryRecord.getClassSchema(),
+              in);
+
+      // read header : no entry -> EOF
+      RoleHistoryRecord record = reader.read(null, decoder);
+      if (record == null) {
+        throw new IOException("Role History Header not found at start of file.");
+      }
+      Object entry = record.getEntry();
+      if (!(entry instanceof RoleHistoryHeader)) {
+        throw new IOException("Role History Header not found at start of file");
+      }
+      RoleHistoryHeader header = (RoleHistoryHeader) entry;
+      if (header.getVersion() != ROLE_HISTORY_VERSION) {
+        throw new IOException(
+            String.format("Can't read role file version %04x -need %04x",
+                header.getVersion(),
+                ROLE_HISTORY_VERSION));
+      }
+      loadedRoleHistory.setHeader(header);
+      RoleHistoryFooter footer = null;
+      int records = 0;
+      // go through reading data until the footer is reached
+      try {
+        while (footer == null) {
+          record = reader.read(null, decoder);
+          if (record == null) {
+            throw new IOException("Null record after " + records + " records");
+          }
+          entry = record.getEntry();
+
+          if (entry instanceof RoleHistoryHeader) {
+            throw new IOException("Duplicate Role History Header found");
+          } else if (entry instanceof RoleHistoryMapping) {
+            // role history mapping entry
+            if (!loadedRoleHistory.roleMap.isEmpty()) {
+              // duplicate role maps are viewed as something to warn over, rather than fail
+              log.warn("Duplicate role map; ignoring");
+            } else {
+              RoleHistoryMapping historyMapping = (RoleHistoryMapping) entry;
+              loadedRoleHistory.buildMapping(historyMapping.getRolemap());
+            }
+          } else if (entry instanceof NodeEntryRecord) {
+            // normal record
+            records++;
+            NodeEntryRecord nodeEntryRecord = (NodeEntryRecord) entry;
+            loadedRoleHistory.add(nodeEntryRecord);
+          } else if (entry instanceof RoleHistoryFooter) {
+            // tail end of the file
+            footer = (RoleHistoryFooter) entry;
+          } else {
+            // this is to handle future versions, such as when rolling back
+            // from a later version of slider
+            log.warn("Discarding unknown record {}", entry);
+          }
+        }
+      } catch (EOFException e) {
+        EOFException ex = new EOFException(
+            "End of file reached after " + records + " records");
+        ex.initCause(e);
+        throw ex;
+      }
+      // at this point there should be no data left.
+      // check by reading and expecting a -1; any byte value (including 0)
+      // means the footer arrived before the end of the stream
+      if (in.read() >= 0) {
+        // footer is in stream before the last record
+        throw new EOFException(
+            "File footer reached before end of file -after " + records +
+            " records");
+      }
+      if (records != footer.getCount()) {
+        log.warn("mismatch between no of records saved {} and number read {}",
+            footer.getCount(), records);
+      }
+      return loadedRoleHistory;
+    } finally {
+      in.close();
+    }
+
+  }
+
+  /**
+   * Read a role history from a path in a filesystem.
+   * @param fs filesystem
+   * @param path path to the file
+   * @return the records read
+   * @throws IOException any problem
+   * @throws BadConfigException if the role mapping cannot be rebuilt
+   */
+  public LoadedRoleHistory read(FileSystem fs, Path path)
+      throws IOException, BadConfigException {
+    FSDataInputStream instream = fs.open(path);
+    return read(instream);
+  }
+
+  /**
+   * Read from a resource in the classpath -used for testing.
+   * @param resource resource
+   * @return the records read
+   * @throws IOException any problem
+   * @throws BadConfigException if the role mapping cannot be rebuilt
+   */
+  public LoadedRoleHistory read(String resource)
+      throws IOException, BadConfigException {
+
+    return read(this.getClass().getClassLoader().getResourceAsStream(resource));
+  }
+
+
+  /**
+   * Find all history entries in a dir. The dir is created if it is
+   * not already defined.
+   *
+   * The scan uses the match pattern {@link SliderKeys#HISTORY_FILENAME_MATCH_PATTERN}
+   * while dropping empty files and directories which match the pattern.
+   * The list is then sorted with a comparator that sorts on filename,
+   * relying on the filename of newer created files being later than the old ones.
+   *
+   * @param fs filesystem
+   * @param dir dir to scan
+   * @param includeEmptyFiles should empty files be included in the result?
+   * @return a possibly empty list, sorted newest-first
+   * @throws IOException IO problems
+   * @throws FileNotFoundException if the target dir is actually a path
+   */
+  public List<Path> findAllHistoryEntries(FileSystem fs,
+      Path dir,
+      boolean includeEmptyFiles) throws IOException {
+    assert fs != null;
+    assert dir != null;
+    if (!fs.exists(dir)) {
+      fs.mkdirs(dir);
+    } else if (!fs.isDirectory(dir)) {
+      throw new FileNotFoundException("Not a directory " + dir.toString());
+    }
+
+    PathFilter filter = new GlobFilter(SliderKeys.HISTORY_FILENAME_GLOB_PATTERN);
+    FileStatus[] stats = fs.listStatus(dir, filter);
+    List<Path> paths = new ArrayList<>(stats.length);
+    for (FileStatus stat : stats) {
+      log.debug("Possible entry: {}", stat.toString());
+      if (stat.isFile() && (includeEmptyFiles || stat.getLen() > 0)) {
+        paths.add(stat.getPath());
+      }
+    }
+    sortHistoryPaths(paths);
+    return paths;
+  }
+
+  /**
+   * Sort history paths in place, newest (by filename) first.
+   * @param paths list to sort
+   */
+  @VisibleForTesting
+  public static void sortHistoryPaths(List<Path> paths) {
+    Collections.sort(paths, new NewerFilesFirst());
+  }
+
+  /**
+   * Iterate through the paths until one can be loaded; failures to load an
+   * individual file are logged and the next path is attempted.
+   * @param fileSystem filesystem to read from
+   * @param paths paths to load
+   * @return the loaded history including the path -or null if all failed to load
+   * @throws BadConfigException if a file loads but its role mapping is invalid
+   */
+  public LoadedRoleHistory attemptToReadHistory(FileSystem fileSystem,
+      List<Path> paths)
+      throws BadConfigException {
+    ListIterator<Path> pathIterator = paths.listIterator();
+    boolean success = false;
+    LoadedRoleHistory history = null;
+    while (!success && pathIterator.hasNext()) {
+      Path path = pathIterator.next();
+      try {
+        history = read(fileSystem, path);
+        // success
+        success = true;
+        history.setPath(path);
+      } catch (IOException e) {
+        log.info("Failed to read {}", path, e);
+      } catch (AvroTypeException e) {
+        log.warn("Failed to parse {}", path, e);
+      } catch (Exception e) {
+        // low level event logged @ warn level
+        log.warn("Exception while reading {}", path, e);
+      }
+    }
+    return history;
+  }
+
+  /**
+   * Try to load the history from a directory -a failure to load a specific
+   * file is downgraded to a log and the next older path attempted instead.
+   * @param fs filesystem
+   * @param dir dir to load from
+   * @return the history loaded, including the path; null if none loaded
+   * @throws IOException if indexing the history directory fails.
+   * @throws BadConfigException if a file loads but its role mapping is invalid
+   */
+  public LoadedRoleHistory loadFromHistoryDir(FileSystem fs, Path dir)
+      throws IOException, BadConfigException {
+    assert fs != null: "null filesystem";
+    List<Path> entries = findAllHistoryEntries(fs, dir, false);
+    return attemptToReadHistory(fs, entries);
+  }
+
+  /**
+   * Delete all old history entries older than the one we want to keep. This
+   * uses the filename ordering to determine age, not timestamps.
+   * @param fileSystem filesystem
+   * @param keep path to keep -used in thresholding the files
+   * @return the number of files deleted
+   * @throws FileNotFoundException if the path to keep is not present (safety
+   * check to stop the entire dir being purged)
+   * @throws IOException IO problems
+   */
+  public int purgeOlderHistoryEntries(FileSystem fileSystem, Path keep)
+      throws IOException {
+    assert fileSystem != null : "null filesystem";
+    if (!fileSystem.exists(keep)) {
+      throw new FileNotFoundException(keep.toString());
+    }
+    Path dir = keep.getParent();
+    log.debug("Purging entries in {} up to {}", dir, keep);
+    // includeEmptyFiles=true: empty files older than "keep" are purged too
+    List<Path> paths = findAllHistoryEntries(fileSystem, dir, true);
+    Collections.sort(paths, new OlderFilesFirst());
+    int deleteCount = 0;
+    for (Path path : paths) {
+      if (path.equals(keep)) {
+        break;
+      } else {
+        log.debug("Deleting {}", path);
+        deleteCount++;
+        fileSystem.delete(path, false);
+      }
+    }
+    return deleteCount;
+  }
+
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org