Posted to commits@knox.apache.org by mo...@apache.org on 2018/01/11 17:38:28 UTC

[01/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Repository: knox
Updated Branches:
  refs/heads/master 99e6a54af -> 92e2ec59a


http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
----------------------------------------------------------------------
diff --cc gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
index 8c5f21a,0000000..70085d4
mode 100644,000000..100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
@@@ -1,1305 -1,0 +1,1322 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.util.urltemplate;
 +
 +import org.apache.hadoop.test.category.FastTests;
 +import org.apache.hadoop.test.category.UnitTests;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +import java.net.URISyntaxException;
 +import java.util.Iterator;
 +
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.hamcrest.CoreMatchers.nullValue;
 +import static org.junit.Assert.assertThat;
 +
 +@Category( { UnitTests.class, FastTests.class } )
 +public class ParserTest {
 +
 +  private void assertBasics(
 +      Template template,
 +      boolean isAbsolute,
 +      boolean isDirectory,
 +      boolean hasQuery,
 +      int pathSegmentsSize,
 +      int querySegmentsSize ) {
 +    assertThat( "Incorrect isAbsolute value.", template.isAbsolute(), is( isAbsolute ) );
 +    assertThat( "Incorrect isDirectory value.", template.isDirectory(), is( isDirectory ) );
 +    assertThat( "Incorrect hasQuery value.", template.hasQuery(), is( hasQuery ) );
 +    assertThat( "Incorrect path size.", template.getPath().size(), is( pathSegmentsSize ) );
 +    assertThat( "Incorrect query size.", template.getQuery().size(), is( querySegmentsSize ) );
 +  }
 +
 +  public void assertPath(
 +      Template template,
 +      int index,
 +      String paramName,
 +      String valuePattern ) {
 +    Path segment = template.getPath().get( index );
 +    assertThat( "Incorrect template queryParam name.", segment.getParamName(), is( paramName ) );
 +    assertThat( "Incorrect template value pattern.", segment.getFirstValue().getToken().getEffectivePattern(), is( valuePattern ) );
 +  }
 +
 +  public void assertPath(
 +      Template template,
 +      int index,
 +      String paramName,
 +      String valuePattern,
 +      int type,
 +      int minRequired,
 +      int maxAllowed ) {
 +    Path segment = template.getPath().get( index );
 +    assertThat( "Param name wrong.", segment.getParamName(), is( paramName ) );
 +    assertThat( "Value pattern wrong.", segment.getFirstValue().getEffectivePattern(), is( valuePattern ) );
 +    assertThat( "Segment type wrong.", segment.getFirstValue().getType(), is( type ) );
 +//    assertThat( "Segment min required wrong.", segment.getMinRequired(), is( minRequired ) );
 +//    assertThat( "Segment max allowed wrong.", segment.getMaxAllowed(), is( maxAllowed ) );
 +  }
 +
 +  public void assertQuery(
 +      Template template,
 +      String queryName,
 +      String paramName,
 +      String valuePattern ) {
 +    Query segment = template.getQuery().get( queryName );
 +    assertThat( "Query name wrong.", segment.getQueryName(), is( queryName ));
 +    assertThat( "Param name wrong.", segment.getParamName(), is( paramName ));
 +    assertThat( "value pattern wrong.", segment.getFirstValue().getToken().getEffectivePattern(), is( valuePattern ) );
 +  }
 +
 +  public void assertQuery(
 +      Template template,
 +      String queryName,
 +      String paramName,
 +      String valuePattern,
 +      int type,
 +      int minRequired,
 +      int maxAllowed ) {
 +    Query segment = template.getQuery().get( queryName );
 +    assertThat( "Query name wrong.", segment.getQueryName(), is( queryName ));
 +    assertThat( "Param name wrong.", segment.getParamName(), is( paramName ));
 +    assertThat( "value pattern wrong.", segment.getFirstValue().getEffectivePattern(), is( valuePattern ));
 +    assertThat( "Segment type wrong.", segment.getFirstValue().getType(), is( type ) );
 +//    assertThat( "Segment min required wrong.", segment.getMinRequired(), is( minRequired ) );
 +//    assertThat( "Segment max allowed wrong.", segment.getMaxAllowed(), is( maxAllowed ) );
 +  }
 +
 +  @Test
 +  public void testCompleteUrl() throws URISyntaxException {
 +    String text;
 +    Template template;
 +    Parser parser = new Parser();
 +
 +    text = "foo://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, false, true, 3, 2 );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +//  @Test
 +//  public void testInvalidPatterns() {
 +//    //TODO: ? in wrong spot.
 +//    //TODO: & in wrong spots.
 +//  }
 +
 +//  @Ignore( "TODO" )
 +//  @Test
 +//  public void testRegexPatterns() {
 +//  }
 +
 +  @Test
 +  public void testTemplates() throws URISyntaxException {
 +    String text;
 +    Template template;
 +
 +    text = "{path}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "path", "**" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{pathA}/{pathB}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 2, 0 );
 +    assertPath( template, 0, "pathA", "**" );
 +    assertPath( template, 1, "pathB", "**" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?paramA={valueA}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "paramA", "valueA", "**" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?paramA={valueA}&paramB={valueB}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 2 );
 +    assertQuery( template, "paramA", "valueA", "**" );
 +    assertQuery( template, "paramB", "valueB", "**" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?paramA={valueA}?paramB={valueB}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 2 );
 +    assertQuery( template, "paramA", "valueA", "**" );
 +    assertQuery( template, "paramB", "valueB", "**" );
 +    //assertThat( template.toString(), is( text ) );
 +
 +    text = "{pathA}?paramA={valueA}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 1, 1 );
 +    assertPath( template, 0, "pathA", "**" );
 +    assertQuery( template, "paramA", "valueA", "**" );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testStaticPatterns() throws Exception {
 +    Parser parser = new Parser();
 +    String text;
 +    Template template;
 +
 +    text = "";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 0, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "#";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "path";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "", "path" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/path";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, false, false, 1, 0 );
 +    assertPath( template, 0, "", "path" );
 +    assertThat( template.toString(), is( text ) );
 +
 +//    text = "//path";
 +//    template = parser.parseTemplate( text );
 +//    assertBasics( template, true, false, false, 1, 0 );
 +//    assertPath( template, 0, "", "path" );
 +
 +    text = "path/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, true, false, 1, 0 );
 +    assertPath( template, 0, "", "path" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "path//";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, true, false, 1, 0 );
 +    assertPath( template, 0, "", "path" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "/path/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 1, 0 );
 +    assertPath( template, 0, "", "path" );
 +    assertThat( template.toString(), is( text ) );
 +
 +//    text = "//path//";
 +//    template = parser.parseTemplate( text );
 +//    assertBasics( template, true, true, false, 1, 0 );
 +//    assertPath( template, 0, "", "path" );
 +
 +    text = "pathA/pathB";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "pathA//pathB";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "/pathA/pathB";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, false, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/pathA//pathB";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, false, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "pathA/pathB/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, true, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "pathA//pathB/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, true, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "/pathA/pathB/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/pathA//pathB/";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 2, 0 );
 +    assertPath( template, 0, "", "pathA" );
 +    assertPath( template, 1, "", "pathB" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "/?";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, true, 0, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +//    text = "//??";
 +//    template = parser.parseTemplate( text );
 +//    assertBasics( template, true, true, true, 0, 0 );
 +
 +    text = "?name=value";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "name", "", "value" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?name1=value1&name2=value2";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 2 );
 +    assertQuery( template, "name1", "", "value1" );
 +    assertQuery( template, "name2", "", "value2" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?name1=value1&&name2=value2";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 2 );
 +    assertQuery( template, "name1", "", "value1" );
 +    assertQuery( template, "name2", "", "value2" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "/?name=value";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, true, 0, 1 );
 +    assertQuery( template, "name", "", "value" );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/?name1=value1&name2=value2";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, true, true, true, 0, 2 );
 +    assertQuery( template, "name1", "", "value1" );
 +    assertQuery( template, "name2", "", "value2" );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
++  /**
++   *  KNOX-1055
++   *  In some cases & could be encoded as &amp;
++   */
++  @Test
++  public void testEncodedChar() throws URISyntaxException {
++    Parser parser = new Parser();
++    String text;
++    Template template;
++
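++    // The encoded delimiter must parse exactly like a literal '&', yielding
++    // the two query parameters 'id' and 'attempt' asserted below.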
++    text = "stage?id=007&attempt=0";
++    template = parser.parseTemplate( text );
++    assertBasics( template, false, false, true, 1, 2 );
++    assertQuery( template, "id", "", "007" );
++    assertQuery( template, "attempt", "", "0" );
++  }
++
 +  @Test
 +  public void testParameterizedPathTemplatesWithWildcardAndRegex() throws URISyntaxException {
 +    String text;
 +    Template template;
 +
 +    text = "{path}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "path", "**", Segment.GLOB, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{path=static}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "path", "static", Segment.STATIC, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{path=*}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "path", "*", Segment.STAR, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{path=**}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "path", "**", Segment.GLOB, 0, Integer.MAX_VALUE );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{path=wild*card}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertPath( template, 0, "path", "wild*card", Segment.REGEX, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testParameterizedQueryTemplatesWithWildcardAndRegex() throws URISyntaxException {
 +    String text;
 +    Template template;
 +
 +    text = "?query={queryParam}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "query", "queryParam", "**", Segment.GLOB, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?query={queryParam=static}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "query", "queryParam", "static", Segment.STATIC, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?query={queryParam=*}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "query", "queryParam", "*", Segment.STAR, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?query={queryParam=**}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "query", "queryParam", "**", Segment.GLOB, 0, Integer.MAX_VALUE );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?query={queryParam=wild*card}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "query", "queryParam", "wild*card", Segment.REGEX, 1, 1 );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testGlobPattern() throws URISyntaxException {
 +    String text;
 +    Template template;
 +
 +    text = "**";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/**";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, false, false, 1, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "**/";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, true, false, 1, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/**/";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 1, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "/**/path";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, false, false, 2, 0 );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +//  @Ignore( "TODO" )
 +//  @Test
 +//  public void testPatternsWithSchemeAndAuthority() throws URISyntaxException {
 +//    String text;
 +//    Template template;
 +//
 +//    text = "http:";
 +//    template = Parser.parse( text );
 +//
 +//    text = "http:/path";
 +//    template = Parser.parse( text );
 +//
 +//    text = "http://host";
 +//    template = Parser.parse( text );
 +//
 +//    text = "http://host/";
 +//    template = Parser.parse( text );
 +//
 +//    text = "http://host:80";
 +//    template = Parser.parse( text );
 +//
 +//    text = "http://host:80/";
 +//    template = Parser.parse( text );
 +//
 +//
 +//    text = "{scheme}:";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme}:/{path}";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme}://{host}";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme}://{host}/";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme}://{host}:{port}";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme}://{host}:{port}/";
 +//    template = Parser.parse( text );
 +//
 +//
 +//    text = "{scheme=http}:/{path=index.html}";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme=http}://{host=*.com}";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme=https}://{host=*.edu}/";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme=rmi}://{host=*}:{port=80}";
 +//    template = Parser.parse( text );
 +//
 +//    text = "{scheme=ftp}://{host=localhost*}:{port=*80}/";
 +//    template = Parser.parse( text );
 +//  }
 +
 +  @Test
 +  public void testAuthority() throws URISyntaxException {
 +    String text;
 +    Template template;
 +    String image;
 +
 +    text = "//";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//:@:";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//host";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "host" ) );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//@host";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "host" ) );
 +    assertThat( template.getPort(), nullValue() );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//@:80";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), is( "80" ) );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "80" ) );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//username@";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername().getFirstValue().getOriginalPattern(), is( "username" ) );
 +    assertThat( template.getUsername().getFirstValue().getEffectivePattern(), is( "username" ) );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//:password@";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword().getFirstValue().getOriginalPattern(), is( "password" ) );
 +    assertThat( template.getPassword().getFirstValue().getEffectivePattern(), is( "password" ) );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//{host}:{port}";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost().getParamName(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort().getParamName(), is( "port" ) );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    image = template.toString();
 +    assertThat( image, is( "//{host}:{port}" ) );
 +    assertThat( template.toString(), is( text ) );
 +
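 +    // The same host/port pattern without the leading '//' is still parsed as
 +    // an authority rather than a path segment.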
 +    text = "{host}:{port}";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost().getParamName(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort().getParamName(), is( "port" ) );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    image = template.toString();
 +    assertThat( image, is( "{host}:{port}" ) );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testQuery() throws URISyntaxException {
 +    String text;
 +    Template template;
 +    Query query;
 +    Iterator<Segment.Value> values;
 +    Segment.Value value;
 +
 +    text = "?queryName";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    query = template.getQuery().get( "queryName" );
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "queryName" ) );
 +    assertThat( query.getParamName(), is( "" ) );
 +    assertThat( query.getFirstValue().getEffectivePattern(), nullValue() ); //is( "*" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?query=value1&query=value2";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    query = template.getQuery().get( "query" );
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "query" ) );
 +    assertThat( query.getParamName(), is( "" ) );
 +    values = query.getValues().iterator();
 +    value = values.next();
 +    assertThat( value.getOriginalPattern(), is( "value1" ) );
 +    assertThat( value.getEffectivePattern(), is( "value1" ) );
 +    value = values.next();
 +    assertThat( value.getOriginalPattern(), is( "value2" ) );
 +    assertThat( value.getEffectivePattern(), is( "value2" ) );
 +    assertThat( values.hasNext(), is( false ) );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testFragment() throws URISyntaxException {
 +    String text;
 +    Template template;
 +
 +    text = "#fragment";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment().getFirstValue().getEffectivePattern(), is( "fragment" ) );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testEdgeCases() throws URISyntaxException {
 +    Parser parser = new Parser();
 +    String text;
 +    Template template;
 +
 +    text = "//";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "??";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "##";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment().getFirstValue().getEffectivePattern(), is( "#" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "??name=value";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    assertQuery( template, "name", "", "value" );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = ":";
 +    template = parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertThat( template.hasScheme(), is( false ) );
 +    assertThat( template.getScheme(), nullValue() );
 +    assertThat( template.hasAuthority(), is( false ) );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), is( ":" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( ":" ) );
 +    assertThat( template.toString(), is( ":" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = ":?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 1, 0 );
 +    assertThat( template.hasScheme(), is( false ) );
 +    assertThat( template.getScheme(), nullValue() );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), is( ":" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( ":" ) );
 +    assertThat( template.hasQuery(), is( true ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = ":#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertThat( template.hasScheme(), is( false ) );
 +    assertThat( template.getScheme(), nullValue() );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), is( ":" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( ":" ) );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "http:?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), is( "http" ) );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "http" ) );
 +    assertThat( template.hasQuery(), is( true ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "http:#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), is( "http" ) );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "http" ) );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "scheme:path?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 1, 0 );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "scheme:path#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 1, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//host/";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "host" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//host?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "host" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//host#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "host" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "host" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "///";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//:";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "//:/";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 0, 0 );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getHost(), nullValue() );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//:?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    assertThat( template.getHost(), nullValue() );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "//:#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, false, 0, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getHost(), nullValue() );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "///#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, true, false, 0, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "///path#";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, false, false, 1, 0 );
 +    assertThat( template.hasFragment(), is( true ) );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "///?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, true, true, 0, 0 );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "///path?";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, true, false, true, 1, 0 );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getFragment(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testQueryRemainder() throws URISyntaxException {
 +    String text;
 +    Template template;
 +    Query query;
 +
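 +    // Trailing query wildcards are captured via getExtra() rather than the
 +    // named query map, which is why the query segment count stays 0 below.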
 +    text = "?*";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "*" ) );
 +    assertThat( query.getParamName(), is( "" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), nullValue() ); //is( "*" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?**";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "**" ) );
 +    assertThat( query.getParamName(), is( "" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), nullValue() ); //is( "*" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?{*}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "*" ) );
 +    assertThat( query.getParamName(), is( "*" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "**" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?{**}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "**" ) );
 +    assertThat( query.getParamName(), is( "**" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "**" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "?*={*}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "*" ) );
 +    assertThat( query.getParamName(), is( "*" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "**" ) );
 +    //IMPROVE    assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "?**={**}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "**" ) );
 +    assertThat( query.getParamName(), is( "**" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "**" ) );
 +    //IMPROVE    assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "?**={**=**}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 0 );
 +    query = template.getExtra();
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "**" ) );
 +    assertThat( query.getParamName(), is( "**" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), is( "**" ) );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "**" ) );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testSimplifiedQuerySyntax() throws URISyntaxException {
 +    String text;
 +    Template template;
 +    Query query;
 +
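 +    // '?{queryParam}' is shorthand for '?queryParam={queryParam}': the query
 +    // name doubles as the parameter name.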
 +    text = "?{queryParam}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    query = template.getQuery().get( "queryParam" );
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "queryParam" ) );
 +    assertThat( query.getParamName(), is( "queryParam" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "**" ) );
 +    //IMPROVE  assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "?{queryParam=value}";
 +    template = Parser.parseTemplate( text );
 +    assertBasics( template, false, false, true, 0, 1 );
 +    query = template.getQuery().get( "queryParam" );
 +    assertThat( query, notNullValue() );
 +    assertThat( query.getQueryName(), is( "queryParam" ) );
 +    assertThat( query.getParamName(), is( "queryParam" ) );
 +    assertThat( query.getFirstValue().getOriginalPattern(), is( "value" ) );
 +    assertThat( query.getFirstValue().getEffectivePattern(), is( "value" ) );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testAllWildcardUseCases() throws URISyntaxException {
 +    String text;
 +    Template template;
 +
 +    text = "*://*:*/**?**";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "*://*:*/**/path?{**}";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "*://*:*/**/webhdfs/v1/?{**}";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testQueryNameWithoutValue() throws URISyntaxException {
 +    Parser parser = new Parser();
 +    String text;
 +    Template template;
 +    String string;
 +    Expander expander = new Expander();
 +
 +    text = "*://*:*/**?X";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getParamName(), is( "" ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), is( "*" ) );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getHost().getParamName(), is( "" ) );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "*" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort().getParamName(), is( "" ) );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), is( "*" ) );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPath().size(), is( 1 ) );
 +    assertThat( template.getPath().get( 0 ).getParamName(), is( "" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), is( "**" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( "**" ) );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.getQuery().get( "X" ), notNullValue() );
 +    string = expander.expandToString( template, null, null );
 +    assertThat( string, is( text ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "*://*:*/**?X=";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.getQuery().get( "X" ), notNullValue() );
 +    string = expander.expandToString( template, null, null );
 +    assertThat( string, is( "*://*:*/**?X" ) );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +
 +    text = "http://localhost:62142/gateway/cluster/webhdfs/data/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir/file?aG9zdD1sb2NhbGhvc3QmcG9ydD02MjEzOSZvcD1DUkVBVEUmdXNlci5uYW1lPWhkZnM";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.getQuery().get( "aG9zdD1sb2NhbGhvc3QmcG9ydD02MjEzOSZvcD1DUkVBVEUmdXNlci5uYW1lPWhkZnM" ), notNullValue() );
 +    string = expander.expandToString( template, null, null );
 +    assertThat( string, is( "http://localhost:62142/gateway/cluster/webhdfs/data/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir/file?aG9zdD1sb2NhbGhvc3QmcG9ydD02MjEzOSZvcD1DUkVBVEUmdXNlci5uYW1lPWhkZnM" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "http://localhost:62142/gateway/cluster/webhdfs/data/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir/file?aG9zdD1sb2NhbGhvc3QmcG9ydD02MjEzOSZvcD1DUkVBVEUmdXNlci5uYW1lPWhkZnM=";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template, notNullValue() );
 +    assertThat( template.getQuery().get( "aG9zdD1sb2NhbGhvc3QmcG9ydD02MjEzOSZvcD1DUkVBVEUmdXNlci5uYW1lPWhkZnM" ), notNullValue() );
 +    string = expander.expandToString( template, null, null );
 +    assertThat( string, is( "http://localhost:62142/gateway/cluster/webhdfs/data/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir/file?aG9zdD1sb2NhbGhvc3QmcG9ydD02MjEzOSZvcD1DUkVBVEUmdXNlci5uYW1lPWhkZnM" ) );
 +    //IMPROVE assertThat( template.toString(), is( text ) );
 +    assertThat( template.getPattern(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testTemplateWithOnlyAuthority() throws Exception {
 +    String text;
 +    Template template;
 +    Parser parser = new Parser();
 +
 +    text = "test-host:42";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( false ) );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "test-host" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "test-host" ) );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), is( "42" ) );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "42" ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{test-host}:{test-port}";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( false ) );
 +    assertThat( template.getHost().getParamName(), is( "test-host" ) );
 +    assertThat( template.getHost().getFirstValue().getToken().getOriginalPattern(), nullValue() );
 +    assertThat( template.getHost().getFirstValue().getToken().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort().getParamName(), is( "test-port" ) );
 +    assertThat( template.getHost().getFirstValue().getToken().getOriginalPattern(), nullValue() );
 +    assertThat( template.getPort().getFirstValue().getToken().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testTemplateWithoutAuthority() throws Exception {
 +    String text;
 +    Template template;
 +    Parser parser = new Parser();
 +
 +    text = "test-scheme:/test-path";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), is( "test-scheme" ) );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "test-scheme" ) );
 +    assertThat( template.hasAuthority(), is( false ) );
 +    assertThat( template.getPath().size(), is( 1 ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), is( "test-path" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( "test-path" ) );
 +    assertThat( template.hasQuery(), is( false ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "test-scheme:///test-path";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), is( "test-scheme" ) );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "test-scheme" ) );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.getPath().size(), is( 1 ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( "test-path" ) );
 +    assertThat( template.hasQuery(), is( false ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{test-scheme}:/{test-path}";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getParamName(), is( "test-scheme" ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.hasAuthority(), is( false ) );
 +    assertThat( template.getPath().size(), is( 1 ) );
 +    assertThat( template.getPath().get( 0 ).getParamName(), is( "test-path" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( "**" ) );
 +    assertThat( template.hasQuery(), is( false ) );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "{test-scheme}:///{test-path}";
 +    template = parser.parseTemplate( text );
 +    assertThat( template.hasScheme(), is( true ) );
 +    assertThat( template.getScheme().getParamName(), is( "test-scheme" ) );
 +    assertThat( template.getScheme().getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getScheme().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.hasAuthority(), is( true ) );
 +    assertThat( template.getUsername(), nullValue() );
 +    assertThat( template.getPassword(), nullValue() );
 +    assertThat( template.getHost(), nullValue() );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.getPath().size(), is( 1 ) );
 +    assertThat( template.getPath().get( 0 ).getParamName(), is( "test-path" ) );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getOriginalPattern(), nullValue() );
 +    assertThat( template.getPath().get( 0 ).getFirstValue().getEffectivePattern(), is( "**" ) );
 +    assertThat( template.hasQuery(), is( false ) );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testAuthorityWildcards() throws Exception {
 +    String text;
 +    Template template;
 +
 +    text = "*://*:*/";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "*" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), is( "*" ) );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.toString(), is( text ) );
 +
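 +    // A '**' wildcard in the authority collapses to the effective single-level
 +    // pattern '*'.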
 +    text = "*://**/";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "**" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "*://*/";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "*" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort(), nullValue() );
 +    assertThat( template.toString(), is( text ) );
 +
 +    text = "*://**:**/";
 +    template = Parser.parseTemplate( text );
 +    assertThat( template.getHost().getFirstValue().getOriginalPattern(), is( "**" ) );
 +    assertThat( template.getHost().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.getPort().getFirstValue().getOriginalPattern(), is( "**" ) );
 +    assertThat( template.getPort().getFirstValue().getEffectivePattern(), is( "*" ) );
 +    assertThat( template.toString(), is( text ) );
 +  }
 +
 +  @Test
 +  public void testParseTemplateToken() {
 +    Builder builder;
 +    String input;
 +    Token output;
 +
 +    builder = new Builder( "" );
 +
 +    input = "{";
 +    output = Parser.parseTemplateToken( builder, input, "~" );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "{" ) );
 +    assertThat( output.getEffectivePattern(), is( "{" ) );
 +
 +    input = "}";
 +    output = Parser.parseTemplateToken( builder, input, "~" );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "}" ) );
 +    assertThat( output.getEffectivePattern(), is( "}" ) );
 +
 +    input = "{X";
 +    output = Parser.parseTemplateToken( builder, input, "~" );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "{X" ) );
 +    assertThat( output.getEffectivePattern(), is( "{X" ) );
 +
 +    input = "X}";
 +    output = Parser.parseTemplateToken( builder, input, "~" );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "X}" ) );
 +    assertThat( output.getEffectivePattern(), is( "X}" ) );
 +
 +    input = "X";
 +    output = Parser.parseTemplateToken( builder, input, "~" );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "X" ) );
 +    assertThat( output.getEffectivePattern(), is( "X" ) );
 +
 +    input = "$";
 +    output = Parser.parseTemplateToken( builder, input, "~" );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "$" ) );
 +    assertThat( output.getEffectivePattern(), is( "$" ) );
 +
 +    input = "";
 +    output = Parser.parseTemplateToken( builder, input, Segment.GLOB_PATTERN );
 +    assertThat( output.getParameterName(), is( "" ) );
 +    assertThat( output.getOriginalPattern(), is( "" ) );
 +    assertThat( output.getEffectivePattern(), is( "" ) );
 +  }
 +
 +  @Test
 +  public void testBugKnox599() throws Exception {
 +    Template template;
 +    Template input;
 +    Matcher<String> matcher;
 +
 +    matcher = new Matcher<String>();
 +    template = Parser.parseTemplate( "*://*:*/**/webhdfs/v1/{path=**}?{**}" );
 +    matcher.add( template, "test-value" );
 +
 +    input = Parser.parseTemplate( "http://kminder-os-u14-23-knoxha-150922-1352-2.novalocal:1022/gateway/sandbox/webhdfs/v1/user/hrt_qa/knox-ha/knox_webhdfs_client_dir/test_file?op=CREATE&delegation=XXX&namenoderpcaddress=nameservice&createflag=&createparent=true&overwrite=true" );
 +
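 +    // A query parameter with a value separator but no value ("createflag=") yields an empty-string pattern.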
 +    assertThat( input.getQuery().get( "createflag" ).getFirstValue().getPattern(), is( "" ) );
 +
 +    input = Parser.parseTemplate( "http://kminder-os-u14-23-knoxha-150922-1352-2.novalocal:1022/gateway/sandbox/webhdfs/v1/user/hrt_qa/knox-ha/knox_webhdfs_client_dir/test_file?op=CREATE&delegation=XXX&namenoderpcaddress=nameservice&createflag&createparent=true&overwrite=true" );
 +
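 +    // A query parameter with no value separator at all ("createflag") yields a null pattern.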
 +    assertThat( input.getQuery().get( "createflag" ).getFirstValue().getPattern(), nullValue() );
 +  }
 +
 +  @Test
 +  public void testParserLiteralsWithReservedCharactersBugKnox394() throws Exception {
 +    Template template;
 +    String image;
 +
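 +    // parseLiteral must treat reserved template characters ({, }, $) as literal text, so toString() round-trips the input unchanged.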
 +    template = Parser.parseLiteral( "{}" );
 +    image = template.toString();
 +    assertThat( image, is( "{}" ) );
 +
 +    template = Parser.parseLiteral( "{app.path}/child/path" );
 +    image = template.toString();
 +    assertThat( image, is( "{app.path}/child/path" ) );
 +
 +    template = Parser.parseLiteral( "${app.path}/child/path" );
 +    image = template.toString();
 +    assertThat( image, is( "${app.path}/child/path" ) );
 +
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/pom.xml
----------------------------------------------------------------------


[39/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
index 21627ad,0000000..05fc4eb
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
+++ b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
@@@ -1,858 -1,0 +1,870 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import net.minidev.json.JSONObject;
 +import net.minidev.json.JSONValue;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +
 +/**
 + * Test the Ambari ServiceDiscovery implementation.
 + *
 + * N.B. These tests do NOT verify Ambari API responses. They DO validate the Ambari ServiceDiscovery implementation's
 + *      treatment of the responses as they were observed at the time the tests were developed.
 + */
 +public class AmbariServiceDiscoveryTest {
 +
 +    @Test
 +    public void testSingleClusterDiscovery() throws Exception {
 +        final String discoveryAddress = "http://ambarihost:8080";
 +        final String clusterName = "testCluster";
 +        ServiceDiscovery sd = new TestAmbariServiceDiscovery(clusterName);
 +
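 +        // Mock the discovery configuration; no live Ambari endpoint is contacted.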
 +        ServiceDiscoveryConfig sdc = EasyMock.createNiceMock(ServiceDiscoveryConfig.class);
 +        EasyMock.expect(sdc.getAddress()).andReturn(discoveryAddress).anyTimes();
 +        EasyMock.expect(sdc.getUser()).andReturn(null).anyTimes();
 +        EasyMock.replay(sdc);
 +
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, clusterName);
 +        assertNotNull(cluster);
 +        assertEquals(clusterName, cluster.getName());
 +        assertTrue(AmbariCluster.class.isAssignableFrom(cluster.getClass()));
 +        assertEquals(6, ((AmbariCluster) cluster).getComponents().size());
 +
 +//        printServiceURLs(cluster);
 +    }
 +
 +
 +    @Test
 +    public void testBulkClusterDiscovery() throws Exception {
 +        final String discoveryAddress = "http://ambarihost:8080";
 +        final String clusterName = "anotherCluster";
 +        ServiceDiscovery sd = new TestAmbariServiceDiscovery(clusterName);
 +
 +        ServiceDiscoveryConfig sdc = EasyMock.createNiceMock(ServiceDiscoveryConfig.class);
 +        EasyMock.expect(sdc.getAddress()).andReturn(discoveryAddress).anyTimes();
 +        EasyMock.expect(sdc.getUser()).andReturn(null).anyTimes();
 +        EasyMock.replay(sdc);
 +
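 +        // Discover all clusters; the canned responses define exactly one.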
 +        Map<String, ServiceDiscovery.Cluster> clusters = sd.discover(sdc);
 +        assertNotNull(clusters);
 +        assertEquals(1, clusters.size());
 +        ServiceDiscovery.Cluster cluster = clusters.get(clusterName);
 +        assertNotNull(cluster);
 +        assertEquals(clusterName, cluster.getName());
 +        assertTrue(AmbariCluster.class.isAssignableFrom(cluster.getClass()));
 +        assertEquals(6, ((AmbariCluster) cluster).getComponents().size());
 +
 +//        printServiceURLs(cluster, "NAMENODE", "WEBHCAT", "OOZIE", "RESOURCEMANAGER");
 +    }
 +
 +
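 +    // Debugging helper, referenced only by the commented-out calls in the tests above.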
 +    private static void printServiceURLs(ServiceDiscovery.Cluster cluster) {
 +        final String[] services = new String[]{"NAMENODE",
 +                                               "JOBTRACKER",
 +                                               "WEBHDFS",
 +                                               "WEBHCAT",
 +                                               "OOZIE",
 +                                               "WEBHBASE",
 +                                               "HIVE",
 +                                               "RESOURCEMANAGER"};
 +        printServiceURLs(cluster, services);
 +    }
 +
 +
 +    private static void printServiceURLs(ServiceDiscovery.Cluster cluster, String...services) {
 +        for (String name : services) {
 +            StringBuilder sb = new StringBuilder();
 +            List<String> urls = cluster.getServiceURLs(name);
 +            if (urls != null && !urls.isEmpty()) {
 +                for (String url : urls) {
 +                    sb.append(url);
 +                    sb.append(" ");
 +                }
 +            }
 +            System.out.println(String.format("%18s: %s", name, sb.toString()));
 +        }
 +    }
 +
 +
 +    /**
 +     * ServiceDiscovery implementation derived from AmbariServiceDiscovery, delegating REST calls to a TestRESTInvoker
 +     * whose overridden invoke method returns canned responses, eliminating the need for actual HTTP interactions with
 +     * a real Ambari endpoint.
 +     */
 +    private static final class TestAmbariServiceDiscovery extends AmbariServiceDiscovery {
 +
++        final static String CLUSTER_PLACEHOLDER = TestRESTInvoker.CLUSTER_PLACEHOLDER;
++
++        TestAmbariServiceDiscovery(String clusterName) {
++            super(new TestRESTInvoker(clusterName));
++        }
++
++    }
++
++    private static final class TestRESTInvoker extends RESTInvoker {
++
 +        final static String CLUSTER_PLACEHOLDER = "CLUSTER_NAME";
 +
 +        private Map<String, JSONObject> cannedResponses = new HashMap<>();
 +
-         TestAmbariServiceDiscovery(String clusterName) {
-             cannedResponses.put(AMBARI_CLUSTERS_URI,
-                                 (JSONObject) JSONValue.parse(CLUSTERS_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
-                                                                                                clusterName)));
++        TestRESTInvoker(String clusterName) {
++            super(null);
++
++            cannedResponses.put(AmbariServiceDiscovery.AMBARI_CLUSTERS_URI,
++                    (JSONObject) JSONValue.parse(CLUSTERS_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
++                            clusterName)));
 +
-             cannedResponses.put(String.format(AMBARI_HOSTROLES_URI, clusterName),
-                                 (JSONObject) JSONValue.parse(HOSTROLES_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
-                                                                                                 clusterName)));
++            cannedResponses.put(String.format(AmbariServiceDiscovery.AMBARI_HOSTROLES_URI, clusterName),
++                    (JSONObject) JSONValue.parse(HOSTROLES_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
++                            clusterName)));
 +
-             cannedResponses.put(String.format(AMBARI_SERVICECONFIGS_URI, clusterName),
-                                 (JSONObject) JSONValue.parse(SERVICECONFIGS_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
-                                                                                                      clusterName)));
++            cannedResponses.put(String.format(AmbariServiceDiscovery.AMBARI_SERVICECONFIGS_URI, clusterName),
++                    (JSONObject) JSONValue.parse(SERVICECONFIGS_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
++                            clusterName)));
 +        }
 +
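 +        // Canned responses are keyed by the Ambari API path, so the host portion of each URL is ignored.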
 +        @Override
-         protected JSONObject invokeREST(String url, String username, String passwordAlias) {
++        JSONObject invoke(String url, String username, String passwordAlias) {
 +            return cannedResponses.get(url.substring(url.indexOf("/api")));
 +        }
 +    }
 +
 +
 +    ////////////////////////////////////////////////////////////////////////
 +    //  JSON response templates, based on actual response content excerpts
 +    ////////////////////////////////////////////////////////////////////////
 +
 +    private static final String CLUSTERS_JSON_TEMPLATE =
 +    "{\n" +
 +    "  \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters\",\n" +
 +    "  \"items\" : [\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"Clusters\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"version\" : \"HDP-2.6\"\n" +
 +    "      }\n" +
 +    "    }\n" +
 +    "  ]" +
 +    "}";
 +
 +
 +    private static final String HOSTROLES_JSON_TEMPLATE =
 +    "{\n" +
 +    "  \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services?fields=components/host_components/HostRoles\",\n" +
 +    "  \"items\" : [\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/AMBARI_METRICS\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"AMBARI_METRICS\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/AMBARI_METRICS/components/METRICS_COLLECTOR\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"METRICS_COLLECTOR\",\n" +
 +    "            \"service_name\" : \"AMBARI_METRICS\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/METRICS_COLLECTOR\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"METRICS_COLLECTOR\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"AMBARI_METRICS\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HBASE/components/HBASE_MASTER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HBASE_MASTER\",\n" +
 +    "            \"service_name\" : \"HBASE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6401.ambari.apache.org/host_components/HBASE_MASTER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HBASE_MASTER\",\n" +
 +    "                \"host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HBASE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HDFS\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"HDFS\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HDFS/components/NAMENODE\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"NAMENODE\",\n" +
 +    "            \"service_name\" : \"HDFS\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6401.ambari.apache.org/host_components/NAMENODE\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"NAMENODE\",\n" +
 +    "                \"host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HDFS\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HDFS/components/SECONDARY_NAMENODE\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"SECONDARY_NAMENODE\",\n" +
 +    "            \"service_name\" : \"HDFS\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/SECONDARY_NAMENODE\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"SECONDARY_NAMENODE\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HDFS\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"HIVE\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/HCAT\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HCAT\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/HCAT\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HCAT\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/HIVE_METASTORE\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HIVE_METASTORE\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/HIVE_METASTORE\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HIVE_METASTORE\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/HIVE_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HIVE_SERVER\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/HIVE_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HIVE_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/WEBHCAT_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"WEBHCAT_SERVER\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/WEBHCAT_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"WEBHCAT_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/OOZIE\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"OOZIE\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/OOZIE/components/OOZIE_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"OOZIE_SERVER\",\n" +
 +    "            \"service_name\" : \"OOZIE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/OOZIE_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"OOZIE_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"OOZIE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"YARN\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN/components/APP_TIMELINE_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"APP_TIMELINE_SERVER\",\n" +
 +    "            \"service_name\" : \"YARN\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/APP_TIMELINE_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"APP_TIMELINE_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"YARN\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN/components/NODEMANAGER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"NODEMANAGER\",\n" +
 +    "            \"service_name\" : \"YARN\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/NODEMANAGER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"NODEMANAGER\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"YARN\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN/components/RESOURCEMANAGER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"RESOURCEMANAGER\",\n" +
 +    "            \"service_name\" : \"YARN\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/RESOURCEMANAGER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"RESOURCEMANAGER\",\n" +
 +    "                \"ha_state\" : \"ACTIVE\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"YARN\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/ZOOKEEPER\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"ZOOKEEPER\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/ZOOKEEPER/components/ZOOKEEPER_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "            \"service_name\" : \"ZOOKEEPER\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6401.ambari.apache.org/host_components/ZOOKEEPER_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "                \"host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"ZOOKEEPER\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            },\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/ZOOKEEPER_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"ZOOKEEPER\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            },\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/ZOOKEEPER_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"ZOOKEEPER\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    }\n" +
 +    "  ]\n" +
 +    "}\n";
 +
 +
 +    private static final String SERVICECONFIGS_JSON_TEMPLATE =
 +    "{\n" +
 +    "  \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?is_current=true\",\n" +
 +    "  \"items\" : [\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=HBASE&service_config_version=1\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hbase-site\",\n" +
 +    "          \"tag\" : \"version1503410563715\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hbase.master.info.bindAddress\" : \"0.0.0.0\",\n" +
 +    "            \"hbase.master.info.port\" : \"16010\",\n" +
 +    "            \"hbase.master.port\" : \"16000\",\n" +
 +    "            \"hbase.regionserver.info.port\" : \"16030\",\n" +
 +    "            \"hbase.regionserver.port\" : \"16020\",\n" +
 +    "            \"hbase.zookeeper.property.clientPort\" : \"2181\",\n" +
 +    "            \"hbase.zookeeper.quorum\" : \"c6403.ambari.apache.org,c6402.ambari.apache.org,c6401.ambari.apache.org\",\n" +
 +    "            \"hbase.zookeeper.useMulti\" : \"true\",\n" +
 +    "            \"zookeeper.znode.parent\" : \"/hbase-unsecure\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 1,\n" +
 +    "      \"service_config_version_note\" : \"Initial configurations for HBase\",\n" +
 +    "      \"service_name\" : \"HBASE\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=HDFS&service_config_version=2\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hdfs-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"dfs.cluster.administrators\" : \" hdfs\",\n" +
 +    "            \"dfs.datanode.address\" : \"0.0.0.0:50010\",\n" +
 +    "            \"dfs.datanode.http.address\" : \"0.0.0.0:50075\",\n" +
 +    "            \"dfs.datanode.https.address\" : \"0.0.0.0:50475\",\n" +
 +    "            \"dfs.datanode.ipc.address\" : \"0.0.0.0:8010\",\n" +
 +    "            \"dfs.http.policy\" : \"HTTP_ONLY\",\n" +
 +    "            \"dfs.https.port\" : \"50470\",\n" +
 +    "            \"dfs.journalnode.http-address\" : \"0.0.0.0:8480\",\n" +
 +    "            \"dfs.journalnode.https-address\" : \"0.0.0.0:8481\",\n" +
 +    "            \"dfs.namenode.http-address\" : \"c6401.ambari.apache.org:50070\",\n" +
 +    "            \"dfs.namenode.https-address\" : \"c6401.ambari.apache.org:50470\",\n" +
 +    "            \"dfs.namenode.rpc-address\" : \"c6401.ambari.apache.org:8020\",\n" +
 +    "            \"dfs.namenode.secondary.http-address\" : \"c6402.ambari.apache.org:50090\",\n" +
 +    "            \"dfs.webhdfs.enabled\" : \"true\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : {\n" +
 +    "            \"final\" : {\n" +
 +    "              \"dfs.webhdfs.enabled\" : \"true\",\n" +
 +    "              \"dfs.namenode.http-address\" : \"true\",\n" +
 +    "              \"dfs.support.append\" : \"true\",\n" +
 +    "              \"dfs.namenode.name.dir\" : \"true\",\n" +
 +    "              \"dfs.datanode.failed.volumes.tolerated\" : \"true\",\n" +
 +    "              \"dfs.datanode.data.dir\" : \"true\"\n" +
 +    "            }\n" +
 +    "          }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"core-site\",\n" +
 +    "          \"tag\" : \"version1502131215159\",\n" +
 +    "          \"version\" : 2,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hadoop.http.authentication.simple.anonymous.allowed\" : \"true\",\n" +
 +    "            \"net.topology.script.file.name\" : \"/etc/hadoop/conf/topology_script.py\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : {\n" +
 +    "            \"final\" : {\n" +
 +    "              \"fs.defaultFS\" : \"true\"\n" +
 +    "            }\n" +
 +    "          }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 2,\n" +
 +    "      \"service_config_version_note\" : \"knox trusted proxy support\",\n" +
 +    "      \"service_name\" : \"HDFS\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=HIVE&service_config_version=3\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hive-env\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive_security_authorization\" : \"None\",\n" +
 +    "            \"webhcat_user\" : \"hcat\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hiveserver2-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive.metastore.metrics.enabled\" : \"true\",\n" +
 +    "            \"hive.security.authorization.enabled\" : \"false\",\n" +
 +    "            \"hive.service.metrics.hadoop2.component\" : \"hiveserver2\",\n" +
 +    "            \"hive.service.metrics.reporter\" : \"HADOOP2\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hive-interactive-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive.server2.enable.doAs\" : \"false\",\n" +
 +    "            \"hive.server2.tez.default.queues\" : \"default\",\n" +
 +    "            \"hive.server2.tez.initialize.default.sessions\" : \"true\",\n" +
 +    "            \"hive.server2.tez.sessions.custom.queue.allowed\" : \"ignore\",\n" +
 +    "            \"hive.server2.tez.sessions.per.default.queue\" : \"1\",\n" +
 +    "            \"hive.server2.tez.sessions.restricted.configs\" : \"hive.execution.mode,hive.execution.engine\",\n" +
 +    "            \"hive.server2.thrift.http.port\" : \"10501\",\n" +
 +    "            \"hive.server2.thrift.port\" : \"10500\",\n" +
 +    "            \"hive.server2.webui.port\" : \"10502\",\n" +
 +    "            \"hive.server2.webui.use.ssl\" : \"false\",\n" +
 +    "            \"hive.server2.zookeeper.namespace\" : \"hiveserver2-hive2\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"tez-interactive-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"tez.am.am-rm.heartbeat.interval-ms.max\" : \"10000\",\n" +
 +    "            \"tez.am.client.heartbeat.poll.interval.millis\" : \"6000\",\n" +
 +    "            \"tez.am.client.heartbeat.timeout.secs\" : \"90\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hive-site\",\n" +
 +    "          \"tag\" : \"version1502130841736\",\n" +
 +    "          \"version\" : 2,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive.metastore.sasl.enabled\" : \"false\",\n" +
 +    "            \"hive.metastore.server.max.threads\" : \"100000\",\n" +
 +    "            \"hive.metastore.uris\" : \"thrift://c6402.ambari.apache.org:9083\",\n" +
 +    "            \"hive.server2.allow.user.substitution\" : \"true\",\n" +
 +    "            \"hive.server2.authentication\" : \"NONE\",\n" +
 +    "            \"hive.server2.authentication.spnego.keytab\" : \"HTTP/_HOST@EXAMPLE.COM\",\n" +
 +    "            \"hive.server2.authentication.spnego.principal\" : \"/etc/security/keytabs/spnego.service.keytab\",\n" +
 +    "            \"hive.server2.enable.doAs\" : \"true\",\n" +
 +    "            \"hive.server2.support.dynamic.service.discovery\" : \"true\",\n" +
 +    "            \"hive.server2.thrift.http.path\" : \"cliservice\",\n" +
 +    "            \"hive.server2.thrift.http.port\" : \"10001\",\n" +
 +    "            \"hive.server2.thrift.max.worker.threads\" : \"500\",\n" +
 +    "            \"hive.server2.thrift.port\" : \"10000\",\n" +
 +    "            \"hive.server2.thrift.sasl.qop\" : \"auth\",\n" +
 +    "            \"hive.server2.transport.mode\" : \"http\",\n" +
 +    "            \"hive.server2.use.SSL\" : \"false\",\n" +
 +    "            \"hive.server2.zookeeper.namespace\" : \"hiveserver2\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : {\n" +
 +    "            \"hidden\" : {\n" +
 +    "              \"javax.jdo.option.ConnectionPassword\" : \"HIVE_CLIENT,WEBHCAT_SERVER,HCAT,CONFIG_DOWNLOAD\"\n" +
 +    "            }\n" +
 +    "          }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"webhcat-site\",\n" +
 +    "          \"tag\" : \"version1502131111746\",\n" +
 +    "          \"version\" : 2,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"templeton.port\" : \"50111\",\n" +
 +    "            \"templeton.zookeeper.hosts\" : \"c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181\",\n" +
 +    "            \"webhcat.proxyuser.knox.groups\" : \"users\",\n" +
 +    "            \"webhcat.proxyuser.knox.hosts\" : \"*\",\n" +
 +    "            \"webhcat.proxyuser.root.groups\" : \"*\",\n" +
 +    "            \"webhcat.proxyuser.root.hosts\" : \"c6401.ambari.apache.org\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"createtime\" : 1502131110745,\n" +
 +    "      \"group_id\" : -1,\n" +
 +    "      \"group_name\" : \"Default\",\n" +
 +    "      \"hosts\" : [ ],\n" +
 +    "      \"is_cluster_compatible\" : true,\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 3,\n" +
 +    "      \"service_config_version_note\" : \"knox trusted proxy support\",\n" +
 +    "      \"service_name\" : \"HIVE\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=OOZIE&service_config_version=3\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"oozie-site\",\n" +
 +    "          \"tag\" : \"version1502131137103\",\n" +
 +    "          \"version\" : 3,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"oozie.base.url\" : \"http://c6402.ambari.apache.org:11000/oozie\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 3,\n" +
 +    "      \"service_name\" : \"OOZIE\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=TEZ&service_config_version=1\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"tez-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"tez.use.cluster.hadoop-libs\" : \"false\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"createtime\" : 1502122253525,\n" +
 +    "      \"group_id\" : -1,\n" +
 +    "      \"group_name\" : \"Default\",\n" +
 +    "      \"hosts\" : [ ],\n" +
 +    "      \"is_cluster_compatible\" : true,\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 1,\n" +
 +    "      \"service_config_version_note\" : \"Initial configurations for Tez\",\n" +
 +    "      \"service_name\" : \"TEZ\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=YARN&service_config_version=1\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"yarn-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hadoop.registry.rm.enabled\" : \"true\",\n" +
 +    "            \"hadoop.registry.zk.quorum\" : \"c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181\",\n" +
 +    "            \"yarn.acl.enable\" : \"false\",\n" +
 +    "            \"yarn.http.policy\" : \"HTTP_ONLY\",\n" +
 +    "            \"yarn.nodemanager.address\" : \"0.0.0.0:45454\",\n" +
 +    "            \"yarn.nodemanager.bind-host\" : \"0.0.0.0\",\n" +
 +    "            \"yarn.resourcemanager.address\" : \"c6402.ambari.apache.org:8050\",\n" +
 +    "            \"yarn.resourcemanager.admin.address\" : \"c6402.ambari.apache.org:8141\",\n" +
 +    "            \"yarn.resourcemanager.ha.enabled\" : \"false\",\n" +
 +    "            \"yarn.resourcemanager.hostname\" : \"c6402.ambari.apache.org\",\n" +
 +    "            \"yarn.resourcemanager.resource-tracker.address\" : \"c6402.ambari.apache.org:8025\",\n" +
 +    "            \"yarn.resourcemanager.scheduler.address\" : \"c6402.ambari.apache.org:8030\",\n" +
 +    "            \"yarn.resourcemanager.webapp.address\" : \"c6402.ambari.apache.org:8088\",\n" +
 +    "            \"yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled\" : \"false\",\n" +
 +    "            \"yarn.resourcemanager.webapp.https.address\" : \"c6402.ambari.apache.org:8090\",\n" +
 +    "            \"yarn.resourcemanager.zk-address\" : \"c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 1,\n" +
 +    "      \"service_name\" : \"YARN\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    }\n" +
 +    "  ]\n" +
 +    "}";
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
index fe39f25,0000000..6e04932
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
+++ b/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
@@@ -1,214 -1,0 +1,215 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j.filter;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.pac4j.Pac4jMessages;
 +import org.apache.knox.gateway.pac4j.session.KnoxSessionStore;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.AliasServiceException;
 +import org.apache.knox.gateway.services.security.CryptoService;
 +import org.pac4j.config.client.PropertiesConfigFactory;
 +import org.pac4j.core.client.Client;
 +import org.pac4j.core.config.Config;
- import org.pac4j.core.config.ConfigSingleton;
- import org.pac4j.core.context.J2EContext;
 +import org.pac4j.core.util.CommonHelper;
 +import org.pac4j.http.client.indirect.IndirectBasicAuthClient;
 +import org.pac4j.http.credentials.authenticator.test.SimpleTestUsernamePasswordAuthenticator;
 +import org.pac4j.j2e.filter.CallbackFilter;
 +import org.pac4j.j2e.filter.SecurityFilter;
 +
 +import javax.servlet.*;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.util.Enumeration;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +/**
 + * <p>This is the main filter for the pac4j provider. The pac4j provider module heavily relies on the j2e-pac4j library (https://github.com/pac4j/j2e-pac4j).</p>
 + * <p>This filter dispatches the HTTP calls between the j2e-pac4j filters:</p>
 + * <ul>
 + *     <li>to the {@link CallbackFilter} if the <code>client_name</code> parameter exists: it finishes the authentication process</li>
 + *     <li>to the {@link SecurityFilter} otherwise: it starts the authentication process (redirection to the identity provider) if the user is not authenticated</li>
 + * </ul>
 + * <p>It uses the {@link KnoxSessionStore} to manage session data. The generated cookies are defined on a domain name
 + * which can be configured via the domain suffix parameter: <code>pac4j.cookie.domain.suffix</code>.</p>
 + * <p>The callback URL must be set to the currently protected URL (the KnoxSSO service, for example) via the parameter: <code>pac4j.callbackUrl</code>.</p>
 + *
 + * @since 0.8.0
 + */
 +public class Pac4jDispatcherFilter implements Filter {
 +
 +  private static Pac4jMessages log = MessagesFactory.get(Pac4jMessages.class);
 +
 +  public static final String TEST_BASIC_AUTH = "testBasicAuth";
 +
 +  public static final String PAC4J_CALLBACK_URL = "pac4j.callbackUrl";
 +
 +  public static final String PAC4J_CALLBACK_PARAMETER = "pac4jCallback";
 +
 +  private static final String PAC4J_COOKIE_DOMAIN_SUFFIX_PARAM = "pac4j.cookie.domain.suffix";
 +
++  private static final String PAC4J_CONFIG = "pac4j.config";
++
 +  private CallbackFilter callbackFilter;
 +
 +  private SecurityFilter securityFilter;
 +  private MasterService masterService = null;
 +  private KeystoreService keystoreService = null;
 +  private AliasService aliasService = null;
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +    // retrieve the gateway services from the servlet context
 +    final ServletContext context = filterConfig.getServletContext();
 +    CryptoService cryptoService = null;
 +    String clusterName = null;
 +    if (context != null) {
 +      GatewayServices services = (GatewayServices) context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +      clusterName = (String) context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE);
 +      if (services != null) {
 +        keystoreService = (KeystoreService) services.getService(GatewayServices.KEYSTORE_SERVICE);
 +        cryptoService = (CryptoService) services.getService(GatewayServices.CRYPTO_SERVICE);
 +        aliasService = (AliasService) services.getService(GatewayServices.ALIAS_SERVICE);
 +        masterService = (MasterService) services.getService("MasterService");
 +      }
 +    }
 +    // crypto service, alias service and cluster name are mandatory
 +    if (cryptoService == null || aliasService == null || clusterName == null) {
 +      log.cryptoServiceAndAliasServiceAndClusterNameRequired();
 +      throw new ServletException("The crypto service, alias service and cluster name are required.");
 +    }
 +    try {
 +      aliasService.getPasswordFromAliasForCluster(clusterName, KnoxSessionStore.PAC4J_PASSWORD, true);
 +    } catch (AliasServiceException e) {
 +      log.unableToGenerateAPasswordForEncryption(e);
 +      throw new ServletException("Unable to generate a password for encryption.");
 +    }
 +
 +    // url to SSO authentication provider
 +    String pac4jCallbackUrl = filterConfig.getInitParameter(PAC4J_CALLBACK_URL);
 +    if (pac4jCallbackUrl == null) {
 +      log.ssoAuthenticationProviderUrlRequired();
 +      throw new ServletException("Required pac4j callback URL is missing.");
 +    }
 +    // add a marker parameter so callback requests can be recognized in doFilter
 +    pac4jCallbackUrl = CommonHelper.addParameter(pac4jCallbackUrl, PAC4J_CALLBACK_PARAMETER, "true");
 +
 +    final Config config;
 +    final String clientName;
 +    // client name from servlet parameter (mandatory)
 +    final String clientNameParameter = filterConfig.getInitParameter("clientName");
 +    if (clientNameParameter == null) {
 +      log.clientNameParameterRequired();
 +      throw new ServletException("Required pac4j clientName parameter is missing.");
 +    }
 +    if (TEST_BASIC_AUTH.equalsIgnoreCase(clientNameParameter)) {
 +      // test configuration
 +      final IndirectBasicAuthClient indirectBasicAuthClient = new IndirectBasicAuthClient(new SimpleTestUsernamePasswordAuthenticator());
 +      indirectBasicAuthClient.setRealmName("Knox TEST");
 +      config = new Config(pac4jCallbackUrl, indirectBasicAuthClient);
 +      clientName = "IndirectBasicAuthClient";
 +    } else {
 +      // get clients from the init parameters
 +      final Map<String, String> properties = new HashMap<>();
 +      final Enumeration<String> names = filterConfig.getInitParameterNames();
 +      addDefaultConfig(clientNameParameter, properties);
 +      while (names.hasMoreElements()) {
 +        final String key = names.nextElement();
 +        properties.put(key, filterConfig.getInitParameter(key));
 +      }
 +      final PropertiesConfigFactory propertiesConfigFactory = new PropertiesConfigFactory(pac4jCallbackUrl, properties);
 +      config = propertiesConfigFactory.build();
 +      final List<Client> clients = config.getClients().getClients();
 +      if (clients == null || clients.size() == 0) {
 +        log.atLeastOnePac4jClientMustBeDefined();
 +        throw new ServletException("At least one pac4j client must be defined.");
 +      }
 +      if (CommonHelper.isBlank(clientNameParameter)) {
 +        clientName = clients.get(0).getName();
 +      } else {
 +        clientName = clientNameParameter;
 +      }
 +    }
 +
 +    callbackFilter = new CallbackFilter();
++    callbackFilter.setConfigOnly(config);
 +    securityFilter = new SecurityFilter();
 +    securityFilter.setClients(clientName);
-     securityFilter.setConfig(config);
++    securityFilter.setConfigOnly(config);
 +
 +    final String domainSuffix = filterConfig.getInitParameter(PAC4J_COOKIE_DOMAIN_SUFFIX_PARAM);
 +    config.setSessionStore(new KnoxSessionStore(cryptoService, clusterName, domainSuffix));
-     ConfigSingleton.setConfig(config);
 +  }
 +
 +  private void addDefaultConfig(String clientNameParameter, Map<String, String> properties) {
 +    // add default SAML params
 +    if (clientNameParameter.contains("SAML2Client")) {
 +      properties.put(PropertiesConfigFactory.SAML_KEYSTORE_PATH,
 +          keystoreService.getKeystorePath());
 +
 +      properties.put(PropertiesConfigFactory.SAML_KEYSTORE_PASSWORD,
 +          new String(masterService.getMasterSecret()));
 +
 +      // check for provisioned alias for private key
 +      char[] gip = null;
 +      try {
 +        gip = aliasService.getGatewayIdentityPassphrase();
 +      }
 +      catch(AliasServiceException ase) {
 +        log.noPrivateKeyPasshraseProvisioned(ase);
 +      }
 +      if (gip != null) {
 +        properties.put(PropertiesConfigFactory.SAML_PRIVATE_KEY_PASSWORD,
 +            new String(gip));
 +      }
 +      else {
 +        // no alias provisioned, so fall back to the master secret
 +        properties.put(PropertiesConfigFactory.SAML_PRIVATE_KEY_PASSWORD,
 +            new String(masterService.getMasterSecret()));
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
 +
 +    final HttpServletRequest request = (HttpServletRequest) servletRequest;
 +    final HttpServletResponse response = (HttpServletResponse) servletResponse;
-     final J2EContext context = new J2EContext(request, response, ConfigSingleton.getConfig().getSessionStore());
++    request.setAttribute(PAC4J_CONFIG, securityFilter.getConfig());
++//    final J2EContext context = new J2EContext(request, response, securityFilter.getConfig().getSessionStore());
 +
 +    // it's a callback from an identity provider
 +    if (request.getParameter(PAC4J_CALLBACK_PARAMETER) != null) {
 +      // apply CallbackFilter
 +      callbackFilter.doFilter(servletRequest, servletResponse, filterChain);
 +    } else {
 +      // otherwise apply the SecurityFilter, which starts the
 +      // authentication process if the user is not yet authenticated
 +      securityFilter.doFilter(servletRequest, servletResponse, filterChain);
 +    }
 +  }
 +
 +  @Override
 +  public void destroy() { }
 +}
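
The init logic above decorates the configured pac4j.callbackUrl with a marker
parameter so that callback requests can later be recognized in doFilter. A
minimal sketch of that decoration step, using the same pac4j CommonHelper
utility the filter itself calls (the gateway URL is a hypothetical example):

    import org.pac4j.core.util.CommonHelper;

    public class CallbackUrlSketch {
        public static void main(String[] args) {
            // addParameter appends a query parameter, handling URLs with or
            // without an existing query string.
            String callbackUrl = "https://gateway.example.com/gateway/knoxsso/api/v1/websso";
            String decorated = CommonHelper.addParameter(callbackUrl, "pac4jCallback", "true");
            System.out.println(decorated);
            // prints https://gateway.example.com/gateway/knoxsso/api/v1/websso?pac4jCallback=true
        }
    }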

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
index 6387a0b,0000000..bc66003
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
+++ b/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
@@@ -1,146 -1,0 +1,161 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j.filter;
 +
- import org.apache.knox.gateway.audit.api.Action;
- import org.apache.knox.gateway.audit.api.ActionOutcome;
- import org.apache.knox.gateway.audit.api.AuditService;
- import org.apache.knox.gateway.audit.api.AuditServiceFactory;
- import org.apache.knox.gateway.audit.api.Auditor;
- import org.apache.knox.gateway.audit.api.ResourceType;
++import org.apache.knox.gateway.audit.api.*;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
- import org.pac4j.core.config.ConfigSingleton;
++import org.pac4j.core.config.Config;
 +import org.pac4j.core.context.J2EContext;
 +import org.pac4j.core.profile.CommonProfile;
 +import org.pac4j.core.profile.ProfileManager;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.Filter;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.security.PrivilegedActionException;
 +import java.security.PrivilegedExceptionAction;
 +import java.util.Optional;
 +
 +/**
 + * <p>This filter retrieves the authenticated user saved by the pac4j provider and injects it into the J2E HTTP request.</p>
 + *
 + * @since 0.8.0
 + */
 +public class Pac4jIdentityAdapter implements Filter {
 +
 +  private static final Logger logger = LoggerFactory.getLogger(Pac4jIdentityAdapter.class);
 +
++  public static final String PAC4J_ID_ATTRIBUTE = "pac4j.id_attribute";
++  private static final String PAC4J_CONFIG = "pac4j.config";
++
 +  private static AuditService auditService = AuditServiceFactory.getAuditService();
 +  private static Auditor auditor = auditService.getAuditor(
 +      AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +      AuditConstants.KNOX_COMPONENT_NAME );
 +
 +  private String testIdentifier;
 +
++  private String idAttribute;
++
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
++    idAttribute = filterConfig.getInitParameter(PAC4J_ID_ATTRIBUTE);
 +  }
 +
 +  public void destroy() {
 +  }
 +
 +  public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain)
 +      throws IOException, ServletException {
 +
 +    final HttpServletRequest request = (HttpServletRequest) servletRequest;
 +    final HttpServletResponse response = (HttpServletResponse) servletResponse;
-     final J2EContext context = new J2EContext(request, response, ConfigSingleton.getConfig().getSessionStore());
++    final J2EContext context = new J2EContext(request, response,
++        ((Config)request.getAttribute(PAC4J_CONFIG)).getSessionStore());
 +    final ProfileManager<CommonProfile> manager = new ProfileManager<CommonProfile>(context);
 +    final Optional<CommonProfile> optional = manager.get(true);
 +    if (optional.isPresent()) {
 +      CommonProfile profile = optional.get();
 +      logger.debug("User authenticated as: {}", profile);
 +      manager.remove(true);
-       final String id = profile.getId();
++      String id = null;
++      if (idAttribute != null) {
++        Object attribute = profile.getAttribute(idAttribute);
++        if (attribute != null) {
++          id = attribute.toString();
++        }
++        if (id == null) {
++          logger.error("Invalid attribute_id: {} configured to be used as principal"
++              + " falling back to default id", idAttribute);
++        }
++      }
++      if (id == null) {
++        id = profile.getId();
++      }
 +      testIdentifier = id;
 +      PrimaryPrincipal pp = new PrimaryPrincipal(id);
 +      Subject subject = new Subject();
 +      subject.getPrincipals().add(pp);
 +      auditService.getContext().setUsername(id);
 +      String sourceUri = (String)request.getAttribute( AbstractGatewayFilter.SOURCE_REQUEST_CONTEXT_URL_ATTRIBUTE_NAME );
 +      auditor.audit(Action.AUTHENTICATION, sourceUri, ResourceType.URI, ActionOutcome.SUCCESS);
 +
 +      doAs(request, response, chain, subject);
 +    }
 +  }
 +
 +  private void doAs(final ServletRequest request,
 +      final ServletResponse response, final FilterChain chain, Subject subject)
 +      throws IOException, ServletException {
 +    try {
 +      Subject.doAs(
 +          subject,
 +          new PrivilegedExceptionAction<Object>() {
 +            public Object run() throws Exception {
 +              chain.doFilter(request, response);
 +              return null;
 +            }
 +          }
 +          );
 +    }
 +    catch (PrivilegedActionException e) {
 +      Throwable t = e.getCause();
 +      if (t instanceof IOException) {
 +        throw (IOException) t;
 +      }
 +      else if (t instanceof ServletException) {
 +        throw (ServletException) t;
 +      }
 +      else {
 +        throw new ServletException(t);
 +      }
 +    }
 +  }
 +
 +  /**
 +   * For tests only.
 +   */
 +  public static void setAuditService(AuditService auditService) {
 +    Pac4jIdentityAdapter.auditService = auditService;
 +  }
 +
 +  /**
 +   * For tests only.
 +   */
 +  public static void setAuditor(Auditor auditor) {
 +    Pac4jIdentityAdapter.auditor = auditor;
 +  }
 +
 +  /**
 +   * For tests only.
 +   */
 +  public String getTestIdentifier() {
 +    return testIdentifier;
 +  }
 +}
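
The doFilter logic above adds an optional pac4j.id_attribute parameter: when
set, the named profile attribute becomes the Knox principal, with the profile
id remaining the fallback. A minimal sketch of that selection rule (the
"email" attribute name and values are hypothetical):

    import org.pac4j.core.profile.CommonProfile;

    public class PrincipalSelectionSketch {
        public static void main(String[] args) {
            CommonProfile profile = new CommonProfile();
            profile.setId("jdoe");
            profile.addAttribute("email", "jdoe@example.com");

            String idAttribute = "email";  // value of pac4j.id_attribute
            String id = null;
            if (idAttribute != null) {
                Object attribute = profile.getAttribute(idAttribute);
                if (attribute != null) {
                    id = attribute.toString();
                }
            }
            if (id == null) {
                id = profile.getId();      // fallback to the default id
            }
            System.out.println(id);        // prints jdoe@example.com
        }
    }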

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/MockHttpServletRequest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/MockHttpServletRequest.java
index 7a3a833,0000000..18f4913
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/MockHttpServletRequest.java
+++ b/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/MockHttpServletRequest.java
@@@ -1,88 -1,0 +1,94 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j;
 +
 +import javax.servlet.http.*;
 +
 +import java.util.HashMap;
 +import java.util.Map;
 +
 +import static org.mockito.Mockito.*;
 +
 +public class MockHttpServletRequest extends HttpServletRequestWrapper {
 +
 +    private String requestUrl;
 +    private Cookie[] cookies;
 +    private String serverName;
 +    private Map<String, String> parameters = new HashMap<>();
 +    private Map<String, String> headers = new HashMap<>();
++    private Map<String, Object> attributes = new HashMap<>();
 +
 +    public MockHttpServletRequest() {
 +        super(mock(HttpServletRequest.class));
 +    }
 +
 +    @Override
 +    public Cookie[] getCookies() {
 +        return cookies;
 +    }
 +
 +    public void setCookies(final Cookie[] cookies) {
 +        this.cookies = cookies;
 +    }
 +
 +    @Override
 +    public StringBuffer getRequestURL() {
 +        return new StringBuffer(requestUrl);
 +    }
 +
 +    public void setRequestURL(final String requestUrl) {
 +        this.requestUrl = requestUrl;
 +    }
 +
 +    @Override
 +    public String getServerName() {
 +        return serverName;
 +    }
 +
 +    public void setServerName(final String serverName) {
 +        this.serverName = serverName;
 +    }
 +
 +    @Override
 +    public String getParameter(String name) {
 +        return parameters.get(name);
 +    }
 +
 +    public void addParameter(String key, String value) {
 +        parameters.put(key, value);
 +    }
 +
 +    @Override
 +    public String getHeader(String name) {
 +        return headers.get(name);
 +    }
 +
 +    public void addHeader(String key, String value) {
 +        headers.put(key, value);
 +    }
 +
 +    @Override
++    public void setAttribute(String name, Object value) {
++        attributes.put(name, value);
++    }
++
++    @Override
 +    public Object getAttribute(String name) {
-         return null;
++        return attributes.get(name);
 +    }
 +}
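
The attributes map added above mirrors the Pac4jDispatcherFilter change: the
pac4j Config is now handed to downstream filters through a request attribute,
so the mock must round-trip attributes instead of returning null. A short
test-style sketch (the stored value is a stand-in, not a real Config):

    MockHttpServletRequest request = new MockHttpServletRequest();
    Object config = new Object();  // stand-in for org.pac4j.core.config.Config
    request.setAttribute("pac4j.config", config);
    assert request.getAttribute("pac4j.config") == config;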


[46/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java b/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java
deleted file mode 100644
index 2d8b276..0000000
--- a/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryConfig;
-import org.easymock.EasyMock;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-public class AmbariConfigurationMonitorTest {
-
-    private File dataDir = null;
-
-    @Before
-    public void setup() throws Exception {
-        File targetDir = new File( System.getProperty("user.dir"), "target");
-        File tempDir = new File(targetDir, this.getClass().getName() + "__data__" + UUID.randomUUID());
-        FileUtils.forceMkdir(tempDir);
-        dataDir = tempDir;
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        FileUtils.deleteQuietly(dataDir); // File.delete() fails silently on non-empty directories
-    }
-
-    @Test
-    public void testPollingMonitor() throws Exception {
-        final String addr1 = "http://host1:8080";
-        final String addr2 = "http://host2:8080";
-        final String cluster1Name = "Cluster_One";
-        final String cluster2Name = "Cluster_Two";
-
-
-        GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-        EasyMock.expect(config.getGatewayDataDir()).andReturn(dataDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(config.getClusterMonitorPollingInterval(AmbariConfigurationMonitor.getType()))
-                .andReturn(10)
-                .anyTimes();
-        EasyMock.replay(config);
-
-        // Create the monitor
-        TestableAmbariConfigurationMonitor monitor = new TestableAmbariConfigurationMonitor(config);
-
-        // Clear the system property now that the monitor has been initialized
-        System.clearProperty(AmbariConfigurationMonitor.INTERVAL_PROPERTY_NAME);
-
-
-        // Sequence of config changes for testing monitoring for updates
-        Map<String, Map<String, List<List<AmbariCluster.ServiceConfiguration>>>> updateConfigurations = new HashMap<>();
-
-        updateConfigurations.put(addr1, new HashMap<>());
-        updateConfigurations.get(addr1).put(cluster1Name, Arrays.asList(Arrays.asList(createTestServiceConfig("zoo.cfg", "3"),
-                                                                                      createTestServiceConfig("hive-site", "2")),
-                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "3"),
-                                                                                      createTestServiceConfig("hive-site", "3")),
-                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "2"),
-                                                                                      createTestServiceConfig("hive-site", "1"))));
-
-        updateConfigurations.put(addr2, new HashMap<>());
-        updateConfigurations.get(addr2).put(cluster2Name, Arrays.asList(Arrays.asList(createTestServiceConfig("zoo.cfg", "1"),
-                                                                                      createTestServiceConfig("hive-site", "1")),
-                                                                        Collections.singletonList(createTestServiceConfig("zoo.cfg", "1")),
-                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "1"),
-                                                                                      createTestServiceConfig("hive-site", "2"))));
-
-        updateConfigurations.get(addr2).put(cluster1Name, Arrays.asList(Arrays.asList(createTestServiceConfig("zoo.cfg", "2"),
-                                                                                      createTestServiceConfig("hive-site", "4")),
-                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "3"),
-                                                                                      createTestServiceConfig("hive-site", "4"),
-                                                                                      createTestServiceConfig("yarn-site", "1")),
-                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "1"),
-                                                                                      createTestServiceConfig("hive-site", "2"))));
-
-        Map<String, Map<String, Integer>> configChangeIndex = new HashMap<>();
-        configChangeIndex.put(addr1, new HashMap<>());
-        configChangeIndex.get(addr1).put(cluster1Name, 0);
-        configChangeIndex.get(addr1).put(cluster2Name, 0);
-        configChangeIndex.put(addr2, new HashMap<>());
-        configChangeIndex.get(addr2).put(cluster2Name, 0);
-
-        // Setup the initial test update data
-        // Cluster_One @ addr1: data change
-        monitor.addTestConfigVersion(addr1, cluster1Name, "zoo.cfg", "2");
-        monitor.addTestConfigVersion(addr1, cluster1Name, "hive-site", "1");
-
-        // Cluster_One @ addr2: NO data change
-        monitor.addTestConfigVersion(addr2, cluster1Name, "zoo.cfg", "1");
-        monitor.addTestConfigVersion(addr2, cluster1Name, "hive-site", "1");
-
-        // Cluster_Two @ addr2: data change
-        monitor.addTestConfigVersion(addr2, cluster2Name, "zoo.cfg", "1");
-        monitor.addTestConfigVersion(addr2, cluster2Name, "hive-site", "2");
-
-        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> initialAmbariClusterConfigs = new HashMap<>();
-
-        Map<String, AmbariCluster.ServiceConfiguration> cluster1Configs = new HashMap<>();
-        AmbariCluster.ServiceConfiguration zooCfg = createTestServiceConfig("zoo.cfg", "1");
-        cluster1Configs.put("ZOOKEEPER", zooCfg);
-
-        AmbariCluster.ServiceConfiguration hiveSite = createTestServiceConfig("hive-site", "1");
-        cluster1Configs.put("Hive", hiveSite);
-
-        initialAmbariClusterConfigs.put(cluster1Name, cluster1Configs);
-        AmbariCluster cluster1 = createTestCluster(cluster1Name, initialAmbariClusterConfigs);
-
-        // Tell the monitor about the cluster configurations
-        monitor.addClusterConfigVersions(cluster1, createTestDiscoveryConfig(addr1));
-
-        monitor.addClusterConfigVersions(createTestCluster(cluster2Name, initialAmbariClusterConfigs),
-                                         createTestDiscoveryConfig(addr2));
-
-        monitor.addClusterConfigVersions(createTestCluster(cluster1Name, initialAmbariClusterConfigs),
-                                         createTestDiscoveryConfig(addr2));
-
-        final Map<String, Map<String, Integer>> changeNotifications = new HashMap<>();
-        monitor.addListener((src, cname) -> {
-//            System.out.println("Cluster config changed: " + cname + " @ " + src);
-            // Record the notification
-            Integer notificationCount  = changeNotifications.computeIfAbsent(src, s -> new HashMap<>())
-                                                            .computeIfAbsent(cname, c -> Integer.valueOf(0));
-            changeNotifications.get(src).put(cname, (notificationCount+=1));
-
-            // Update the config version
-            int changeIndex = configChangeIndex.get(src).get(cname);
-            if (changeIndex < updateConfigurations.get(src).get(cname).size()) {
-                List<AmbariCluster.ServiceConfiguration> changes = updateConfigurations.get(src).get(cname).get(changeIndex);
-
-//                System.out.println("Applying config update " + changeIndex + " to " + cname + " @ " + src + " ...");
-                for (AmbariCluster.ServiceConfiguration change : changes) {
-                    monitor.updateConfigState(src, cname, change.getType(), change.getVersion());
-//                    System.out.println("    Updated " + change.getType() + " to version " + change.getVersion());
-                }
-
-                // Increment the change index
-                configChangeIndex.get(src).replace(cname, changeIndex + 1);
-
-//                System.out.println("Monitor config updated for " + cname + " @ " + src + " : " + changeIndex );
-            }
-        });
-
-        try {
-            monitor.start();
-
-            long expiration = System.currentTimeMillis() + (1000 * 30);
-            while (!areChangeUpdatesExhausted(updateConfigurations, configChangeIndex)
-                                                                        && (System.currentTimeMillis() < expiration)) {
-                try {
-                    Thread.sleep(5);
-                } catch (InterruptedException e) {
-                    // ignore the interruption; the loop re-checks its exit conditions
-                }
-            }
-
-        } finally {
-            monitor.stop();
-        }
-
-        assertNotNull("Expected changes to have been reported for source 1.",
-                      changeNotifications.get(addr1));
-
-        assertEquals("Expected changes to have been reported.",
-                     3, changeNotifications.get(addr1).get(cluster1Name).intValue());
-
-        assertNotNull("Expected changes to have been reported for source 2.",
-                      changeNotifications.get(addr2));
-
-        assertEquals("Expected changes to have been reported.",
-                     3, changeNotifications.get(addr2).get(cluster2Name).intValue());
-
-        assertNull("Expected changes to have been reported.",
-                   changeNotifications.get(addr2).get(cluster1Name));
-    }
-
-
-    private static boolean areChangeUpdatesExhausted(Map<String, Map<String, List<List<AmbariCluster.ServiceConfiguration>>>> updates,
-                                              Map<String, Map<String, Integer>> configChangeIndices) {
-        boolean isExhausted = true;
-
-        for (String address : updates.keySet()) {
-            Map<String, List<List<AmbariCluster.ServiceConfiguration>>> clusterConfigs = updates.get(address);
-            for (String clusterName : clusterConfigs.keySet()) {
-                Integer configChangeCount = clusterConfigs.get(clusterName).size();
-                if (configChangeIndices.get(address).containsKey(clusterName)) {
-                    if (configChangeIndices.get(address).get(clusterName) < configChangeCount) {
-                        isExhausted = false;
-                        break;
-                    }
-                }
-            }
-        }
-
-        return isExhausted;
-    }
-
-    /**
-     *
-     * @param name           The cluster name
-     * @param serviceConfigs A map of service configurations (keyed by service name)
-     *
-     * @return An AmbariCluster mock reporting the given name and service configurations
-     */
-    private AmbariCluster createTestCluster(String name,
-                                            Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs) {
-        AmbariCluster c = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(c.getName()).andReturn(name).anyTimes();
-        EasyMock.expect(c.getServiceConfigurations()).andReturn(serviceConfigs).anyTimes();
-        EasyMock.replay(c);
-        return c;
-    }
-
-    private AmbariCluster.ServiceConfiguration createTestServiceConfig(String name, String version) {
-        AmbariCluster.ServiceConfiguration sc = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
-        EasyMock.expect(sc.getType()).andReturn(name).anyTimes();
-        EasyMock.expect(sc.getVersion()).andReturn(version).anyTimes();
-        EasyMock.replay(sc);
-        return sc;
-    }
-
-    private ServiceDiscoveryConfig createTestDiscoveryConfig(String address) {
-        return createTestDiscoveryConfig(address, null, null);
-    }
-
-    private ServiceDiscoveryConfig createTestDiscoveryConfig(String address, String username, String pwdAlias) {
-        ServiceDiscoveryConfig sdc = EasyMock.createNiceMock(ServiceDiscoveryConfig.class);
-        EasyMock.expect(sdc.getAddress()).andReturn(address).anyTimes();
-        EasyMock.expect(sdc.getUser()).andReturn(username).anyTimes();
-        EasyMock.expect(sdc.getPasswordAlias()).andReturn(pwdAlias).anyTimes();
-        EasyMock.replay(sdc);
-        return sdc;
-    }
-
-    /**
-     * AmbariConfigurationMonitor extension that replaces the collection of updated configuration data with a static
-     * mechanism rather than the REST invocation mechanism.
-     */
-    private static final class TestableAmbariConfigurationMonitor extends AmbariConfigurationMonitor {
-
-        Map<String, Map<String, Map<String, String>>> configVersionData = new HashMap<>();
-
-        TestableAmbariConfigurationMonitor(GatewayConfig config) {
-            super(config, null);
-        }
-
-        void addTestConfigVersion(String address, String clusterName, String configType, String configVersion) {
-            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
-                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
-                             .put(configType, configVersion);
-        }
-
-        void addTestConfigVersions(String address, String clusterName, Map<String, String> configVersions) {
-            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
-                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
-                             .putAll(configVersions);
-        }
-
-        void updateTestConfigVersion(String address, String clusterName, String configType, String updatedVersions) {
-            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
-                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
-                             .replace(configType, updatedVersions);
-        }
-
-        void updateTestConfigVersions(String address, String clusterName, Map<String, String> updatedVersions) {
-            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
-                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
-                             .replaceAll((k,v) -> updatedVersions.get(k));
-        }
-
-        void updateConfigState(String address, String clusterName, String configType, String configVersion) {
-            configVersionsLock.writeLock().lock();
-            try {
-                if (ambariClusterConfigVersions.containsKey(address)) {
-                    ambariClusterConfigVersions.get(address).get(clusterName).replace(configType, configVersion);
-                }
-            } finally {
-                configVersionsLock.writeLock().unlock();
-            }
-        }
-
-        @Override
-        Map<String, String> getUpdatedConfigVersions(String address, String clusterName) {
-            Map<String, Map<String, String>> clusterConfigVersions = configVersionData.get(address);
-            if (clusterConfigVersions != null) {
-                return clusterConfigVersions.get(clusterName);
-            }
-            return null;
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java
new file mode 100644
index 0000000..7411545
--- /dev/null
+++ b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitorTest.java
@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
+import org.easymock.EasyMock;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+public class AmbariConfigurationMonitorTest {
+
+    private File dataDir = null;
+
+    @Before
+    public void setup() throws Exception {
+        File targetDir = new File( System.getProperty("user.dir"), "target");
+        File tempDir = new File(targetDir, this.getClass().getName() + "__data__" + UUID.randomUUID());
+        FileUtils.forceMkdir(tempDir);
+        dataDir = tempDir;
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        FileUtils.deleteQuietly(dataDir); // File.delete() fails silently on non-empty directories
+    }
+
+    @Test
+    public void testPollingMonitor() throws Exception {
+        final String addr1 = "http://host1:8080";
+        final String addr2 = "http://host2:8080";
+        final String cluster1Name = "Cluster_One";
+        final String cluster2Name = "Cluster_Two";
+
+
+        GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+        EasyMock.expect(config.getGatewayDataDir()).andReturn(dataDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(config.getClusterMonitorPollingInterval(AmbariConfigurationMonitor.getType()))
+                .andReturn(10)
+                .anyTimes();
+        EasyMock.replay(config);
+
+        // Create the monitor
+        TestableAmbariConfigurationMonitor monitor = new TestableAmbariConfigurationMonitor(config);
+
+        // Clear the system property now that the monitor has been initialized
+        System.clearProperty(AmbariConfigurationMonitor.INTERVAL_PROPERTY_NAME);
+
+
+        // Sequence of config changes for testing monitoring for updates
+        Map<String, Map<String, List<List<AmbariCluster.ServiceConfiguration>>>> updateConfigurations = new HashMap<>();
+
+        updateConfigurations.put(addr1, new HashMap<>());
+        updateConfigurations.get(addr1).put(cluster1Name, Arrays.asList(Arrays.asList(createTestServiceConfig("zoo.cfg", "3"),
+                                                                                      createTestServiceConfig("hive-site", "2")),
+                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "3"),
+                                                                                      createTestServiceConfig("hive-site", "3")),
+                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "2"),
+                                                                                      createTestServiceConfig("hive-site", "1"))));
+
+        updateConfigurations.put(addr2, new HashMap<>());
+        updateConfigurations.get(addr2).put(cluster2Name, Arrays.asList(Arrays.asList(createTestServiceConfig("zoo.cfg", "1"),
+                                                                                      createTestServiceConfig("hive-site", "1")),
+                                                                        Collections.singletonList(createTestServiceConfig("zoo.cfg", "1")),
+                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "1"),
+                                                                                      createTestServiceConfig("hive-site", "2"))));
+
+        updateConfigurations.get(addr2).put(cluster1Name, Arrays.asList(Arrays.asList(createTestServiceConfig("zoo.cfg", "2"),
+                                                                                      createTestServiceConfig("hive-site", "4")),
+                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "3"),
+                                                                                      createTestServiceConfig("hive-site", "4"),
+                                                                                      createTestServiceConfig("yarn-site", "1")),
+                                                                        Arrays.asList(createTestServiceConfig("zoo.cfg", "1"),
+                                                                                      createTestServiceConfig("hive-site", "2"))));
+
+        Map<String, Map<String, Integer>> configChangeIndex = new HashMap<>();
+        configChangeIndex.put(addr1, new HashMap<>());
+        configChangeIndex.get(addr1).put(cluster1Name, 0);
+        configChangeIndex.get(addr1).put(cluster2Name, 0);
+        configChangeIndex.put(addr2, new HashMap<>());
+        configChangeIndex.get(addr2).put(cluster2Name, 0);
+
+        // Setup the initial test update data
+        // Cluster_One @ addr1: data change
+        monitor.addTestConfigVersion(addr1, cluster1Name, "zoo.cfg", "2");
+        monitor.addTestConfigVersion(addr1, cluster1Name, "hive-site", "1");
+
+        // Cluster_One @ addr2: NO data change
+        monitor.addTestConfigVersion(addr2, cluster1Name, "zoo.cfg", "1");
+        monitor.addTestConfigVersion(addr2, cluster1Name, "hive-site", "1");
+
+        // Cluster_Two @ addr2: data change
+        monitor.addTestConfigVersion(addr2, cluster2Name, "zoo.cfg", "1");
+        monitor.addTestConfigVersion(addr2, cluster2Name, "hive-site", "2");
+
+        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> initialAmbariClusterConfigs = new HashMap<>();
+
+        Map<String, AmbariCluster.ServiceConfiguration> cluster1Configs = new HashMap<>();
+        AmbariCluster.ServiceConfiguration zooCfg = createTestServiceConfig("zoo.cfg", "1");
+        cluster1Configs.put("ZOOKEEPER", zooCfg);
+
+        AmbariCluster.ServiceConfiguration hiveSite = createTestServiceConfig("hive-site", "1");
+        cluster1Configs.put("Hive", hiveSite);
+
+        initialAmbariClusterConfigs.put(cluster1Name, cluster1Configs);
+        AmbariCluster cluster1 = createTestCluster(cluster1Name, initialAmbariClusterConfigs);
+
+        // Tell the monitor about the cluster configurations
+        monitor.addClusterConfigVersions(cluster1, createTestDiscoveryConfig(addr1));
+
+        monitor.addClusterConfigVersions(createTestCluster(cluster2Name, initialAmbariClusterConfigs),
+                                         createTestDiscoveryConfig(addr2));
+
+        monitor.addClusterConfigVersions(createTestCluster(cluster1Name, initialAmbariClusterConfigs),
+                                         createTestDiscoveryConfig(addr2));
+
+        final Map<String, Map<String, Integer>> changeNotifications = new HashMap<>();
+        monitor.addListener((src, cname) -> {
+//            System.out.println("Cluster config changed: " + cname + " @ " + src);
+            // Record the notification
+            Integer notificationCount  = changeNotifications.computeIfAbsent(src, s -> new HashMap<>())
+                                                            .computeIfAbsent(cname, c -> Integer.valueOf(0));
+            changeNotifications.get(src).put(cname, (notificationCount+=1));
+
+            // Update the config version
+            int changeIndex = configChangeIndex.get(src).get(cname);
+            if (changeIndex < updateConfigurations.get(src).get(cname).size()) {
+                List<AmbariCluster.ServiceConfiguration> changes = updateConfigurations.get(src).get(cname).get(changeIndex);
+
+//                System.out.println("Applying config update " + changeIndex + " to " + cname + " @ " + src + " ...");
+                for (AmbariCluster.ServiceConfiguration change : changes) {
+                    monitor.updateConfigState(src, cname, change.getType(), change.getVersion());
+//                    System.out.println("    Updated " + change.getType() + " to version " + change.getVersion());
+                }
+
+                // Increment the change index
+                configChangeIndex.get(src).replace(cname, changeIndex + 1);
+
+//                System.out.println("Monitor config updated for " + cname + " @ " + src + " : " + changeIndex );
+            }
+        });
+
+        try {
+            monitor.start();
+
+            long expiration = System.currentTimeMillis() + (1000 * 30);
+            while (!areChangeUpdatesExhausted(updateConfigurations, configChangeIndex)
+                                                                        && (System.currentTimeMillis() < expiration)) {
+                try {
+                    Thread.sleep(5);
+                } catch (InterruptedException e) {
+                    // ignore the interruption; the loop re-checks its exit conditions
+                }
+            }
+
+        } finally {
+            monitor.stop();
+        }
+
+        assertNotNull("Expected changes to have been reported for source 1.",
+                      changeNotifications.get(addr1));
+
+        assertEquals("Expected changes to have been reported.",
+                     3, changeNotifications.get(addr1).get(cluster1Name).intValue());
+
+        assertNotNull("Expected changes to have been reported for source 2.",
+                      changeNotifications.get(addr2));
+
+        assertEquals("Expected changes to have been reported.",
+                     3, changeNotifications.get(addr2).get(cluster2Name).intValue());
+
+        assertNull("Expected changes to have been reported.",
+                   changeNotifications.get(addr2).get(cluster1Name));
+    }
+
+
+    private static boolean areChangeUpdatesExhausted(Map<String, Map<String, List<List<AmbariCluster.ServiceConfiguration>>>> updates,
+                                              Map<String, Map<String, Integer>> configChangeIndices) {
+        boolean isExhausted = true;
+
+        for (String address : updates.keySet()) {
+            Map<String, List<List<AmbariCluster.ServiceConfiguration>>> clusterConfigs = updates.get(address);
+            for (String clusterName : clusterConfigs.keySet()) {
+                Integer configChangeCount = clusterConfigs.get(clusterName).size();
+                if (configChangeIndices.get(address).containsKey(clusterName)) {
+                    if (configChangeIndices.get(address).get(clusterName) < configChangeCount) {
+                        isExhausted = false;
+                        break;
+                    }
+                }
+            }
+        }
+
+        return isExhausted;
+    }
+
+    /**
+     *
+     * @param name           The cluster name
+     * @param serviceConfigs A map of service configurations (keyed by service name)
+     *
+     * @return An AmbariCluster mock reporting the given name and service configurations
+     */
+    private AmbariCluster createTestCluster(String name,
+                                            Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs) {
+        AmbariCluster c = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(c.getName()).andReturn(name).anyTimes();
+        EasyMock.expect(c.getServiceConfigurations()).andReturn(serviceConfigs).anyTimes();
+        EasyMock.replay(c);
+        return c;
+    }
+
+    private AmbariCluster.ServiceConfiguration createTestServiceConfig(String name, String version) {
+        AmbariCluster.ServiceConfiguration sc = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
+        EasyMock.expect(sc.getType()).andReturn(name).anyTimes();
+        EasyMock.expect(sc.getVersion()).andReturn(version).anyTimes();
+        EasyMock.replay(sc);
+        return sc;
+    }
+
+    private ServiceDiscoveryConfig createTestDiscoveryConfig(String address) {
+        return createTestDiscoveryConfig(address, null, null);
+    }
+
+    private ServiceDiscoveryConfig createTestDiscoveryConfig(String address, String username, String pwdAlias) {
+        ServiceDiscoveryConfig sdc = EasyMock.createNiceMock(ServiceDiscoveryConfig.class);
+        EasyMock.expect(sdc.getAddress()).andReturn(address).anyTimes();
+        EasyMock.expect(sdc.getUser()).andReturn(username).anyTimes();
+        EasyMock.expect(sdc.getPasswordAlias()).andReturn(pwdAlias).anyTimes();
+        EasyMock.replay(sdc);
+        return sdc;
+    }
+
+    /**
+     * AmbariConfigurationMonitor extension that replaces the collection of updated configuration data with a static
+     * mechanism rather than the REST invocation mechanism.
+     */
+    private static final class TestableAmbariConfigurationMonitor extends AmbariConfigurationMonitor {
+
+        Map<String, Map<String, Map<String, String>>> configVersionData = new HashMap<>();
+
+        TestableAmbariConfigurationMonitor(GatewayConfig config) {
+            super(config, null);
+        }
+
+        void addTestConfigVersion(String address, String clusterName, String configType, String configVersion) {
+            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
+                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
+                             .put(configType, configVersion);
+        }
+
+        void addTestConfigVersions(String address, String clusterName, Map<String, String> configVersions) {
+            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
+                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
+                             .putAll(configVersions);
+        }
+
+        void updateTestConfigVersion(String address, String clusterName, String configType, String updatedVersions) {
+            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
+                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
+                             .replace(configType, updatedVersions);
+        }
+
+        void updateTestConfigVersions(String address, String clusterName, Map<String, String> updatedVersions) {
+            configVersionData.computeIfAbsent(address, a -> new HashMap<>())
+                             .computeIfAbsent(clusterName, cl -> new HashMap<>())
+                             .replaceAll((k,v) -> updatedVersions.get(k));
+        }
+
+        void updateConfigState(String address, String clusterName, String configType, String configVersion) {
+            configVersionsLock.writeLock().lock();
+            try {
+                if (ambariClusterConfigVersions.containsKey(address)) {
+                    ambariClusterConfigVersions.get(address).get(clusterName).replace(configType, configVersion);
+                }
+            } finally {
+                configVersionsLock.writeLock().unlock();
+            }
+        }
+
+        @Override
+        Map<String, String> getUpdatedConfigVersions(String address, String clusterName) {
+            Map<String, Map<String, String>> clusterConfigVersions = configVersionData.get(address);
+            if (clusterConfigVersions != null) {
+                return clusterConfigVersions.get(clusterName);
+            }
+            return null;
+        }
+    }
+
+}
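
The listener registered in testPollingMonitor counts notifications per source
and per cluster using nested computeIfAbsent calls. A minimal standalone
sketch of that counting idiom (the source address and cluster name are
hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class NotificationCountSketch {
        public static void main(String[] args) {
            Map<String, Map<String, Integer>> counts = new HashMap<>();
            String src = "http://host1:8080";
            String cname = "Cluster_One";

            // Create the per-source and per-cluster entries on first use,
            // then increment and store the updated count.
            int count = counts.computeIfAbsent(src, s -> new HashMap<>())
                              .computeIfAbsent(cname, c -> 0);
            counts.get(src).put(cname, count + 1);
            System.out.println(counts);  // {http://host1:8080={Cluster_One=1}}
        }
    }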

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java
deleted file mode 100644
index 342ce11..0000000
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.services.topology.impl;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.topology.ClusterConfigurationMonitorService;
-import org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitor;
-import org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitorProvider;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.ServiceLoader;
-
-
-public class DefaultClusterConfigurationMonitorService implements ClusterConfigurationMonitorService {
-
-    private AliasService aliasService = null;
-
-    private Map<String, ClusterConfigurationMonitor> monitors = new HashMap<>();
-
-    @Override
-    public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
-        ServiceLoader<ClusterConfigurationMonitorProvider> providers =
-                                                        ServiceLoader.load(ClusterConfigurationMonitorProvider.class);
-        for (ClusterConfigurationMonitorProvider provider : providers) {
-            // Check the gateway configuration to determine if this type of monitor is enabled
-            if (config.isClusterMonitorEnabled(provider.getType())) {
-                ClusterConfigurationMonitor monitor = provider.newInstance(config, aliasService);
-                if (monitor != null) {
-                    monitors.put(provider.getType(), monitor);
-                }
-            }
-        }
-    }
-
-    @Override
-    public void start() {
-        for (ClusterConfigurationMonitor monitor : monitors.values()) {
-            monitor.start();
-        }
-    }
-
-    @Override
-    public void stop() {
-        for (ClusterConfigurationMonitor monitor : monitors.values()) {
-            monitor.stop();
-        }
-    }
-
-    @Override
-    public ClusterConfigurationMonitor getMonitor(String type) {
-        return monitors.get(type);
-    }
-
-    @Override
-    public void addListener(ClusterConfigurationMonitor.ConfigurationChangeListener listener) {
-        for (ClusterConfigurationMonitor monitor : monitors.values()) {
-            monitor.addListener(listener);
-        }
-    }
-
-    public void setAliasService(AliasService aliasService) {
-        this.aliasService = aliasService;
-    }
-}

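Note: each file deleted from the org.apache.hadoop.gateway packages in this message is re-added under the
corresponding org.apache.knox.gateway package further below; apart from the package and import statements,
the file contents are unchanged.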
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java
deleted file mode 100644
index 7b34e3d..0000000
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-
-public class DefaultConfigurationMonitorProvider implements RemoteConfigurationMonitorProvider {
-
-    @Override
-    public RemoteConfigurationMonitor newInstance(final GatewayConfig                            config,
-                                                  final RemoteConfigurationRegistryClientService clientService) {
-        return new DefaultRemoteConfigurationMonitor(config, clientService);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
deleted file mode 100644
index af60058..0000000
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.gateway.GatewayMessages;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient.EntryListener;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-import org.apache.zookeeper.ZooDefs;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-
-class DefaultRemoteConfigurationMonitor implements RemoteConfigurationMonitor {
-
-    private static final String NODE_KNOX = "/knox";
-    private static final String NODE_KNOX_CONFIG = NODE_KNOX + "/config";
-    private static final String NODE_KNOX_PROVIDERS = NODE_KNOX_CONFIG + "/shared-providers";
-    private static final String NODE_KNOX_DESCRIPTORS = NODE_KNOX_CONFIG + "/descriptors";
-
-    private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
-
-    // N.B. This is ZooKeeper-specific, and should be abstracted when another registry is supported
-    private static final RemoteConfigurationRegistryClient.EntryACL AUTHENTICATED_USERS_ALL;
-    static {
-        AUTHENTICATED_USERS_ALL = new RemoteConfigurationRegistryClient.EntryACL() {
-            public String getId() {
-                return "";
-            }
-
-            public String getType() {
-                return "auth";
-            }
-
-            public Object getPermissions() {
-                return ZooDefs.Perms.ALL;
-            }
-
-            public boolean canRead() {
-                return true;
-            }
-
-            public boolean canWrite() {
-                return true;
-            }
-        };
-    }
-
-    private RemoteConfigurationRegistryClient client = null;
-
-    private File providersDir;
-    private File descriptorsDir;
-
-    /**
-     * @param config                The gateway configuration
-     * @param registryClientService The service from which the remote registry client should be acquired.
-     */
-    DefaultRemoteConfigurationMonitor(GatewayConfig                            config,
-                                      RemoteConfigurationRegistryClientService registryClientService) {
-        this.providersDir   = new File(config.getGatewayProvidersConfigDir());
-        this.descriptorsDir = new File(config.getGatewayDescriptorsDir());
-
-        if (registryClientService != null) {
-            String clientName = config.getRemoteConfigurationMonitorClientName();
-            if (clientName != null) {
-                this.client = registryClientService.get(clientName);
-                if (this.client == null) {
-                    log.unresolvedClientConfigurationForRemoteMonitoring(clientName);
-                }
-            } else {
-                log.missingClientConfigurationForRemoteMonitoring();
-            }
-        }
-    }
-
-    @Override
-    public void start() throws Exception {
-        if (client == null) {
-            throw new IllegalStateException("Failed to acquire a remote configuration registry client.");
-        }
-
-        final String monitorSource = client.getAddress();
-        log.startingRemoteConfigurationMonitor(monitorSource);
-
-        // Ensure the existence of the expected entries and their associated ACLs
-        ensureEntries();
-
-        // Confirm access to the remote provider configs directory znode
-        List<String> providerConfigs = client.listChildEntries(NODE_KNOX_PROVIDERS);
-        if (providerConfigs == null) {
-            // Either the ZNode does not exist, or there is an authentication problem
-            throw new IllegalStateException("Unable to access remote path: " + NODE_KNOX_PROVIDERS);
-        }
-
-        // Confirm access to the remote descriptors directory znode
-        List<String> descriptors = client.listChildEntries(NODE_KNOX_DESCRIPTORS);
-        if (descriptors == null) {
-            // Either the ZNode does not exist, or there is an authentication problem
-            throw new IllegalStateException("Unable to access remote path: " + NODE_KNOX_DESCRIPTORS);
-        }
-
-        // Register a listener for provider config znode additions/removals
-        client.addChildEntryListener(NODE_KNOX_PROVIDERS, new ConfigDirChildEntryListener(providersDir));
-
-        // Register a listener for descriptor znode additions/removals
-        client.addChildEntryListener(NODE_KNOX_DESCRIPTORS, new ConfigDirChildEntryListener(descriptorsDir));
-
-        log.monitoringRemoteConfigurationSource(monitorSource);
-    }
-
-
-    @Override
-    public void stop() throws Exception {
-        client.removeEntryListener(NODE_KNOX_PROVIDERS);
-        client.removeEntryListener(NODE_KNOX_DESCRIPTORS);
-    }
-
-    private void ensureEntries() {
-        ensureEntry(NODE_KNOX);
-        ensureEntry(NODE_KNOX_CONFIG);
-        ensureEntry(NODE_KNOX_PROVIDERS);
-        ensureEntry(NODE_KNOX_DESCRIPTORS);
-    }
-
-    private void ensureEntry(String name) {
-        if (!client.entryExists(name)) {
-            client.createEntry(name);
-        } else {
-            // Validate the ACL
-            List<RemoteConfigurationRegistryClient.EntryACL> entryACLs = client.getACL(name);
-            for (RemoteConfigurationRegistryClient.EntryACL entryACL : entryACLs) {
-                // N.B. This is ZooKeeper-specific, and should be abstracted when another registry is supported
-                // For now, check for ZooKeeper world:anyone with ANY permissions (even read-only)
-                if (entryACL.getType().equals("world") && entryACL.getId().equals("anyone")) {
-                    log.suspectWritableRemoteConfigurationEntry(name);
-
-                    // If the client is authenticated, but "anyone" can write the content, then the content may not
-                    // be trustworthy.
-                    if (client.isAuthenticationConfigured()) {
-                        log.correctingSuspectWritableRemoteConfigurationEntry(name);
-
-                        // Replace the existing ACL with one that permits only authenticated users
-                        client.setACL(name, Collections.singletonList(AUTHENTICATED_USERS_ALL));
-                    }
-                }
-            }
-        }
-    }
-
-    private static class ConfigDirChildEntryListener implements ChildEntryListener {
-        File localDir;
-
-        ConfigDirChildEntryListener(File localDir) {
-            this.localDir = localDir;
-        }
-
-        @Override
-        public void childEvent(RemoteConfigurationRegistryClient client, Type type, String path) {
-            File localFile = new File(localDir, path.substring(path.lastIndexOf("/") + 1));
-
-            switch (type) {
-                case REMOVED:
-                    FileUtils.deleteQuietly(localFile);
-                    log.deletedRemoteConfigFile(localDir.getName(), localFile.getName());
-                    try {
-                        client.removeEntryListener(path);
-                    } catch (Exception e) {
-                        log.errorRemovingRemoteConfigurationListenerForPath(path, e);
-                    }
-                    break;
-                case ADDED:
-                    try {
-                        client.addEntryListener(path, new ConfigEntryListener(localDir));
-                    } catch (Exception e) {
-                        log.errorAddingRemoteConfigurationListenerForPath(path, e);
-                    }
-                    break;
-            }
-        }
-    }
-
-    private static class ConfigEntryListener implements EntryListener {
-        private File localDir;
-
-        ConfigEntryListener(File localDir) {
-            this.localDir = localDir;
-        }
-
-        @Override
-        public void entryChanged(RemoteConfigurationRegistryClient client, String path, byte[] data) {
-            File localFile = new File(localDir, path.substring(path.lastIndexOf("/")));
-            if (data != null) {
-                try {
-                    FileUtils.writeByteArrayToFile(localFile, data);
-                    log.downloadedRemoteConfigFile(localDir.getName(), localFile.getName());
-                } catch (IOException e) {
-                    log.errorDownloadingRemoteConfiguration(path, e);
-                }
-            } else {
-                FileUtils.deleteQuietly(localFile);
-                log.deletedRemoteConfigFile(localDir.getName(), localFile.getName());
-            }
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java
deleted file mode 100644
index 4d2df45..0000000
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-import org.apache.hadoop.gateway.GatewayMessages;
-import org.apache.hadoop.gateway.GatewayServer;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.services.GatewayServices;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-import java.util.ServiceLoader;
-
-public class RemoteConfigurationMonitorFactory {
-
-    private static final GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
-
-    private static RemoteConfigurationRegistryClientService remoteConfigRegistryClientService = null;
-
-    public static void setClientService(RemoteConfigurationRegistryClientService clientService) {
-        remoteConfigRegistryClientService = clientService;
-    }
-
-    private static RemoteConfigurationRegistryClientService getClientService() {
-        if (remoteConfigRegistryClientService == null) {
-            GatewayServices services = GatewayServer.getGatewayServices();
-            if (services != null) {
-                remoteConfigRegistryClientService = services.getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
-            }
-        }
-
-        return remoteConfigRegistryClientService;
-    }
-
-    /**
-     *
-     * @param config The GatewayConfig
-     *
-     * @return The first RemoteConfigurationMonitor extension that is found.
-     */
-    public static RemoteConfigurationMonitor get(GatewayConfig config) {
-        RemoteConfigurationMonitor rcm = null;
-
-        ServiceLoader<RemoteConfigurationMonitorProvider> providers =
-                                                 ServiceLoader.load(RemoteConfigurationMonitorProvider.class);
-        for (RemoteConfigurationMonitorProvider provider : providers) {
-            try {
-                rcm = provider.newInstance(config, getClientService());
-                if (rcm != null) {
-                    break;
-                }
-            } catch (Exception e) {
-                log.remoteConfigurationMonitorInitFailure(e.getLocalizedMessage(), e);
-            }
-        }
-
-        return rcm;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java b/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
index a1ed549..f168d44 100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
@@ -24,7 +24,7 @@ import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
 import org.apache.knox.gateway.descriptor.ResourceDescriptor;
 import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceFactory;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
 import org.apache.knox.gateway.services.security.impl.DefaultAliasService;
 import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java
new file mode 100644
index 0000000..e7ef01d
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultClusterConfigurationMonitorService.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.services.topology.impl;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.ServiceLifecycleException;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.topology.ClusterConfigurationMonitorService;
+import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
+import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.ServiceLoader;
+
+
+public class DefaultClusterConfigurationMonitorService implements ClusterConfigurationMonitorService {
+
+    private AliasService aliasService = null;
+
+    private Map<String, ClusterConfigurationMonitor> monitors = new HashMap<>();
+
+    @Override
+    public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
+        ServiceLoader<ClusterConfigurationMonitorProvider> providers =
+                                                        ServiceLoader.load(ClusterConfigurationMonitorProvider.class);
+        for (ClusterConfigurationMonitorProvider provider : providers) {
+            // Check the gateway configuration to determine if this type of monitor is enabled
+            if (config.isClusterMonitorEnabled(provider.getType())) {
+                ClusterConfigurationMonitor monitor = provider.newInstance(config, aliasService);
+                if (monitor != null) {
+                    monitors.put(provider.getType(), monitor);
+                }
+            }
+        }
+    }
+
+    @Override
+    public void start() {
+        for (ClusterConfigurationMonitor monitor : monitors.values()) {
+            monitor.start();
+        }
+    }
+
+    @Override
+    public void stop() {
+        for (ClusterConfigurationMonitor monitor : monitors.values()) {
+            monitor.stop();
+        }
+    }
+
+    @Override
+    public ClusterConfigurationMonitor getMonitor(String type) {
+        return monitors.get(type);
+    }
+
+    @Override
+    public void addListener(ClusterConfigurationMonitor.ConfigurationChangeListener listener) {
+        for (ClusterConfigurationMonitor monitor : monitors.values()) {
+            monitor.addListener(listener);
+        }
+    }
+
+    public void setAliasService(AliasService aliasService) {
+        this.aliasService = aliasService;
+    }
+}

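For orientation: the providers iterated in init() above are discovered through the standard
java.util.ServiceLoader contract -- a file named after the interface
(org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider) under META-INF/services
lists implementation classes, each instantiated via a public no-arg constructor. The following is a minimal
sketch of such a provider, not code from this patch: the name ExampleMonitorProvider and the "EXAMPLE" type
are hypothetical, and the sketch assumes the provider and monitor interfaces declare only the operations
exercised by this service (getType, newInstance, start, stop, addListener).

    import org.apache.knox.gateway.config.GatewayConfig;
    import org.apache.knox.gateway.services.security.AliasService;
    import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
    import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider;

    // Hypothetical provider: discovered only if its fully-qualified name is listed in
    // META-INF/services/org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider
    public class ExampleMonitorProvider implements ClusterConfigurationMonitorProvider {

        @Override
        public String getType() {
            // Only instantiated when config.isClusterMonitorEnabled("EXAMPLE") returns true
            return "EXAMPLE";
        }

        @Override
        public ClusterConfigurationMonitor newInstance(GatewayConfig config, AliasService aliasService) {
            // A real provider would construct its registry-specific monitor here
            return new ClusterConfigurationMonitor() {
                @Override public void start() { /* begin watching the cluster's configuration */ }
                @Override public void stop()  { /* cease watching */ }
                @Override public void addListener(ConfigurationChangeListener listener) { /* notify on change */ }
            };
        }
    }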
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java
new file mode 100644
index 0000000..25bea08
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultConfigurationMonitorProvider.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+
+public class DefaultConfigurationMonitorProvider implements RemoteConfigurationMonitorProvider {
+
+    @Override
+    public RemoteConfigurationMonitor newInstance(final GatewayConfig                            config,
+                                                  final RemoteConfigurationRegistryClientService clientService) {
+        return new DefaultRemoteConfigurationMonitor(config, clientService);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
new file mode 100644
index 0000000..efafee0
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
@@ -0,0 +1,228 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.GatewayMessages;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.EntryListener;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+import org.apache.zookeeper.ZooDefs;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+
+class DefaultRemoteConfigurationMonitor implements RemoteConfigurationMonitor {
+
+    private static final String NODE_KNOX = "/knox";
+    private static final String NODE_KNOX_CONFIG = NODE_KNOX + "/config";
+    private static final String NODE_KNOX_PROVIDERS = NODE_KNOX_CONFIG + "/shared-providers";
+    private static final String NODE_KNOX_DESCRIPTORS = NODE_KNOX_CONFIG + "/descriptors";
+
+    private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
+
+    // N.B. This is ZooKeeper-specific, and should be abstracted when another registry is supported
+    private static final RemoteConfigurationRegistryClient.EntryACL AUTHENTICATED_USERS_ALL;
+    static {
+        AUTHENTICATED_USERS_ALL = new RemoteConfigurationRegistryClient.EntryACL() {
+            public String getId() {
+                return "";
+            }
+
+            public String getType() {
+                return "auth";
+            }
+
+            public Object getPermissions() {
+                return ZooDefs.Perms.ALL;
+            }
+
+            public boolean canRead() {
+                return true;
+            }
+
+            public boolean canWrite() {
+                return true;
+            }
+        };
+    }
+
+    private RemoteConfigurationRegistryClient client = null;
+
+    private File providersDir;
+    private File descriptorsDir;
+
+    /**
+     * @param config                The gateway configuration
+     * @param registryClientService The service from which the remote registry client should be acquired.
+     */
+    DefaultRemoteConfigurationMonitor(GatewayConfig                            config,
+                                      RemoteConfigurationRegistryClientService registryClientService) {
+        this.providersDir   = new File(config.getGatewayProvidersConfigDir());
+        this.descriptorsDir = new File(config.getGatewayDescriptorsDir());
+
+        if (registryClientService != null) {
+            String clientName = config.getRemoteConfigurationMonitorClientName();
+            if (clientName != null) {
+                this.client = registryClientService.get(clientName);
+                if (this.client == null) {
+                    log.unresolvedClientConfigurationForRemoteMonitoring(clientName);
+                }
+            } else {
+                log.missingClientConfigurationForRemoteMonitoring();
+            }
+        }
+    }
+
+    @Override
+    public void start() throws Exception {
+        if (client == null) {
+            throw new IllegalStateException("Failed to acquire a remote configuration registry client.");
+        }
+
+        final String monitorSource = client.getAddress();
+        log.startingRemoteConfigurationMonitor(monitorSource);
+
+        // Ensure the existence of the expected entries and their associated ACLs
+        ensureEntries();
+
+        // Confirm access to the remote provider configs directory znode
+        List<String> providerConfigs = client.listChildEntries(NODE_KNOX_PROVIDERS);
+        if (providerConfigs == null) {
+            // Either the ZNode does not exist, or there is an authentication problem
+            throw new IllegalStateException("Unable to access remote path: " + NODE_KNOX_PROVIDERS);
+        }
+
+        // Confirm access to the remote descriptors directory znode
+        List<String> descriptors = client.listChildEntries(NODE_KNOX_DESCRIPTORS);
+        if (descriptors == null) {
+            // Either the ZNode does not exist, or there is an authentication problem
+            throw new IllegalStateException("Unable to access remote path: " + NODE_KNOX_DESCRIPTORS);
+        }
+
+        // Register a listener for provider config znode additions/removals
+        client.addChildEntryListener(NODE_KNOX_PROVIDERS, new ConfigDirChildEntryListener(providersDir));
+
+        // Register a listener for descriptor znode additions/removals
+        client.addChildEntryListener(NODE_KNOX_DESCRIPTORS, new ConfigDirChildEntryListener(descriptorsDir));
+
+        log.monitoringRemoteConfigurationSource(monitorSource);
+    }
+
+
+    @Override
+    public void stop() throws Exception {
+        client.removeEntryListener(NODE_KNOX_PROVIDERS);
+        client.removeEntryListener(NODE_KNOX_DESCRIPTORS);
+    }
+
+    private void ensureEntries() {
+        ensureEntry(NODE_KNOX);
+        ensureEntry(NODE_KNOX_CONFIG);
+        ensureEntry(NODE_KNOX_PROVIDERS);
+        ensureEntry(NODE_KNOX_DESCRIPTORS);
+    }
+
+    private void ensureEntry(String name) {
+        if (!client.entryExists(name)) {
+            client.createEntry(name);
+        } else {
+            // Validate the ACL
+            List<RemoteConfigurationRegistryClient.EntryACL> entryACLs = client.getACL(name);
+            for (RemoteConfigurationRegistryClient.EntryACL entryACL : entryACLs) {
+                // N.B. This is ZooKeeper-specific, and should be abstracted when another registry is supported
+                // For now, check for ZooKeeper world:anyone with ANY permissions (even read-only)
+                if (entryACL.getType().equals("world") && entryACL.getId().equals("anyone")) {
+                    log.suspectWritableRemoteConfigurationEntry(name);
+
+                    // If the client is authenticated, but "anyone" can write the content, then the content may not
+                    // be trustworthy.
+                    if (client.isAuthenticationConfigured()) {
+                        log.correctingSuspectWritableRemoteConfigurationEntry(name);
+
+                        // Replace the existing ACL with one that permits only authenticated users
+                        client.setACL(name, Collections.singletonList(AUTHENTICATED_USERS_ALL));
+                    }
+                }
+            }
+        }
+    }
+
+    private static class ConfigDirChildEntryListener implements ChildEntryListener {
+        File localDir;
+
+        ConfigDirChildEntryListener(File localDir) {
+            this.localDir = localDir;
+        }
+
+        @Override
+        public void childEvent(RemoteConfigurationRegistryClient client, Type type, String path) {
+            File localFile = new File(localDir, path.substring(path.lastIndexOf("/") + 1));
+
+            switch (type) {
+                case REMOVED:
+                    FileUtils.deleteQuietly(localFile);
+                    log.deletedRemoteConfigFile(localDir.getName(), localFile.getName());
+                    try {
+                        client.removeEntryListener(path);
+                    } catch (Exception e) {
+                        log.errorRemovingRemoteConfigurationListenerForPath(path, e);
+                    }
+                    break;
+                case ADDED:
+                    try {
+                        client.addEntryListener(path, new ConfigEntryListener(localDir));
+                    } catch (Exception e) {
+                        log.errorAddingRemoteConfigurationListenerForPath(path, e);
+                    }
+                    break;
+            }
+        }
+    }
+
+    private static class ConfigEntryListener implements EntryListener {
+        private File localDir;
+
+        ConfigEntryListener(File localDir) {
+            this.localDir = localDir;
+        }
+
+        @Override
+        public void entryChanged(RemoteConfigurationRegistryClient client, String path, byte[] data) {
+            File localFile = new File(localDir, path.substring(path.lastIndexOf("/")));
+            if (data != null) {
+                try {
+                    FileUtils.writeByteArrayToFile(localFile, data);
+                    log.downloadedRemoteConfigFile(localDir.getName(), localFile.getName());
+                } catch (IOException e) {
+                    log.errorDownloadingRemoteConfiguration(path, e);
+                }
+            } else {
+                FileUtils.deleteQuietly(localFile);
+                log.deletedRemoteConfigFile(localDir.getName(), localFile.getName());
+            }
+        }
+    }
+
+}

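The EntryACL handling above is, as its comments note, ZooKeeper-specific. For reference, the two ACLs
involved map onto ZooKeeper's native model as in the following sketch, which uses the plain ZooKeeper API
and is illustrative only, not code from this patch:

    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Id;

    class ZkAclSketch {
        // world:anyone -- the open scheme ensureEntry() flags as suspect
        // (any permission bits at all trigger the check above)
        static final ACL WORLD_ANYONE_ALL = new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone"));

        // auth:"" with ALL permissions -- the native form of AUTHENTICATED_USERS_ALL,
        // restricting the znode to clients that have authenticated with the ensemble
        static final ACL AUTHENTICATED_ALL = new ACL(ZooDefs.Perms.ALL, new Id("auth", ""));
    }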
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java
new file mode 100644
index 0000000..d020532
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorFactory.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+import org.apache.knox.gateway.GatewayMessages;
+import org.apache.knox.gateway.GatewayServer;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.services.GatewayServices;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+import java.util.ServiceLoader;
+
+public class RemoteConfigurationMonitorFactory {
+
+    private static final GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
+
+    private static RemoteConfigurationRegistryClientService remoteConfigRegistryClientService = null;
+
+    public static void setClientService(RemoteConfigurationRegistryClientService clientService) {
+        remoteConfigRegistryClientService = clientService;
+    }
+
+    private static RemoteConfigurationRegistryClientService getClientService() {
+        if (remoteConfigRegistryClientService == null) {
+            GatewayServices services = GatewayServer.getGatewayServices();
+            if (services != null) {
+                remoteConfigRegistryClientService = services.getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
+            }
+        }
+
+        return remoteConfigRegistryClientService;
+    }
+
+    /**
+     *
+     * @param config The GatewayConfig
+     *
+     * @return The first RemoteConfigurationMonitor extension that is found.
+     */
+    public static RemoteConfigurationMonitor get(GatewayConfig config) {
+        RemoteConfigurationMonitor rcm = null;
+
+        ServiceLoader<RemoteConfigurationMonitorProvider> providers =
+                                                 ServiceLoader.load(RemoteConfigurationMonitorProvider.class);
+        for (RemoteConfigurationMonitorProvider provider : providers) {
+            try {
+                rcm = provider.newInstance(config, getClientService());
+                if (rcm != null) {
+                    break;
+                }
+            } catch (Exception e) {
+                log.remoteConfigurationMonitorInitFailure(e.getLocalizedMessage(), e);
+            }
+        }
+
+        return rcm;
+    }
+
+}

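A sketch of the intended call pattern follows; in practice the gateway server drives this at startup, and
the wrapper class here is illustrative only:

    import org.apache.knox.gateway.config.GatewayConfig;
    import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitor;
    import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorFactory;

    class MonitorStartupSketch {
        static RemoteConfigurationMonitor startIfAvailable(GatewayConfig config) throws Exception {
            // get() returns null when no provider produces a monitor,
            // e.g. when no remote registry client is configured
            RemoteConfigurationMonitor monitor = RemoteConfigurationMonitorFactory.get(config);
            if (monitor != null) {
                // start() throws if the registry client or the /knox znodes are inaccessible
                monitor.start();
            }
            return monitor;
        }
    }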
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java b/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
index 9a87dd0..928c37e 100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
@@ -1855,7 +1855,7 @@ public class KnoxCLI extends Configured implements Tool {
     static final String DESC = "Lists all of the remote configuration registry clients defined in gateway-site.xml.\n";
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#execute()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
      */
     @Override
     public void execute() throws Exception {
@@ -1870,7 +1870,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#getUsage()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
      */
     @Override
     public String getUsage() {
@@ -1958,7 +1958,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#execute()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
      */
     @Override
     public void execute() throws Exception {
@@ -1966,7 +1966,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#getUsage()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
      */
     @Override
     public String getUsage() {
@@ -1987,7 +1987,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#execute()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
      */
     @Override
     public void execute() throws Exception {
@@ -1995,7 +1995,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#getUsage()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
      */
     @Override
     public String getUsage() {
@@ -2016,7 +2016,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#execute()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
      */
     @Override
     public void execute() throws Exception {
@@ -2039,7 +2039,7 @@ public class KnoxCLI extends Configured implements Tool {
     }
 
     /* (non-Javadoc)
-     * @see org.apache.hadoop.gateway.util.KnoxCLI.Command#getUsage()
+     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
      */
     @Override
     public String getUsage() {

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.monitor.RemoteConfigurationMonitorProvider
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.monitor.RemoteConfigurationMonitorProvider b/gateway-server/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.monitor.RemoteConfigurationMonitorProvider
deleted file mode 100644
index bd4023e..0000000
--- a/gateway-server/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.monitor.RemoteConfigurationMonitorProvider
+++ /dev/null
@@ -1,19 +0,0 @@
-##########################################################################
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-
-org.apache.hadoop.gateway.topology.monitor.DefaultConfigurationMonitorProvider

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorProvider
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorProvider b/gateway-server/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorProvider
new file mode 100644
index 0000000..63f438a
--- /dev/null
+++ b/gateway-server/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorProvider
@@ -0,0 +1,19 @@
+##########################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+org.apache.knox.gateway.topology.monitor.DefaultConfigurationMonitorProvider

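As with any ServiceLoader registration, the file's name is the fully-qualified name of the provider
interface and each non-comment line names one implementation class; this entry is what lets the
ServiceLoader.load(...) call in RemoteConfigurationMonitorFactory find DefaultConfigurationMonitorProvider
at runtime.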

[10/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWTToken.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWTToken.java
index 27b1a30,0000000..e765c27
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWTToken.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWTToken.java
@@@ -1,273 -1,0 +1,281 @@@
 +  /**
 +   * Licensed to the Apache Software Foundation (ASF) under one
 +   * or more contributor license agreements.  See the NOTICE file
 +   * distributed with this work for additional information
 +   * regarding copyright ownership.  The ASF licenses this file
 +   * to you under the Apache License, Version 2.0 (the
 +   * "License"); you may not use this file except in compliance
 +   * with the License.  You may obtain a copy of the License at
 +   *
 +   *     http://www.apache.org/licenses/LICENSE-2.0
 +   *
 +   * Unless required by applicable law or agreed to in writing, software
 +   * distributed under the License is distributed on an "AS IS" BASIS,
 +   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 +   * See the License for the specific language governing permissions and
 +   * limitations under the License.
 +   */
 +package org.apache.knox.gateway.services.security.token.impl;
 +
- import java.io.UnsupportedEncodingException;
 +import java.text.ParseException;
 +import java.util.Date;
 +import java.util.ArrayList;
 +import java.util.List;
- import java.util.Map;
 +
- import org.apache.commons.codec.binary.Base64;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +
 +import com.nimbusds.jose.JOSEException;
 +import com.nimbusds.jose.JWSAlgorithm;
 +import com.nimbusds.jose.JWSHeader;
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.Payload;
 +import com.nimbusds.jose.util.Base64URL;
 +import com.nimbusds.jwt.JWTClaimsSet;
 +import com.nimbusds.jwt.SignedJWT;
 +
 +public class JWTToken implements JWT {
 +  private static JWTProviderMessages log = MessagesFactory.get( JWTProviderMessages.class );
 +
 +  SignedJWT jwt = null;
 +
 +  private JWTToken(String header, String claims, String signature) throws ParseException {
 +    jwt = new SignedJWT(new Base64URL(header), new Base64URL(claims), new Base64URL(signature));
 +  }
 +
 +  public JWTToken(String serializedJWT) throws ParseException {
 +    try {
 +      jwt = SignedJWT.parse(serializedJWT);
 +    } catch (ParseException e) {
 +      log.unableToParseToken(e);
 +      throw e;
 +    }
 +  }
 +
 +  public JWTToken(String alg, String[] claimsArray) {
 +    this(alg, claimsArray, null);
 +  }
 +
 +  public JWTToken(String alg, String[] claimsArray, List<String> audiences) {
 +    JWSHeader header = new JWSHeader(new JWSAlgorithm(alg));
 +
 +    if (claimsArray[2] != null) {
 +      if (audiences == null) {
 +        audiences = new ArrayList<String>();
 +      }
 +      audiences.add(claimsArray[2]);
 +    }
 +    JWTClaimsSet claims = null;
 +    JWTClaimsSet.Builder builder = new JWTClaimsSet.Builder()
 +    .issuer(claimsArray[0])
 +    .subject(claimsArray[1])
 +    .audience(audiences);
 +    if(claimsArray[3] != null) {
 +      builder = builder.expirationTime(new Date(Long.parseLong(claimsArray[3])));
 +    }
 +
 +    claims = builder.build();
 +
 +    jwt = new SignedJWT(header, claims);
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getPayloadToSign()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getHeader()
 +   */
 +  @Override
 +  public String getHeader() {
 +    JWSHeader header = jwt.getHeader();
 +    return header.toString();
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getPayloadToSign()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getClaims()
 +   */
 +  @Override
 +  public String getClaims() {
 +    String c = null;
 +    JWTClaimsSet claims = null;
 +    try {
 +      claims = (JWTClaimsSet) jwt.getJWTClaimsSet();
 +      c = claims.toJSONObject().toJSONString();
 +    } catch (ParseException e) {
 +      log.unableToParseToken(e);
 +    }
 +    return c;
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getPayloadToSign()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getPayload()
 +   */
 +  @Override
 +  public String getPayload() {
 +    Payload payload = jwt.getPayload();
 +    return payload.toString();
 +  }
 +
 +  public String toString() {
 +    return jwt.serialize();
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#setSignaturePayload(byte[])
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#setSignaturePayload(byte[])
 +   */
 +  @Override
 +  public void setSignaturePayload(byte[] payload) {
 +//    this.payload = payload;
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getSignaturePayload()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getSignaturePayload()
 +   */
 +  @Override
 +  public byte[] getSignaturePayload() {
 +    byte[] b = null;
 +    Base64URL b64 = jwt.getSignature();
 +    if (b64 != null) {
 +      b = b64.decode();
 +    }
 +    return b;
 +  }
 +
 +  public static JWTToken parseToken(String wireToken) throws ParseException {
 +    log.parsingToken(wireToken);
 +    String[] parts = wireToken.split("\\.");
 +    return new JWTToken(parts[0], parts[1], parts[2]);
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getClaim(java.lang.String)
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getClaim(java.lang.String)
 +   */
 +  @Override
 +  public String getClaim(String claimName) {
 +    String claim = null;
 +
 +    try {
 +      claim = jwt.getJWTClaimsSet().getStringClaim(claimName);
 +    } catch (ParseException e) {
 +      log.unableToParseToken(e);
 +    }
 +
 +    return claim;
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getSubject()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getSubject()
 +   */
 +  @Override
 +  public String getSubject() {
 +    return getClaim(JWT.SUBJECT);
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getIssuer()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getIssuer()
 +   */
 +  @Override
 +  public String getIssuer() {
 +    return getClaim(JWT.ISSUER);
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getAudience()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getAudience()
 +   */
 +  @Override
 +  public String getAudience() {
 +    String[] claim = null;
 +    String c = null;
 +
 +    claim = getAudienceClaims();
 +    if (claim != null) {
 +      c = claim[0];
 +    }
 +
 +    return c;
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getAudienceClaims()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getAudienceClaims()
 +   */
 +  @Override
 +  public String[] getAudienceClaims() {
 +    String[] claims = null;
 +
 +    try {
 +      claims = jwt.getJWTClaimsSet().getStringArrayClaim(JWT.AUDIENCE);
 +    } catch (ParseException e) {
 +      log.unableToParseToken(e);
 +    }
 +
 +    return claims;
 +  }
 +
 +  /* (non-Javadoc)
-    * @see JWT#getExpires()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getExpires()
 +   */
 +  @Override
 +  public String getExpires() {
 +    Date expires = getExpiresDate();
 +    if (expires != null) {
 +      return String.valueOf(expires.getTime());
 +    }
 +    return null;
 +  }
 +
 +  @Override
 +  public Date getExpiresDate() {
 +    Date date = null;
 +    try {
 +      date = jwt.getJWTClaimsSet().getExpirationTime();
 +    } catch (ParseException e) {
 +      log.unableToParseToken(e);
 +    }
 +    return date;
 +  }
 +
++  @Override
++  public Date getNotBeforeDate() {
++    Date date = null;
++    try {
++      date = jwt.getJWTClaimsSet().getNotBeforeTime();
++    } catch (ParseException e) {
++      log.unableToParseToken(e);
++    }
++    return date;
++  }
++
 +  /* (non-Javadoc)
-    * @see JWT#getPrincipal()
++   * @see org.apache.knox.gateway.services.security.token.impl.JWT#getPrincipal()
 +   */
 +  @Override
 +  public String getPrincipal() {
 +    return getClaim(JWT.PRINCIPAL);
 +  }
 +
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.services.security.token.impl.JWT#sign(JWSSigner)
 +   */
 +  @Override
 +  public void sign(JWSSigner signer) {
 +    try {
 +      jwt.sign(signer);
 +    } catch (JOSEException e) {
 +      log.unableToSignToken(e);
 +    }
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.services.security.token.impl.JWT#verify(JWSVerifier)
 +   */
 +  public boolean verify(JWSVerifier verifier) {
 +    boolean rc = false;
 +
 +    try {
 +      rc = jwt.verify(verifier);
 +    } catch (JOSEException e) {
 +      // Verification failures are logged and reported as an unverified token
 +      log.unableToVerifyToken(e);
 +    }
 +
 +    return rc;
 +  }
 +}

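The test class below exercises this type end to end. As a compact orientation, a sign/verify round trip
looks like the following sketch; note that the JWTToken constructor passes claims[3] straight to
java.util.Date, so it is interpreted as milliseconds since the epoch, and the key generation here is
purely illustrative:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.interfaces.RSAPrivateKey;
    import java.security.interfaces.RSAPublicKey;

    import com.nimbusds.jose.crypto.RSASSASigner;
    import com.nimbusds.jose.crypto.RSASSAVerifier;

    import org.apache.knox.gateway.services.security.token.impl.JWTToken;

    class JwtRoundTripSketch {
        static boolean roundTrip() throws Exception {
            KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
            kpg.initialize(2048);
            KeyPair kp = kpg.genKeyPair();

            // claims[0]=issuer, claims[1]=subject, claims[2]=audience,
            // claims[3]=expiration in milliseconds since the epoch
            String[] claims = { "KNOXSSO", "guest", "https://gateway.example.com",
                                Long.toString(System.currentTimeMillis() + 300000) };
            JWTToken token = new JWTToken("RS256", claims);

            token.sign(new RSASSASigner((RSAPrivateKey) kp.getPrivate()));
            return token.verify(new RSASSAVerifier((RSAPublicKey) kp.getPublic()));
        }
    }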
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/token/impl/JWTTokenTest.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/test/java/org/apache/knox/gateway/services/security/token/impl/JWTTokenTest.java
index 1b0df9e,0000000..2c23e92
mode 100644,000000..100644
--- a/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/token/impl/JWTTokenTest.java
+++ b/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/token/impl/JWTTokenTest.java
@@@ -1,223 -1,0 +1,240 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.security.token.impl;
 +
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.NoSuchAlgorithmException;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
++import java.text.ParseException;
 +import java.util.ArrayList;
 +import java.util.Date;
++import java.util.List;
 +
++import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.JWSAlgorithm;
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +public class JWTTokenTest extends org.junit.Assert {
 +  private static final String JWT_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE0MTY5MjkxMDksImp0aSI6ImFhN2Y4ZDBhOTVjIiwic2NvcGVzIjpbInJlcG8iLCJwdWJsaWNfcmVwbyJdfQ.XCEwpBGvOLma4TCoh36FU7XhUbcskygS81HE1uHLf0E";
 +  private static final String HEADER = "{\"typ\":\"JWT\",\"alg\":\"HS256\"}";
 +
-   private RSAPublicKey publicKey;
-   private RSAPrivateKey privateKey;
++  private static RSAPublicKey publicKey;
++  private static RSAPrivateKey privateKey;
 +
-   public JWTTokenTest() throws Exception, NoSuchAlgorithmException {
++  @BeforeClass
++  public static void setup() throws Exception, NoSuchAlgorithmException {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(2048);
 +
 +    KeyPair kp = kpg.genKeyPair();
 +    publicKey = (RSAPublicKey) kp.getPublic();
 +    privateKey = (RSAPrivateKey) kp.getPrivate();
 +  }
 +
 +  @Test
 +  public void testTokenParsing() throws Exception {
 +    JWTToken token = JWTToken.parseToken(JWT_TOKEN);
 +    assertEquals(token.getHeader(), HEADER);
 +
 +    assertEquals(token.getClaim("jti"), "aa7f8d0a95c");
 +  }
 +
 +  @Test
 +  public void testTokenCreation() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = "https://login.example.com";
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     JWTToken token = new JWTToken("RS256", claims);
++    JWT token = new JWTToken("RS256", claims);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +  }
 +
 +  @Test
 +  public void testTokenCreationWithAudienceListSingle() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = null;
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     ArrayList<String> audiences = new ArrayList<String>();
++    List<String> audiences = new ArrayList<String>();
 +    audiences.add("https://login.example.com");
 +
-     JWTToken token = new JWTToken("RS256", claims, audiences);
++    JWT token = new JWTToken("RS256", claims, audiences);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +    assertEquals(1, token.getAudienceClaims().length);
 +  }
 +
 +  @Test
 +  public void testTokenCreationWithAudienceListMultiple() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = null;
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     ArrayList<String> audiences = new ArrayList<String>();
++    List<String> audiences = new ArrayList<String>();
 +    audiences.add("https://login.example.com");
 +    audiences.add("KNOXSSO");
 +
-     JWTToken token = new JWTToken("RS256", claims, audiences);
++    JWT token = new JWTToken("RS256", claims, audiences);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +    assertEquals(2, token.getAudienceClaims().length);
 +  }
 +
 +  @Test
 +  public void testTokenCreationWithAudienceListCombined() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = "LJM";
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
 +    ArrayList<String> audiences = new ArrayList<String>();
 +    audiences.add("https://login.example.com");
 +    audiences.add("KNOXSSO");
 +
 +    JWTToken token = new JWTToken("RS256", claims, audiences);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +    assertEquals(3, token.getAudienceClaims().length);
 +  }
 +
 +  @Test
 +  public void testTokenCreationWithNullAudienceList() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = null;
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     ArrayList<String> audiences = null;
++    List<String> audiences = null;
 +
-     JWTToken token = new JWTToken("RS256", claims, audiences);
++    JWT token = new JWTToken("RS256", claims, audiences);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals(null, token.getAudience());
 +    assertArrayEquals(null, token.getAudienceClaims());
 +  }
 +
 +  @Test
 +  public void testTokenCreationRS512() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = "https://login.example.com";
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
 +    JWTToken token = new JWTToken(JWSAlgorithm.RS512.getName(), claims);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +    assertTrue(token.getHeader().contains(JWSAlgorithm.RS512.getName()));
 +  }
 +
 +  @Test
 +  public void testTokenSignature() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = "https://login.example.com";
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     JWTToken token = new JWTToken("RS256", claims);
- 
++    JWT token = new JWTToken("RS256", claims);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +
 +    // Sign the token
 +    JWSSigner signer = new RSASSASigner(privateKey);
 +    token.sign(signer);
 +    assertTrue(token.getSignaturePayload().length > 0);
 +
 +    // Verify the signature
 +    JWSVerifier verifier = new RSASSAVerifier((RSAPublicKey) publicKey);
 +    assertTrue(token.verify(verifier));
 +  }
 +
 +  @Test
 +  public void testTokenSignatureRS512() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = "https://login.example.com";
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     JWTToken token = new JWTToken(JWSAlgorithm.RS512.getName(), claims);
++    JWT token = new JWTToken(JWSAlgorithm.RS512.getName(), claims);
 +
 +    assertEquals("KNOXSSO", token.getIssuer());
 +    assertEquals("john.doe@example.com", token.getSubject());
 +    assertEquals("https://login.example.com", token.getAudience());
 +    assertTrue(token.getHeader().contains(JWSAlgorithm.RS512.getName()));
 +
 +    // Sign the token
 +    JWSSigner signer = new RSASSASigner(privateKey);
 +    token.sign(signer);
 +    assertTrue(token.getSignaturePayload().length > 0);
 +
 +    // Verify the signature
 +    JWSVerifier verifier = new RSASSAVerifier((RSAPublicKey) publicKey);
 +    assertTrue(token.verify(verifier));
 +  }
 +
 +  @Test
 +  public void testTokenExpiry() throws Exception {
 +    String[] claims = new String[4];
 +    claims[0] = "KNOXSSO";
 +    claims[1] = "john.doe@example.com";
 +    claims[2] = "https://login.example.com";
 +    claims[3] = Long.toString( ( System.currentTimeMillis()/1000 ) + 300);
-     JWTToken token = new JWTToken("RS256", claims);
++    JWT token = new JWTToken("RS256", claims);
 +
 +    assertNotNull(token.getExpires());
 +    assertNotNull(token.getExpiresDate());
 +    assertEquals(token.getExpiresDate(), new Date(Long.valueOf(token.getExpires())));
 +  }
++
++  @Test
++  public void testUnsignedToken() throws Exception {
++      String unsignedToken = "eyJhbGciOiJub25lIn0.eyJzdWIiOiJhbGljZSIsImp0aSI6ImY2YmNj"
++          + "MDVjLWI4MTktNGM0Mi1iMGMyLWJlYmY1MDE4YWFiZiJ9.";
++
++      try {
++          new JWTToken(unsignedToken);
++          fail("Failure expected on an unsigned token");
++      } catch (ParseException ex) {
++          // expected
++      }
++  }
++
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/pom.xml
----------------------------------------------------------------------


[13/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
index a87c8d0,0000000..fe39f25
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
+++ b/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jDispatcherFilter.java
@@@ -1,215 -1,0 +1,214 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j.filter;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.pac4j.Pac4jMessages;
 +import org.apache.knox.gateway.pac4j.session.KnoxSessionStore;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.AliasServiceException;
 +import org.apache.knox.gateway.services.security.CryptoService;
 +import org.pac4j.config.client.PropertiesConfigFactory;
 +import org.pac4j.core.client.Client;
 +import org.pac4j.core.config.Config;
 +import org.pac4j.core.config.ConfigSingleton;
 +import org.pac4j.core.context.J2EContext;
- import org.pac4j.core.context.Pac4jConstants;
 +import org.pac4j.core.util.CommonHelper;
 +import org.pac4j.http.client.indirect.IndirectBasicAuthClient;
 +import org.pac4j.http.credentials.authenticator.test.SimpleTestUsernamePasswordAuthenticator;
 +import org.pac4j.j2e.filter.CallbackFilter;
- import org.pac4j.j2e.filter.RequiresAuthenticationFilter;
++import org.pac4j.j2e.filter.SecurityFilter;
 +
 +import javax.servlet.*;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.util.Enumeration;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +/**
 + * <p>This is the main filter for the pac4j provider. The pac4j provider module heavily relies on the j2e-pac4j library (https://github.com/pac4j/j2e-pac4j).</p>
 + * <p>This filter dispatches the HTTP calls between the j2e-pac4j filters:</p>
 + * <ul>
 + *     <li>to the {@link CallbackFilter} if the <code>pac4jCallback</code> parameter is present: it finishes the authentication process</li>
 + *     <li>to the {@link SecurityFilter} otherwise: it starts the authentication process (redirection to the identity provider) if the user is not authenticated</li>
 + * </ul>
 + * <p>It uses the {@link KnoxSessionStore} to manage session data. The generated cookies are defined on a domain name
 + * which can be configured via the domain suffix parameter: <code>pac4j.cookie.domain.suffix</code>.</p>
 + * <p>The callback URL must be set to the currently protected URL (the KnoxSSO service, for example) via the parameter: <code>pac4j.callbackUrl</code>.</p>
 + *
 + * @since 0.8.0
 + */
 +public class Pac4jDispatcherFilter implements Filter {
 +
 +  private static Pac4jMessages log = MessagesFactory.get(Pac4jMessages.class);
 +
 +  public static final String TEST_BASIC_AUTH = "testBasicAuth";
 +
 +  public static final String PAC4J_CALLBACK_URL = "pac4j.callbackUrl";
 +
 +  public static final String PAC4J_CALLBACK_PARAMETER = "pac4jCallback";
 +
 +  private static final String PAC4J_COOKIE_DOMAIN_SUFFIX_PARAM = "pac4j.cookie.domain.suffix";
 +
 +  private CallbackFilter callbackFilter;
 +
-   private RequiresAuthenticationFilter requiresAuthenticationFilter;
++  private SecurityFilter securityFilter;
 +  private MasterService masterService = null;
 +  private KeystoreService keystoreService = null;
 +  private AliasService aliasService = null;
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +    // JWT service
 +    final ServletContext context = filterConfig.getServletContext();
 +    CryptoService cryptoService = null;
 +    String clusterName = null;
 +    if (context != null) {
 +      GatewayServices services = (GatewayServices) context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +      clusterName = (String) context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE);
 +      if (services != null) {
 +        keystoreService = (KeystoreService) services.getService(GatewayServices.KEYSTORE_SERVICE);
 +        cryptoService = (CryptoService) services.getService(GatewayServices.CRYPTO_SERVICE);
 +        aliasService = (AliasService) services.getService(GatewayServices.ALIAS_SERVICE);
 +        masterService = (MasterService) services.getService("MasterService");
 +      }
 +    }
 +    // crypto service, alias service and cluster name are mandatory
 +    if (cryptoService == null || aliasService == null || clusterName == null) {
 +      log.cryptoServiceAndAliasServiceAndClusterNameRequired();
 +      throw new ServletException("The crypto service, alias service and cluster name are required.");
 +    }
 +    try {
 +      aliasService.getPasswordFromAliasForCluster(clusterName, KnoxSessionStore.PAC4J_PASSWORD, true);
 +    } catch (AliasServiceException e) {
 +      log.unableToGenerateAPasswordForEncryption(e);
 +      throw new ServletException("Unable to generate a password for encryption.");
 +    }
 +
 +    // url to SSO authentication provider
 +    String pac4jCallbackUrl = filterConfig.getInitParameter(PAC4J_CALLBACK_URL);
 +    if (pac4jCallbackUrl == null) {
 +      log.ssoAuthenticationProviderUrlRequired();
 +      throw new ServletException("Required pac4j callback URL is missing.");
 +    }
 +    // add the callback parameter to know it's a callback
 +    pac4jCallbackUrl = CommonHelper.addParameter(pac4jCallbackUrl, PAC4J_CALLBACK_PARAMETER, "true");
 +
 +    final Config config;
 +    final String clientName;
 +    // client name from servlet parameter (mandatory)
-     final String clientNameParameter = filterConfig.getInitParameter(Pac4jConstants.CLIENT_NAME);
++    final String clientNameParameter = filterConfig.getInitParameter("clientName");
 +    if (clientNameParameter == null) {
 +      log.clientNameParameterRequired();
 +      throw new ServletException("Required pac4j clientName parameter is missing.");
 +    }
 +    if (TEST_BASIC_AUTH.equalsIgnoreCase(clientNameParameter)) {
 +      // test configuration
 +      final IndirectBasicAuthClient indirectBasicAuthClient = new IndirectBasicAuthClient(new SimpleTestUsernamePasswordAuthenticator());
 +      indirectBasicAuthClient.setRealmName("Knox TEST");
 +      config = new Config(pac4jCallbackUrl, indirectBasicAuthClient);
 +      clientName = "IndirectBasicAuthClient";
 +    } else {
 +      // get clients from the init parameters
 +      final Map<String, String> properties = new HashMap<>();
 +      final Enumeration<String> names = filterConfig.getInitParameterNames();
 +      addDefaultConfig(clientNameParameter, properties);
 +      while (names.hasMoreElements()) {
 +        final String key = names.nextElement();
 +        properties.put(key, filterConfig.getInitParameter(key));
 +      }
 +      final PropertiesConfigFactory propertiesConfigFactory = new PropertiesConfigFactory(pac4jCallbackUrl, properties);
 +      config = propertiesConfigFactory.build();
 +      final List<Client> clients = config.getClients().getClients();
 +      if (clients == null || clients.isEmpty()) {
 +        log.atLeastOnePac4jClientMustBeDefined();
 +        throw new ServletException("At least one pac4j client must be defined.");
 +      }
 +      if (CommonHelper.isBlank(clientNameParameter)) {
 +        clientName = clients.get(0).getName();
 +      } else {
 +        clientName = clientNameParameter;
 +      }
 +    }
 +
 +    callbackFilter = new CallbackFilter();
-     requiresAuthenticationFilter = new RequiresAuthenticationFilter();
-     requiresAuthenticationFilter.setClientName(clientName);
-     requiresAuthenticationFilter.setConfig(config);
++    securityFilter = new SecurityFilter();
++    securityFilter.setClients(clientName);
++    securityFilter.setConfig(config);
 +
 +    final String domainSuffix = filterConfig.getInitParameter(PAC4J_COOKIE_DOMAIN_SUFFIX_PARAM);
 +    config.setSessionStore(new KnoxSessionStore(cryptoService, clusterName, domainSuffix));
 +    ConfigSingleton.setConfig(config);
 +  }
 +
 +  private void addDefaultConfig(String clientNameParameter, Map<String, String> properties) {
 +    // add default saml params
 +    if (clientNameParameter.contains("SAML2Client")) {
 +      properties.put(PropertiesConfigFactory.SAML_KEYSTORE_PATH,
 +          keystoreService.getKeystorePath());
 +
 +      properties.put(PropertiesConfigFactory.SAML_KEYSTORE_PASSWORD,
 +          new String(masterService.getMasterSecret()));
 +
 +      // check for provisioned alias for private key
 +      char[] gip = null;
 +      try {
 +        gip = aliasService.getGatewayIdentityPassphrase();
 +      }
 +      catch(AliasServiceException ase) {
 +        log.noPrivateKeyPasshraseProvisioned(ase);
 +      }
 +      if (gip != null) {
 +        properties.put(PropertiesConfigFactory.SAML_PRIVATE_KEY_PASSWORD,
 +            new String(gip));
 +      }
 +      else {
 +        // no alias provisioned then use the master
 +        properties.put(PropertiesConfigFactory.SAML_PRIVATE_KEY_PASSWORD,
 +            new String(masterService.getMasterSecret()));
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
 +
 +    final HttpServletRequest request = (HttpServletRequest) servletRequest;
 +    final HttpServletResponse response = (HttpServletResponse) servletResponse;
 +    final J2EContext context = new J2EContext(request, response, ConfigSingleton.getConfig().getSessionStore());
 +
 +    // it's a callback from an identity provider
 +    if (request.getParameter(PAC4J_CALLBACK_PARAMETER) != null) {
 +      // apply CallbackFilter
 +      callbackFilter.doFilter(servletRequest, servletResponse, filterChain);
 +    } else {
 +      // otherwise require authentication by applying the SecurityFilter
-       requiresAuthenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
++      securityFilter.doFilter(servletRequest, servletResponse, filterChain);
 +    }
 +  }
 +
 +  @Override
 +  public void destroy() { }
 +}

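To make the mandatory wiring concrete: init() above needs the gateway's crypto and alias services plus the pac4j.callbackUrl and clientName init parameters. The condensed sketch below is abridged from the Pac4jProviderTest later in this message; the callback URL value and the wrapper class are hypothetical, and Mockito supplies the mocks.

    import javax.servlet.FilterConfig;
    import javax.servlet.ServletContext;

    import org.apache.knox.gateway.pac4j.filter.Pac4jDispatcherFilter;
    import org.apache.knox.gateway.pac4j.session.KnoxSessionStore;
    import org.apache.knox.gateway.services.GatewayServices;
    import org.apache.knox.gateway.services.security.AliasService;
    import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;

    import static org.mockito.Mockito.*;

    public class DispatcherWiringSketch {
      public static void wire() throws Exception {
        // Alias service backing the crypto service, as in the test below.
        AliasService aliasService = mock(AliasService.class);
        when(aliasService.getPasswordFromAliasForCluster("knox", KnoxSessionStore.PAC4J_PASSWORD, true))
            .thenReturn("pwdfortest".toCharArray());
        DefaultCryptoService cryptoService = new DefaultCryptoService();
        cryptoService.setAliasService(aliasService);

        GatewayServices services = mock(GatewayServices.class);
        when(services.getService(GatewayServices.CRYPTO_SERVICE)).thenReturn(cryptoService);
        when(services.getService(GatewayServices.ALIAS_SERVICE)).thenReturn(aliasService);

        ServletContext context = mock(ServletContext.class);
        when(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).thenReturn(services);
        when(context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE)).thenReturn("knox");

        FilterConfig config = mock(FilterConfig.class);
        when(config.getServletContext()).thenReturn(context);
        when(config.getInitParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_URL))
            .thenReturn("https://gateway.example.com/gateway/idp/api/v1/websso");
        when(config.getInitParameter("clientName")).thenReturn(Pac4jDispatcherFilter.TEST_BASIC_AUTH);

        // Fails fast with a ServletException if any mandatory piece is missing.
        new Pac4jDispatcherFilter().init(config);
      }
    }
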
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
index 90395f1,0000000..6387a0b
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
+++ b/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
@@@ -1,142 -1,0 +1,146 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j.filter;
 +
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditService;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.pac4j.core.config.ConfigSingleton;
 +import org.pac4j.core.context.J2EContext;
++import org.pac4j.core.profile.CommonProfile;
 +import org.pac4j.core.profile.ProfileManager;
- import org.pac4j.core.profile.UserProfile;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.Filter;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.security.PrivilegedActionException;
 +import java.security.PrivilegedExceptionAction;
++import java.util.Optional;
 +
 +/**
 + * <p>This filter retrieves the authenticated user saved by the pac4j provider and propagates it down the filter chain as a JAAS Subject.</p>
 + *
 + * @since 0.8.0
 + */
 +public class Pac4jIdentityAdapter implements Filter {
 +
 +  private static final Logger logger = LoggerFactory.getLogger(Pac4jIdentityAdapter.class);
 +
 +  private static AuditService auditService = AuditServiceFactory.getAuditService();
 +  private static Auditor auditor = auditService.getAuditor(
 +      AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +      AuditConstants.KNOX_COMPONENT_NAME );
 +
 +  private String testIdentifier;
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +  }
 +
 +  public void destroy() {
 +  }
 +
 +  public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain)
 +      throws IOException, ServletException {
 +
 +    final HttpServletRequest request = (HttpServletRequest) servletRequest;
 +    final HttpServletResponse response = (HttpServletResponse) servletResponse;
 +    final J2EContext context = new J2EContext(request, response, ConfigSingleton.getConfig().getSessionStore());
-     final ProfileManager manager = new ProfileManager(context);
-     final UserProfile profile = manager.get(true);
-     logger.debug("User authenticated as: {}", profile);
-     manager.remove(true);
-     final String id = profile.getId();
-     testIdentifier = id;
-     PrimaryPrincipal pp = new PrimaryPrincipal(id);
-     Subject subject = new Subject();
-     subject.getPrincipals().add(pp);
-     auditService.getContext().setUsername(id);
-     String sourceUri = (String)request.getAttribute( AbstractGatewayFilter.SOURCE_REQUEST_CONTEXT_URL_ATTRIBUTE_NAME );
-     auditor.audit(Action.AUTHENTICATION, sourceUri, ResourceType.URI, ActionOutcome.SUCCESS);
-     
-     doAs(request, response, chain, subject);
++    final ProfileManager<CommonProfile> manager = new ProfileManager<CommonProfile>(context);
++    final Optional<CommonProfile> optional = manager.get(true);
++    if (optional.isPresent()) {
++      CommonProfile profile = optional.get();
++      logger.debug("User authenticated as: {}", profile);
++      manager.remove(true);
++      final String id = profile.getId();
++      testIdentifier = id;
++      PrimaryPrincipal pp = new PrimaryPrincipal(id);
++      Subject subject = new Subject();
++      subject.getPrincipals().add(pp);
++      auditService.getContext().setUsername(id);
++      String sourceUri = (String)request.getAttribute( AbstractGatewayFilter.SOURCE_REQUEST_CONTEXT_URL_ATTRIBUTE_NAME );
++      auditor.audit(Action.AUTHENTICATION, sourceUri, ResourceType.URI, ActionOutcome.SUCCESS);
++
++      doAs(request, response, chain, subject);
++    }
 +  }
-   
++
 +  private void doAs(final ServletRequest request,
 +      final ServletResponse response, final FilterChain chain, Subject subject)
 +      throws IOException, ServletException {
 +    try {
 +      Subject.doAs(
 +          subject,
 +          new PrivilegedExceptionAction<Object>() {
 +            public Object run() throws Exception {
 +              chain.doFilter(request, response);
 +              return null;
 +            }
 +          }
 +          );
 +    }
 +    catch (PrivilegedActionException e) {
 +      Throwable t = e.getCause();
 +      if (t instanceof IOException) {
 +        throw (IOException) t;
 +      }
 +      else if (t instanceof ServletException) {
 +        throw (ServletException) t;
 +      }
 +      else {
 +        throw new ServletException(t);
 +      }
 +    }
 +  }
 +
 +  /**
 +   * For tests only.
 +   */
 +  public static void setAuditService(AuditService auditService) {
 +    Pac4jIdentityAdapter.auditService = auditService;
 +  }
 +
 +  /**
 +   * For tests only.
 +   */
 +  public static void setAuditor(Auditor auditor) {
 +    Pac4jIdentityAdapter.auditor = auditor;
 +  }
 +
 +  /**
 +   * For tests only.
 +     */
 +  public String getTestIdentifier() {
 +    return testIdentifier;
 +  }
 +}

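Once doAs() above establishes the JAAS Subject, downstream filters and servlets can recover the authenticated user through the standard JAAS lookup. A minimal sketch, using plain Java SE APIs plus Knox's PrimaryPrincipal (the wrapper class is hypothetical, not code from this commit):

    import java.security.AccessController;
    import javax.security.auth.Subject;
    import org.apache.knox.gateway.security.PrimaryPrincipal;

    public class DownstreamPrincipalSketch {
      // Intended to run inside the chain wrapped by Pac4jIdentityAdapter.doAs().
      public static String currentUser() {
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (subject == null) {
          return null; // not running inside Subject.doAs()
        }
        for (PrimaryPrincipal pp : subject.getPrincipals(PrimaryPrincipal.class)) {
          return pp.getName();
        }
        return null;
      }
    }
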
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/session/KnoxSessionStore.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/session/KnoxSessionStore.java
index 6ce002c,0000000..4ba55ea
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/session/KnoxSessionStore.java
+++ b/gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/session/KnoxSessionStore.java
@@@ -1,120 -1,0 +1,146 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j.session;
 +
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.knox.gateway.services.security.CryptoService;
 +import org.apache.knox.gateway.services.security.EncryptionResult;
 +import org.apache.knox.gateway.util.Urls;
 +import org.pac4j.core.context.ContextHelper;
 +import org.pac4j.core.context.Cookie;
 +import org.pac4j.core.context.WebContext;
 +import org.pac4j.core.context.session.SessionStore;
 +import org.pac4j.core.exception.TechnicalException;
 +import org.pac4j.core.util.JavaSerializationHelper;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.Serializable;
++import java.util.Map;
 +
 +/**
 + * A session store implementation that keeps data in cookies rather than in memory.
 + * For security, each value is encrypted and Base64-encoded before being written to a cookie.
 + *
 + * @since 0.8.0
 + */
 +public class KnoxSessionStore implements SessionStore {
 +
 +    private static final Logger logger = LoggerFactory.getLogger(KnoxSessionStore.class);
 +
 +    public static final String PAC4J_PASSWORD = "pac4j.password";
 +
 +    public static final String PAC4J_SESSION_PREFIX = "pac4j.session.";
 +
 +    private final JavaSerializationHelper javaSerializationHelper;
 +
 +    private final CryptoService cryptoService;
 +
 +    private final String clusterName;
 +
 +    private final String domainSuffix;
 +
 +    public KnoxSessionStore(final CryptoService cryptoService, final String clusterName, final String domainSuffix) {
 +        javaSerializationHelper = new JavaSerializationHelper();
 +        this.cryptoService = cryptoService;
 +        this.clusterName = clusterName;
 +        this.domainSuffix = domainSuffix;
 +    }
 +
 +    public String getOrCreateSessionId(WebContext context) {
 +        return null;
 +    }
 +
 +    private Serializable decryptBase64(final String v) {
 +        if (v != null && v.length() > 0) {
 +            byte[] bytes = Base64.decodeBase64(v);
 +            EncryptionResult result = EncryptionResult.fromByteArray(bytes);
 +            byte[] clear = cryptoService.decryptForCluster(this.clusterName,
 +                    PAC4J_PASSWORD,
 +                    result.cipher,
 +                    result.iv,
 +                    result.salt);
 +            if (clear != null) {
 +                return javaSerializationHelper.unserializeFromBytes(clear);
 +            }
 +        }
 +        return null;
 +    }
 +
 +    public Object get(WebContext context, String key) {
 +        final Cookie cookie = ContextHelper.getCookie(context, PAC4J_SESSION_PREFIX + key);
 +        Object value = null;
 +        if (cookie != null) {
 +            value = decryptBase64(cookie.getValue());
 +        }
 +        logger.debug("Get from session: {} = {}", key, value);
 +        return value;
 +    }
 +
 +    private String encryptBase64(final Object o) {
-         if (o == null || o.equals("")) {
++        if (o == null || o.equals("")
++            || (o instanceof Map<?,?> && ((Map<?,?>)o).isEmpty())) {
 +            return null;
 +        } else {
 +            final byte[] bytes = javaSerializationHelper.serializeToBytes((Serializable) o);
 +            EncryptionResult result = cryptoService.encryptForCluster(this.clusterName, PAC4J_PASSWORD, bytes);
 +            return Base64.encodeBase64String(result.toByteAray());
 +        }
 +    }
 +
 +    public void set(WebContext context, String key, Object value) {
 +        logger.debug("Save in session: {} = {}", key, value);
 +        final Cookie cookie = new Cookie(PAC4J_SESSION_PREFIX + key, encryptBase64(value));
 +        try {
 +            String domain = Urls.getDomainName(context.getFullRequestURL(), this.domainSuffix);
 +            if (domain == null) {
 +                domain = context.getServerName();
 +            }
 +            cookie.setDomain(domain);
 +        } catch (final Exception e) {
 +            throw new TechnicalException(e);
 +        }
 +        cookie.setHttpOnly(true);
 +        cookie.setSecure(ContextHelper.isHttpsOrSecure(context));
 +        context.addResponseCookie(cookie);
 +    }
++
++    @Override
++    public SessionStore buildFromTrackableSession(WebContext arg0, Object arg1) {
++        // Not applicable: this cookie-based store has no trackable server-side session.
++        return null;
++    }
++
++    @Override
++    public boolean destroySession(WebContext arg0) {
++        // Nothing to destroy server-side; session state lives only in cookies.
++        return false;
++    }
++
++    @Override
++    public Object getTrackableSession(WebContext arg0) {
++        // No server-side session object exists to expose.
++        return null;
++    }
++
++    @Override
++    public boolean renewSession(WebContext arg0) {
++        // Cookie-based sessions cannot be renewed server-side.
++        return false;
++    }
 +}

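To illustrate the cookie mechanics, here is a small hypothetical helper showing how a value ends up in an encrypted "pac4j.session."-prefixed cookie via set(); the crypto service, request, and response are assumed to be supplied by the surrounding gateway machinery, and the cluster name mirrors the test below.

    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    import org.apache.knox.gateway.pac4j.session.KnoxSessionStore;
    import org.apache.knox.gateway.services.security.CryptoService;
    import org.pac4j.core.context.J2EContext;

    public class SessionCookieSketch {
      public static void saveToCookie(CryptoService cryptoService,
                                      HttpServletRequest request,
                                      HttpServletResponse response) {
        KnoxSessionStore store = new KnoxSessionStore(cryptoService, "knox", null);
        J2EContext context = new J2EContext(request, response, store);
        // set() serializes the value, encrypts it for the cluster, Base64-encodes
        // it, and adds an HttpOnly cookie named "pac4j.session.requestedUrl".
        store.set(context, "requestedUrl", "https://gateway.example.com/original");
      }
    }
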
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
index 606d042,0000000..e4e0462
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
+++ b/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
@@@ -1,150 -1,0 +1,150 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j;
 +
 +import org.apache.knox.gateway.audit.api.AuditContext;
 +import org.apache.knox.gateway.audit.api.AuditService;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.pac4j.filter.Pac4jDispatcherFilter;
 +import org.apache.knox.gateway.pac4j.filter.Pac4jIdentityAdapter;
 +import org.apache.knox.gateway.pac4j.session.KnoxSessionStore;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;
 +import org.junit.Test;
 +import org.pac4j.core.client.Clients;
 +import org.pac4j.core.context.Pac4jConstants;
 +import org.pac4j.http.client.indirect.IndirectBasicAuthClient;
 +
 +import javax.servlet.*;
 +import javax.servlet.http.*;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import static org.mockito.Mockito.*;
 +import static org.junit.Assert.*;
 +
 +/**
 + * This class simulates a full authentication process using pac4j.
 + */
 +public class Pac4jProviderTest {
 +
 +    private static final String LOCALHOST = "127.0.0.1";
 +    private static final String HADOOP_SERVICE_URL = "https://" + LOCALHOST + ":8443/gateway/sandox/webhdfs/v1/tmp?op=LISTSTATUS";
 +    private static final String KNOXSSO_SERVICE_URL = "https://" + LOCALHOST + ":8443/gateway/idp/api/v1/websso";
 +    private static final String PAC4J_CALLBACK_URL = KNOXSSO_SERVICE_URL;
 +    private static final String ORIGINAL_URL = "originalUrl";
 +    private static final String CLUSTER_NAME = "knox";
 +    private static final String PAC4J_PASSWORD = "pwdfortest";
 +    private static final String CLIENT_CLASS = IndirectBasicAuthClient.class.getSimpleName();
 +    private static final String USERNAME = "jleleu";
 +
 +    @Test
 +    public void test() throws Exception {
 +        final AliasService aliasService = mock(AliasService.class);
 +        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD, true)).thenReturn(PAC4J_PASSWORD.toCharArray());
 +        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD)).thenReturn(PAC4J_PASSWORD.toCharArray());
 +
 +        final DefaultCryptoService cryptoService = new DefaultCryptoService();
 +        cryptoService.setAliasService(aliasService);
 +
 +        final GatewayServices services = mock(GatewayServices.class);
 +        when(services.getService(GatewayServices.CRYPTO_SERVICE)).thenReturn(cryptoService);
 +        when(services.getService(GatewayServices.ALIAS_SERVICE)).thenReturn(aliasService);
 +
 +        final ServletContext context = mock(ServletContext.class);
 +        when(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).thenReturn(services);
 +        when(context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE)).thenReturn(CLUSTER_NAME);
 +
 +        final FilterConfig config = mock(FilterConfig.class);
 +        when(config.getServletContext()).thenReturn(context);
 +        when(config.getInitParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_URL)).thenReturn(PAC4J_CALLBACK_URL);
-         when(config.getInitParameter(Pac4jConstants.CLIENT_NAME)).thenReturn(Pac4jDispatcherFilter.TEST_BASIC_AUTH);
++        when(config.getInitParameter("clientName")).thenReturn(Pac4jDispatcherFilter.TEST_BASIC_AUTH);
 +
 +        final Pac4jDispatcherFilter dispatcher = new Pac4jDispatcherFilter();
 +        dispatcher.init(config);
 +        final Pac4jIdentityAdapter adapter = new Pac4jIdentityAdapter();
 +        adapter.init(config);
-         adapter.setAuditor(mock(Auditor.class));
++        Pac4jIdentityAdapter.setAuditor(mock(Auditor.class));
 +        final AuditService auditService = mock(AuditService.class);
 +        when(auditService.getContext()).thenReturn(mock(AuditContext.class));
-         adapter.setAuditService(auditService);
++        Pac4jIdentityAdapter.setAuditService(auditService);
 +
 +        // step 1: call the KnoxSSO service with an original url pointing to a Hadoop service (redirected by the SSOCookieProvider)
 +        MockHttpServletRequest request = new MockHttpServletRequest();
 +        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
 +        request.setCookies(new Cookie[0]);
 +        request.setServerName(LOCALHOST);
 +        MockHttpServletResponse response = new MockHttpServletResponse();
 +        FilterChain filterChain = mock(FilterChain.class);
 +        dispatcher.doFilter(request, response, filterChain);
 +        // it should be a redirection to the idp topology
 +        assertEquals(302, response.getStatus());
 +        assertEquals(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS, response.getHeaders().get("Location"));
 +        // we should have one cookie for the saved requested url
 +        List<Cookie> cookies = response.getCookies();
 +        assertEquals(1, cookies.size());
 +        final Cookie requestedUrlCookie = cookies.get(0);
 +        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL, requestedUrlCookie.getName());
 +
 +        // step 2: send credentials to the callback url (callback from the identity provider)
 +        request = new MockHttpServletRequest();
 +        request.setCookies(new Cookie[]{requestedUrlCookie});
 +        request.setRequestURL(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS);
 +        request.addParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER, "true");
 +        request.addParameter(Clients.DEFAULT_CLIENT_NAME_PARAMETER, CLIENT_CLASS);
 +        request.addHeader("Authorization", "Basic amxlbGV1OmpsZWxldQ==");
 +        request.setServerName(LOCALHOST);
 +        response = new MockHttpServletResponse();
 +        filterChain = mock(FilterChain.class);
 +        dispatcher.doFilter(request, response, filterChain);
 +        // it should be a redirection to the original url
 +        assertEquals(302, response.getStatus());
 +        assertEquals(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL, response.getHeaders().get("Location"));
 +        // we should have 3 cookies, among them the user profile
 +        cookies = response.getCookies();
 +        Map<String, String> mapCookies = new HashMap<>();
 +        assertEquals(3, cookies.size());
 +        for (final Cookie cookie : cookies) {
 +            mapCookies.put(cookie.getName(), cookie.getValue());
 +        }
 +        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + CLIENT_CLASS + "$attemptedAuthentication"));
-         assertNotNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILE));
++        assertNotNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES));
 +        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL));
 +
 +        // step 3: turn pac4j identity into KnoxSSO identity
 +        request = new MockHttpServletRequest();
 +        request.setCookies(cookies.toArray(new Cookie[cookies.size()]));
 +        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
 +        request.setServerName(LOCALHOST);
 +        response = new MockHttpServletResponse();
 +        filterChain = mock(FilterChain.class);
 +        dispatcher.doFilter(request, response, filterChain);
 +        assertEquals(0, response.getStatus());
 +        adapter.doFilter(request, response, filterChain);
 +        cookies = response.getCookies();
 +        assertEquals(1, cookies.size());
 +        final Cookie userProfileCookie = cookies.get(0);
 +        // the user profile has been cleaned
-         assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILE, userProfileCookie.getName());
++        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES, userProfileCookie.getName());
 +        assertNull(userProfileCookie.getValue());
 +        assertEquals(USERNAME, adapter.getTestIdentifier());
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/main/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityService.java
index 7f52b51,0000000..5fc3148
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityService.java
@@@ -1,226 -1,0 +1,240 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.token.impl;
 +
 +import java.security.KeyStoreException;
 +import java.security.Principal;
 +import java.security.PublicKey;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.util.Map;
++import java.util.Set;
 +import java.util.List;
 +import java.util.ArrayList;
++import java.util.HashSet;
 +
 +import javax.security.auth.Subject;
 +
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.AliasServiceException;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +public class DefaultTokenAuthorityService implements JWTokenAuthority, Service {
 +
 +  private static final String SIGNING_KEY_PASSPHRASE = "signing.key.passphrase";
++  private static final Set<String> SUPPORTED_SIG_ALGS = new HashSet<>();
 +  private AliasService as = null;
 +  private KeystoreService ks = null;
 +  String signingKeyAlias = null;
 +
++  static {
++      // Only standard RSA signature algorithms are accepted
++      // https://tools.ietf.org/html/rfc7518
++      SUPPORTED_SIG_ALGS.add("RS256");
++      SUPPORTED_SIG_ALGS.add("RS384");
++      SUPPORTED_SIG_ALGS.add("RS512");
++      SUPPORTED_SIG_ALGS.add("PS256");
++      SUPPORTED_SIG_ALGS.add("PS384");
++      SUPPORTED_SIG_ALGS.add("PS512");
++  }
++
 +  public void setKeystoreService(KeystoreService ks) {
 +    this.ks = ks;
 +  }
 +
 +  public void setAliasService(AliasService as) {
 +    this.as = as;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.provider.federation.jwt.JWTokenAuthority#issueToken(javax.security.auth.Subject, java.lang.String)
 +   */
 +  @Override
 +  public JWT issueToken(Subject subject, String algorithm) throws TokenServiceException {
 +    Principal p = (Principal) subject.getPrincipals().toArray()[0];
 +    return issueToken(p, algorithm);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.provider.federation.jwt.JWTokenAuthority#issueToken(java.security.Principal, java.lang.String)
 +   */
 +  @Override
 +  public JWT issueToken(Principal p, String algorithm) throws TokenServiceException {
 +    return issueToken(p, null, algorithm);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.provider.federation.jwt.JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, long)
 +   */
 +  @Override
 +  public JWT issueToken(Principal p, String algorithm, long expires) throws TokenServiceException {
 +    return issueToken(p, (String)null, algorithm, expires);
 +  }
 +
 +  public JWT issueToken(Principal p, String audience, String algorithm)
 +      throws TokenServiceException {
 +    return issueToken(p, audience, algorithm, -1);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.provider.federation.jwt.JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, java.lang.String, long)
 +   */
 +  @Override
 +  public JWT issueToken(Principal p, String audience, String algorithm, long expires)
 +      throws TokenServiceException {
-     ArrayList<String> audiences = null;
++    List<String> audiences = null;
 +    if (audience != null) {
 +      audiences = new ArrayList<String>();
 +      audiences.add(audience);
 +    }
 +    return issueToken(p, audiences, algorithm, expires);
 +  }
 +
 +  @Override
 +  public JWT issueToken(Principal p, List<String> audiences, String algorithm, long expires)
 +      throws TokenServiceException {
 +    String[] claimArray = new String[4];
 +    claimArray[0] = "KNOXSSO";
 +    claimArray[1] = p.getName();
 +    claimArray[2] = null;
 +    if (expires == -1) {
 +      claimArray[3] = null;
 +    }
 +    else {
 +      claimArray[3] = String.valueOf(expires);
 +    }
 +
-     JWTToken token = null;
-     if ("RS256".equals(algorithm)) {
-       token = new JWTToken("RS256", claimArray, audiences);
++    JWT token = null;
++    if (SUPPORTED_SIG_ALGS.contains(algorithm)) {
++      token = new JWTToken(algorithm, claimArray, audiences);
 +      RSAPrivateKey key;
 +      char[] passphrase = null;
 +      try {
 +        passphrase = getSigningKeyPassphrase();
 +      } catch (AliasServiceException e) {
 +        throw new TokenServiceException(e);
 +      }
 +      try {
 +        key = (RSAPrivateKey) ks.getSigningKey(getSigningKeyAlias(),
 +            passphrase);
 +        JWSSigner signer = new RSASSASigner(key);
 +        token.sign(signer);
 +      } catch (KeystoreServiceException e) {
 +        throw new TokenServiceException(e);
 +      }
 +    }
 +    else {
 +      throw new TokenServiceException("Cannot issue token - Unsupported algorithm");
 +    }
 +
 +    return token;
 +  }
 +
 +  private char[] getSigningKeyPassphrase() throws AliasServiceException {
 +    char[] phrase = as.getPasswordFromAliasForGateway(SIGNING_KEY_PASSPHRASE);
 +    if (phrase == null) {
 +      phrase = as.getGatewayIdentityPassphrase();
 +    }
 +    return phrase;
 +  }
 +
 +  private String getSigningKeyAlias() {
 +    if (signingKeyAlias == null) {
 +      return "gateway-identity";
 +    }
 +    return signingKeyAlias;
 +  }
 +
 +  @Override
 +  public boolean verifyToken(JWT token)
 +      throws TokenServiceException {
 +    return verifyToken(token, null);
 +  }
 +
 +  @Override
 +  public boolean verifyToken(JWT token, RSAPublicKey publicKey)
 +      throws TokenServiceException {
 +    boolean rc = false;
 +    PublicKey key;
 +    try {
 +      if (publicKey == null) {
 +        key = ks.getSigningKeystore().getCertificate(getSigningKeyAlias()).getPublicKey();
 +      }
 +      else {
 +        key = publicKey;
 +      }
 +      JWSVerifier verifier = new RSASSAVerifier((RSAPublicKey) key);
 +      // TODO: interrogate the token for issuer claim in order to determine the public key to use for verification
 +      // consider jwk for specifying the key too
 +      rc = token.verify(verifier);
 +    } catch (KeyStoreException e) {
 +      throw new TokenServiceException("Cannot verify token.", e);
 +    } catch (KeystoreServiceException e) {
 +      throw new TokenServiceException("Cannot verify token.", e);
 +    }
 +    return rc;
 +  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options)
 +      throws ServiceLifecycleException {
 +    if (as == null || ks == null) {
 +      throw new ServiceLifecycleException("Alias or Keystore service is not set");
 +    }
 +    signingKeyAlias = config.getSigningKeyAlias();
 +
 +    @SuppressWarnings("unused")
 +    RSAPrivateKey key;
 +    char[] passphrase = null;
 +    try {
 +      passphrase = as.getPasswordFromAliasForGateway(SIGNING_KEY_PASSPHRASE);
 +      if (passphrase != null) {
 +        key = (RSAPrivateKey) ks.getSigningKey(getSigningKeyAlias(),
 +            passphrase);
 +        if (key == null) {
 +          throw new ServiceLifecycleException("Provisioned passphrase cannot be used to acquire signing key.");
 +        }
 +      }
 +    } catch (AliasServiceException e) {
 +      throw new ServiceLifecycleException("Provisioned signing key passphrase cannot be acquired.", e);
 +    } catch (KeystoreServiceException e) {
 +      throw new ServiceLifecycleException("Provisioned signing key passphrase cannot be acquired.", e);
 +    }
 +  }
 +
 +  @Override
 +  public void start() throws ServiceLifecycleException {
 +  }
 +
 +  @Override
 +  public void stop() throws ServiceLifecycleException {
 +  }
 +}

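A sketch of driving the token authority end to end, using only methods defined above; the alias/keystore services and gateway config are assumed to come from the running gateway, and the wrapper class is hypothetical.

    import java.security.Principal;
    import java.util.Collections;

    import org.apache.knox.gateway.config.GatewayConfig;
    import org.apache.knox.gateway.services.security.AliasService;
    import org.apache.knox.gateway.services.security.KeystoreService;
    import org.apache.knox.gateway.services.security.token.impl.JWT;
    import org.apache.knox.gateway.services.token.impl.DefaultTokenAuthorityService;

    public class TokenAuthoritySketch {
      public static JWT issueAndVerify(AliasService aliasService,
                                       KeystoreService keystoreService,
                                       GatewayConfig config,
                                       Principal principal) throws Exception {
        DefaultTokenAuthorityService authority = new DefaultTokenAuthorityService();
        authority.setAliasService(aliasService);
        authority.setKeystoreService(keystoreService);
        authority.init(config, Collections.<String, String>emptyMap());

        // Any algorithm in SUPPORTED_SIG_ALGS is accepted; anything else fails
        // with a TokenServiceException.
        JWT token = authority.issueToken(principal, "RS256");
        if (!authority.verifyToken(token)) {
          throw new IllegalStateException("token failed verification");
        }
        return token;
      }
    }
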
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
index 9f6f762,0000000..455b0fa
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
@@@ -1,673 -1,0 +1,689 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.knox.gateway.services.topology.impl;
 +
 +
 +import org.apache.commons.digester3.Digester;
 +import org.apache.commons.digester3.binder.DigesterLoader;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.topology.TopologyMonitor;
 +import org.apache.knox.gateway.topology.TopologyProvider;
 +import org.apache.knox.gateway.topology.builder.TopologyBuilder;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.topology.xml.AmbariFormatXmlTopologyRules;
 +import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
 +import org.eclipse.persistence.jaxb.JAXBContextProperties;
 +import org.xml.sax.SAXException;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import static org.apache.commons.digester3.binder.DigesterLoader.newLoader;
 +
 +
 +public class DefaultTopologyService
 +    extends FileAlterationListenerAdaptor
 +    implements TopologyService, TopologyMonitor, TopologyProvider, FileFilter, FileAlterationListener {
 +
 +  private static Auditor auditor = AuditServiceFactory.getAuditService().getAuditor(
 +    AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +    AuditConstants.KNOX_COMPONENT_NAME);
 +
 +  private static final List<String> SUPPORTED_TOPOLOGY_FILE_EXTENSIONS = new ArrayList<String>();
 +  static {
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("xml");
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("conf");
 +  }
 +
 +  private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
 +  private static DigesterLoader digesterLoader = newLoader(new KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules());
 +  private List<FileAlterationMonitor> monitors = new ArrayList<>();
 +  private File topologiesDirectory;
 +  private File descriptorsDirectory;
 +
 +  private Set<TopologyListener> listeners;
 +  private volatile Map<File, Topology> topologies;
 +  private AliasService aliasService;
 +
 +
 +  private Topology loadTopology(File file) throws IOException, SAXException, URISyntaxException, InterruptedException {
 +    final long TIMEOUT = 250; //ms
 +    final long DELAY = 50; //ms
 +    log.loadingTopologyFile(file.getAbsolutePath());
 +    Topology topology;
 +    long start = System.currentTimeMillis();
 +    while (true) {
 +      try {
 +        topology = loadTopologyAttempt(file);
 +        break;
 +      } catch (IOException | SAXException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      }
 +    }
 +    return topology;
 +  }
 +
 +  private Topology loadTopologyAttempt(File file) throws IOException, SAXException, URISyntaxException {
 +    Topology topology;
 +    Digester digester = digesterLoader.newDigester();
 +    TopologyBuilder topologyBuilder;
 +    try (InputStream in = FileUtils.openInputStream(file)) {
 +      topologyBuilder = digester.parse(in);
 +    }
 +    if (null == topologyBuilder) {
 +      return null;
 +    }
 +    topology = topologyBuilder.build();
 +    topology.setUri(file.toURI());
 +    topology.setName(FilenameUtils.removeExtension(file.getName()));
 +    topology.setTimestamp(file.lastModified());
 +    return topology;
 +  }
 +
 +  private void redeployTopology(Topology topology) {
 +    File topologyFile = new File(topology.getUri());
 +    try {
 +      TopologyValidator tv = new TopologyValidator(topology);
 +
 +      if(!tv.validateTopology()) {
 +        throw new SAXException(tv.getErrorString());
 +      }
 +
 +      long start = System.currentTimeMillis();
 +      long limit = 1000L; // One second.
 +      long elapsed = 1;
 +      while (elapsed <= limit) {
 +        try {
 +          long origTimestamp = topologyFile.lastModified();
 +          long setTimestamp = Math.max(System.currentTimeMillis(), topologyFile.lastModified() + elapsed);
 +          if(topologyFile.setLastModified(setTimestamp)) {
 +            long newTimestamp = topologyFile.lastModified();
 +            if(newTimestamp > origTimestamp) {
 +              break;
 +            } else {
 +              Thread.sleep(10);
 +              elapsed = System.currentTimeMillis() - start;
 +              continue;
 +            }
 +          } else {
 +            auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +                ActionOutcome.FAILURE);
 +            log.failedToRedeployTopology(topology.getName());
 +            break;
 +          }
 +        } catch (InterruptedException e) {
 +          auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +              ActionOutcome.FAILURE);
 +          log.failedToRedeployTopology(topology.getName(), e);
 +          Thread.currentThread().interrupt();
 +          break;
 +        }
 +      }
 +    } catch (SAXException e) {
 +      auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToRedeployTopology(topology.getName(), e);
 +    }
 +  }
 +
 +  private List<TopologyEvent> createChangeEvents(
 +      Map<File, Topology> oldTopologies,
 +      Map<File, Topology> newTopologies) {
 +    ArrayList<TopologyEvent> events = new ArrayList<TopologyEvent>();
 +    // Go through the old topologies and find anything that was deleted.
 +    for (File file : oldTopologies.keySet()) {
 +      if (!newTopologies.containsKey(file)) {
 +        events.add(new TopologyEvent(TopologyEvent.Type.DELETED, oldTopologies.get(file)));
 +      }
 +    }
 +    // Go through the new topologies and figure out what was updated vs added.
 +    for (File file : newTopologies.keySet()) {
 +      if (oldTopologies.containsKey(file)) {
 +        Topology oldTopology = oldTopologies.get(file);
 +        Topology newTopology = newTopologies.get(file);
 +        if (newTopology.getTimestamp() > oldTopology.getTimestamp()) {
 +          events.add(new TopologyEvent(TopologyEvent.Type.UPDATED, newTopologies.get(file)));
 +        }
 +      } else {
 +        events.add(new TopologyEvent(TopologyEvent.Type.CREATED, newTopologies.get(file)));
 +      }
 +    }
 +    return events;
 +  }
 +
 +  private File calculateAbsoluteTopologiesDir(GatewayConfig config) {
 +    String normalizedTopologyDir = FilenameUtils.normalize(config.getGatewayTopologyDir());
 +    File topoDir = new File(normalizedTopologyDir);
 +    topoDir = topoDir.getAbsoluteFile();
 +    return topoDir;
 +  }
 +
 +  private File calculateAbsoluteConfigDir(GatewayConfig config) {
 +    File configDir = null;
 +
 +    String path = FilenameUtils.normalize(config.getGatewayConfDir());
 +    if (path != null) {
 +      configDir = new File(path);
 +    } else {
 +      configDir = (new File(config.getGatewayTopologyDir())).getParentFile();
 +    }
 +    configDir = configDir.getAbsoluteFile();
 +
 +    return configDir;
 +  }
 +
 +  private void  initListener(FileAlterationMonitor  monitor,
 +                            File                   directory,
 +                            FileFilter             filter,
 +                            FileAlterationListener listener) {
 +    monitors.add(monitor);
 +    FileAlterationObserver observer = new FileAlterationObserver(directory, filter);
 +    observer.addListener(listener);
 +    monitor.addObserver(observer);
 +  }
 +
 +  private void initListener(File directory, FileFilter filter, FileAlterationListener listener) throws IOException, SAXException {
 +    // Use a 5-second monitoring interval; profiling showed that more frequent
 +    // polling is rather expensive in terms of generated garbage objects.
 +    initListener(new FileAlterationMonitor(5000L), directory, filter, listener);
 +  }
 +
 +  private Map<File, Topology> loadTopologies(File directory) {
 +    Map<File, Topology> map = new HashMap<>();
 +    if (directory.isDirectory() && directory.canRead()) {
 +      for (File file : directory.listFiles(this)) {
 +        try {
 +          Topology loadTopology = loadTopology(file);
 +          if (null != loadTopology) {
 +            map.put(file, loadTopology);
 +          } else {
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +              ActionOutcome.FAILURE);
 +            log.failedToLoadTopology(file.getAbsolutePath());
 +          }
 +        } catch (Exception e) {
 +          // Log and audit the failure, but keep loading the remaining topologies.
 +          auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +            ActionOutcome.FAILURE);
 +          log.failedToLoadTopology(file.getAbsolutePath(), e);
 +        }
 +      }
 +    }
 +    return map;
 +  }
 +
 +  public void setAliasService(AliasService as) {
 +    this.aliasService = as;
 +  }
 +
 +  public void deployTopology(Topology t){
 +
 +    try {
 +      File temp = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml.temp");
 +      Package topologyPkg = Topology.class.getPackage();
 +      String pkgName = topologyPkg.getName();
 +      String bindingFile = pkgName.replace(".", "/") + "/topology_binding-xml.xml";
 +
 +      Map<String, Object> properties = new HashMap<>(1);
 +      properties.put(JAXBContextProperties.OXM_METADATA_SOURCE, bindingFile);
 +      JAXBContext jc = JAXBContext.newInstance(pkgName, Topology.class.getClassLoader(), properties);
 +      Marshaller mr = jc.createMarshaller();
 +
 +      mr.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
 +      mr.marshal(t, temp);
 +
 +      File topology = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml");
 +      if(!temp.renameTo(topology)) {
 +        FileUtils.forceDelete(temp);
 +        throw new IOException("Could not rename temp file");
 +      }
 +
 +      // Validate the deployed topology, surfacing the errors if it is invalid.
 +      TopologyValidator validator = new TopologyValidator( topology.getAbsolutePath() );
 +      if( !validator.validateTopology() ){
 +        throw new SAXException( validator.getErrorString() );
 +      }
 +
 +
 +    } catch (JAXBException e) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), e);
 +    } catch (IOException io) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), io);
 +    } catch (SAXException sx){
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), sx);
 +    }
 +    reloadTopologies();
 +  }
 +
 +  public void redeployTopologies(String topologyName) {
 +
 +    for (Topology topology : getTopologies()) {
 +      if (topologyName == null || topologyName.equals(topology.getName())) {
 +        redeployTopology(topology);
 +      }
 +    }
 +
 +  }
 +
 +  public void reloadTopologies() {
 +    try {
 +      synchronized (this) {
 +        Map<File, Topology> oldTopologies = topologies;
 +        Map<File, Topology> newTopologies = loadTopologies(topologiesDirectory);
 +        List<TopologyEvent> events = createChangeEvents(oldTopologies, newTopologies);
 +        topologies = newTopologies;
 +        notifyChangeListeners(events);
 +      }
 +    } catch (Exception e) {
 +      // Log the failure rather than propagate it, so the monitor keeps running.
 +      log.failedToReloadTopologies(e);
 +    }
 +  }
 +
 +  public void deleteTopology(Topology t) {
 +    File topoDir = topologiesDirectory;
 +
 +    if(topoDir.isDirectory() && topoDir.canRead()) {
 +      File[] results = topoDir.listFiles();
 +      for (File f : results) {
 +        String fName = FilenameUtils.getBaseName(f.getName());
 +        if(fName.equals(t.getName())) {
 +          f.delete();
 +        }
 +      }
 +    }
 +    reloadTopologies();
 +  }
 +
 +  private void notifyChangeListeners(List<TopologyEvent> events) {
 +    for (TopologyListener listener : listeners) {
 +      try {
 +        listener.handleTopologyEvent(events);
 +      } catch (RuntimeException e) {
 +        auditor.audit(Action.LOAD, "Topology_Event", ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +        log.failedToHandleTopologyEvents(e);
 +      }
 +    }
 +  }
 +
 +  public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
 +    File tFile = null;
 +    Map<String, List<String>> urls = new HashMap<>();
 +    if(topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
 +      for(File f : topologiesDirectory.listFiles()){
 +        if(FilenameUtils.removeExtension(f.getName()).equals(t.getName())){
 +          tFile = f;
 +        }
 +      }
 +    }
 +    Set<ServiceDefinition> defs;
 +    if(tFile != null) {
 +      defs = ServiceDefinitionsLoader.getServiceDefinitions(new File(config.getGatewayServicesDir()));
 +
 +      for(ServiceDefinition def : defs) {
 +        urls.put(def.getRole(), def.getTestURLs());
 +      }
 +    }
 +    return urls;
 +  }
 +
 +  public Collection<Topology> getTopologies() {
 +    Map<File, Topology> map = topologies;
 +    return Collections.unmodifiableCollection(map.values());
 +  }
 +
 +  @Override
 +  public void addTopologyChangeListener(TopologyListener listener) {
 +    listeners.add(listener);
 +  }
 +
 +  @Override
 +  public void startMonitor() throws Exception {
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.start();
 +    }
 +  }
 +
 +  @Override
 +  public void stopMonitor() throws Exception {
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.stop();
 +    }
 +  }
 +
 +  @Override
 +  public boolean accept(File file) {
 +    boolean accept = false;
 +    if (!file.isDirectory() && file.canRead()) {
 +      String extension = FilenameUtils.getExtension(file.getName());
 +      if (SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains(extension)) {
 +        accept = true;
 +      }
 +    }
 +    return accept;
 +  }
 +
 +  @Override
 +  public void onFileCreate(File file) {
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileDelete(java.io.File file) {
 +    // For full topology descriptors, we need to make sure to delete any corresponding simple descriptors to prevent
 +    // unintended subsequent generation of the topology descriptor
 +    for (String ext : DescriptorsMonitor.SUPPORTED_EXTENSIONS) {
 +      File simpleDesc =
 +              new File(descriptorsDirectory, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +      if (simpleDesc.exists()) {
 +        simpleDesc.delete();
 +      }
 +    }
 +
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileChange(File file) {
 +    reloadTopologies();
 +  }
 +
 +  @Override
 +  public void stop() {
 +
 +  }
 +
 +  @Override
 +  public void start() {
 +
 +  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
 +
 +    try {
 +      listeners = new HashSet<>();
 +      topologies = new HashMap<>();
 +
 +      topologiesDirectory = calculateAbsoluteTopologiesDir(config);
 +
 +      File configDirectory = calculateAbsoluteConfigDir(config);
 +      descriptorsDirectory = new File(configDirectory, "descriptors");
 +      File sharedProvidersDirectory = new File(configDirectory, "shared-providers");
 +
 +      // Add support for conf/topologies
 +      initListener(topologiesDirectory, this, this);
 +
 +      // Add support for conf/descriptors
 +      DescriptorsMonitor dm = new DescriptorsMonitor(topologiesDirectory, aliasService);
 +      initListener(descriptorsDirectory,
 +                   dm,
 +                   dm);
 +
 +      // Add support for conf/shared-providers
 +      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(dm, descriptorsDirectory);
 +      initListener(sharedProvidersDirectory, spm, spm);
 +
++      // For all the descriptors currently in the descriptors dir at start-up time, trigger topology generation.
++      // This happens prior to the start-up loading of the topologies.
++      String[] descriptorFilenames =  descriptorsDirectory.list();
++      if (descriptorFilenames != null) {
++          for (String descriptorFilename : descriptorFilenames) {
++              if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
++                  dm.onFileChange(new File(descriptorsDirectory, descriptorFilename));
++              }
++          }
++      }
++
 +    } catch (IOException | SAXException io) {
 +      throw new ServiceLifecycleException(io.getMessage());
 +    }
 +  }
 +
 +
 +  /**
 +   * Change handler for simple descriptors
 +   */
 +  public static class DescriptorsMonitor extends FileAlterationListenerAdaptor
 +                                          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<String>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("json");
 +      SUPPORTED_EXTENSIONS.add("yml");
++      SUPPORTED_EXTENSIONS.add("yaml");
 +    }
 +
 +    private File topologiesDir;
 +
 +    private AliasService aliasService;
 +
 +    private Map<String, List<String>> providerConfigReferences = new HashMap<>();
 +
 +
++    static boolean isDescriptorFile(String filename) {
++      return SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(filename));
++    }
++
 +    public DescriptorsMonitor(File topologiesDir, AliasService aliasService) {
 +      this.topologiesDir  = topologiesDir;
 +      this.aliasService   = aliasService;
 +    }
 +
 +    List<String> getReferencingDescriptors(String providerConfigPath) {
 +      List<String> result = providerConfigReferences.get(providerConfigPath);
 +      if (result == null) {
 +        result = Collections.emptyList();
 +      }
 +      return result;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      // For simple descriptors, we need to make sure to delete any corresponding full topology descriptors to trigger undeployment
 +      for (String ext : DefaultTopologyService.SUPPORTED_TOPOLOGY_FILE_EXTENSIONS) {
 +        File topologyFile =
 +                new File(topologiesDir, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +        if (topologyFile.exists()) {
 +          topologyFile.delete();
 +        }
 +      }
 +
 +      String normalizedFilePath = FilenameUtils.normalize(file.getAbsolutePath());
 +      String reference = null;
 +      for (Map.Entry<String, List<String>> entry : providerConfigReferences.entrySet()) {
 +        if (entry.getValue().contains(normalizedFilePath)) {
 +          reference = entry.getKey();
 +          break;
 +        }
 +      }
 +      if (reference != null) {
 +        providerConfigReferences.get(reference).remove(normalizedFilePath);
 +      }
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      try {
 +        // When a simple descriptor has been created or modified, generate the new topology descriptor
 +        Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
 +
 +        // Add the provider config reference relationship for handling updates to the provider config
 +        String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
 +        if (!providerConfigReferences.containsKey(providerConfig)) {
 +          providerConfigReferences.put(providerConfig, new ArrayList<String>());
 +        }
 +        List<String> refs = providerConfigReferences.get(providerConfig);
 +        String descriptorName = FilenameUtils.normalize(file.getAbsolutePath());
 +        if (!refs.contains(descriptorName)) {
 +          // Need to check if descriptor had previously referenced another provider config, so it can be removed
 +          for (List<String> descs : providerConfigReferences.values()) {
 +            if (descs.contains(descriptorName)) {
 +              descs.remove(descriptorName);
 +            }
 +          }
 +
 +          // Add the current reference relationship
 +          refs.add(descriptorName);
 +        }
 +      } catch (Exception e) {
 +        log.simpleDescriptorHandlingError(file.getName(), e);
 +      }
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +  /**
 +   * Change handler for shared provider configurations
 +   */
 +  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor
 +          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("xml");
 +    }
 +
 +    private DescriptorsMonitor descriptorsMonitor;
 +    private File descriptorsDir;
 +
 +
 +    SharedProviderConfigMonitor(DescriptorsMonitor descMonitor, File descriptorsDir) {
 +      this.descriptorsMonitor = descMonitor;
 +      this.descriptorsDir     = descriptorsDir;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      // For shared provider configuration, we need to update any simple descriptors that reference it
 +      for (File descriptor : getReferencingDescriptors(file)) {
 +        descriptor.setLastModified(System.currentTimeMillis());
 +      }
 +    }
 +
 +    private List<File> getReferencingDescriptors(File sharedProviderConfig) {
 +      List<File> references = new ArrayList<>();
 +
 +      String configPath = FilenameUtils.normalize(sharedProviderConfig.getAbsolutePath());
 +      for (String reference : descriptorsMonitor.getReferencingDescriptors(configPath)) {
 +        references.add(new File(reference));
 +      }
 +
 +      return references;
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +}
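
For context, the commons-io monitoring pieces used by DefaultTopologyService
wire together as follows. This is a minimal standalone sketch, assuming a
hypothetical "conf/topologies" directory and the same 5-second polling
interval; the listener callback is where reloadTopologies() would be invoked:

    import java.io.File;
    import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
    import org.apache.commons.io.monitor.FileAlterationMonitor;
    import org.apache.commons.io.monitor.FileAlterationObserver;

    public class MonitorSketch {
      public static void main(String[] args) throws Exception {
        File dir = new File("conf/topologies"); // hypothetical directory
        FileAlterationObserver observer = new FileAlterationObserver(dir);
        observer.addListener(new FileAlterationListenerAdaptor() {
          @Override
          public void onFileChange(File file) {
            // DefaultTopologyService reloads all topologies at this point.
            System.out.println("Changed: " + file);
          }
        });
        FileAlterationMonitor monitor = new FileAlterationMonitor(5000L); // poll every 5s
        monitor.addObserver(observer);
        monitor.start(); // spawns the polling thread
        // monitor.stop() should be called on shutdown.
      }
    }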

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
index 85c0535,0000000..25997b1
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
@@@ -1,46 -1,0 +1,48 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.util.List;
++import java.util.Map;
 +
 +public interface SimpleDescriptor {
 +
 +    String getName();
 +
 +    String getDiscoveryType();
 +
 +    String getDiscoveryAddress();
 +
 +    String getDiscoveryUser();
 +
 +    String getDiscoveryPasswordAlias();
 +
 +    String getClusterName();
 +
 +    String getProviderConfig();
 +
 +    List<Service> getServices();
 +
 +
 +    interface Service {
 +        String getName();
 +
++        Map<String, String> getParams();
++
 +        List<String> getURLs();
 +    }
- 
 +}
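
The new getParams() accessor lets a simple descriptor carry per-service
parameters in addition to URLs. A rough illustration of the JSON binding,
using a hypothetical standalone POJO whose @JsonProperty annotations mirror
those of SimpleDescriptorImpl.ServiceImpl below (the "replayBufferSize"
parameter and URL are made up for the example):

    import java.util.List;
    import java.util.Map;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ServiceBindingSketch {
      // Hypothetical stand-in for SimpleDescriptorImpl.ServiceImpl.
      public static class ServiceStub {
        @JsonProperty("name")   public String name;
        @JsonProperty("params") public Map<String, String> params;
        @JsonProperty("urls")   public List<String> urls;
      }

      public static void main(String[] args) throws Exception {
        String json = "{ \"name\": \"HIVE\","
                    + "  \"params\": { \"replayBufferSize\": \"8\" },"
                    + "  \"urls\": [ \"http://host:10001/cliservice\" ] }";
        ServiceStub svc = new ObjectMapper().readValue(json, ServiceStub.class);
        System.out.println(svc.params.get("replayBufferSize")); // prints 8
      }
    }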

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
index 16d5b81,0000000..b54432d
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
@@@ -1,234 -1,0 +1,267 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.topology.discovery.DefaultServiceDiscoveryConfig;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileWriter;
 +import java.io.InputStreamReader;
 +import java.io.IOException;
 +
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +
 +
 +/**
 + * Processes simple topology descriptors, producing full topology files, which can subsequently be deployed to the
 + * gateway.
 + */
 +public class SimpleDescriptorHandler {
 +
 +    private static final Service[] NO_GATEWAY_SERVICES = new Service[]{};
 +
 +    private static final SimpleDescriptorMessages log = MessagesFactory.get(SimpleDescriptorMessages.class);
 +
 +    public static Map<String, File> handle(File desc) throws IOException {
 +        return handle(desc, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, Service...gatewayServices) throws IOException {
 +        return handle(desc, desc.getParentFile(), gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory) throws IOException {
 +        return handle(desc, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory, Service...gatewayServices) throws IOException {
 +        return handle(SimpleDescriptorFactory.parse(desc.getAbsolutePath()), desc.getParentFile(), destDirectory, gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory) {
 +        return handle(desc, srcDirectory, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory, Service...gatewayServices) {
 +        Map<String, File> result = new HashMap<>();
 +
 +        File topologyDescriptor;
 +
 +        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
 +        sdc.setUser(desc.getDiscoveryUser());
 +        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
 +        ServiceDiscovery sd = ServiceDiscoveryFactory.get(desc.getDiscoveryType(), gatewayServices);
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, desc.getClusterName());
 +
-         Map<String, List<String>> serviceURLs = new HashMap<>();
++        List<String> validServiceNames = new ArrayList<>();
++
++        Map<String, Map<String, String>> serviceParams = new HashMap<>();
++        Map<String, List<String>>        serviceURLs   = new HashMap<>();
 +
 +        if (cluster != null) {
 +            for (SimpleDescriptor.Service descService : desc.getServices()) {
 +                String serviceName = descService.getName();
 +
 +                List<String> descServiceURLs = descService.getURLs();
 +                if (descServiceURLs == null || descServiceURLs.isEmpty()) {
 +                    descServiceURLs = cluster.getServiceURLs(serviceName);
 +                }
 +
 +                // Validate the discovered service URLs
 +                List<String> validURLs = new ArrayList<>();
 +                if (descServiceURLs != null && !descServiceURLs.isEmpty()) {
 +                    // Validate the URL(s)
 +                    for (String descServiceURL : descServiceURLs) {
 +                        if (validateURL(serviceName, descServiceURL)) {
 +                            validURLs.add(descServiceURL);
 +                        }
 +                    }
++
++                    if (!validURLs.isEmpty()) {
++                        validServiceNames.add(serviceName);
++                    }
 +                }
 +
 +                // If there is at least one valid URL associated with the service, then add it to the map
 +                if (!validURLs.isEmpty()) {
 +                    serviceURLs.put(serviceName, validURLs);
 +                } else {
 +                    log.failedToDiscoverClusterServiceURLs(serviceName, cluster.getName());
 +                }
++
++                // Service params
++                if (descService.getParams() != null) {
++                    serviceParams.put(serviceName, descService.getParams());
++                    if (!validServiceNames.contains(serviceName)) {
++                        validServiceNames.add(serviceName);
++                    }
++                }
 +            }
 +        } else {
 +            log.failedToDiscoverClusterServices(desc.getClusterName());
 +        }
 +
 +        BufferedWriter fw = null;
 +        topologyDescriptor = null;
-         File providerConfig = null;
++        File providerConfig;
 +        try {
 +            // Verify that the referenced provider configuration exists before attempting to read it
 +            providerConfig = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
 +            if (providerConfig == null) {
 +                log.failedToResolveProviderConfigRef(desc.getProviderConfig());
 +                throw new IllegalArgumentException("Unresolved provider configuration reference: " +
 +                                                   desc.getProviderConfig() + " ; Topology update aborted!");
 +            }
 +            result.put("reference", providerConfig);
 +
 +            // TODO: Should the contents of the provider config be validated before incorporating it into the topology?
 +
 +            String topologyFilename = desc.getName();
 +            if (topologyFilename == null) {
 +                topologyFilename = desc.getClusterName();
 +            }
 +            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
 +            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
 +
 +            fw.write("<topology>\n");
 +
 +            // Copy the externalized provider configuration content into the topology descriptor in-line
 +            try (InputStreamReader policyReader = new InputStreamReader(new FileInputStream(providerConfig))) {
 +                char[] buffer = new char[1024];
 +                int count;
 +                while ((count = policyReader.read(buffer)) > 0) {
 +                    fw.write(buffer, 0, count);
 +                }
 +            }
 +
 +            // Sort the service names to write the services alphabetically
-             List<String> serviceNames = new ArrayList<>(serviceURLs.keySet());
++            List<String> serviceNames = new ArrayList<>(validServiceNames);
 +            Collections.sort(serviceNames);
 +
 +            // Write the service declarations
 +            for (String serviceName : serviceNames) {
 +                fw.write("    <service>\n");
 +                fw.write("        <role>" + serviceName + "</role>\n");
-                 for (String url : serviceURLs.get(serviceName)) {
-                     fw.write("        <url>" + url + "</url>\n");
++
++                // URLs
++                List<String> urls = serviceURLs.get(serviceName);
++                if (urls != null) {
++                    for (String url : urls) {
++                        fw.write("        <url>" + url + "</url>\n");
++                    }
 +                }
++
++                // Params
++                Map<String, String> svcParams = serviceParams.get(serviceName);
++                if (svcParams != null) {
++                    for (Map.Entry<String, String> param : svcParams.entrySet()) {
++                        fw.write("        <param>\n");
++                        fw.write("            <name>" + param.getKey() + "</name>\n");
++                        fw.write("            <value>" + param.getValue() + "</value>\n");
++                        fw.write("        </param>\n");
++                    }
++                }
++
 +                fw.write("    </service>\n");
 +            }
 +
 +            fw.write("</topology>\n");
 +
 +            fw.flush();
 +        } catch (IOException e) {
 +            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
 +            topologyDescriptor.delete();
 +        } finally {
 +            if (fw != null) {
 +                try {
 +                    fw.close();
 +                } catch (IOException e) {
 +                    // ignore
 +                }
 +            }
 +        }
 +
 +        result.put("topology", topologyDescriptor);
 +        return result;
 +    }
 +
 +    private static boolean validateURL(String serviceName, String url) {
 +        boolean result = false;
 +
 +        if (url != null && !url.isEmpty()) {
 +            try {
 +                new URI(url);
 +                result = true;
 +            } catch (URISyntaxException e) {
 +                log.serviceURLValidationFailed(serviceName, url, e);
 +            }
 +        }
 +
 +        return result;
 +    }
 +
++
 +    private static File resolveProviderConfigurationReference(String reference, File srcDirectory) {
 +        File providerConfig;
 +
 +        // If the reference includes a path
 +        if (reference.contains(File.separator)) {
 +            // Check if it's an absolute path
 +            providerConfig = new File(reference);
 +            if (!providerConfig.exists()) {
 +                // If it's not an absolute path, try treating it as a relative path
 +                providerConfig = new File(srcDirectory, reference);
 +                if (!providerConfig.exists()) {
 +                    providerConfig = null;
 +                }
 +            }
 +        } else { // No file path, just a name
 +            // Check if it's co-located with the referencing descriptor
 +            providerConfig = new File(srcDirectory, reference);
 +            if (!providerConfig.exists()) {
 +                // Check the shared-providers config location
 +                File sharedProvidersDir = new File(srcDirectory, "../shared-providers");
 +                if (sharedProvidersDir.exists()) {
 +                    providerConfig = new File(sharedProvidersDir, reference);
 +                    if (!providerConfig.exists()) {
 +                        // Check if it's a valid name without the extension
 +                        providerConfig = new File(sharedProvidersDir, reference + ".xml");
 +                        if (!providerConfig.exists()) {
 +                            providerConfig = null;
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        return providerConfig;
 +    }
 +
 +}
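
Callers hand the handler a descriptor file plus a destination directory and
pick the generated artifacts out of the returned map, keyed by "topology"
and "reference" as written above. A minimal usage sketch (both paths are
hypothetical):

    import java.io.File;
    import java.util.Map;
    import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;

    public class HandlerSketch {
      public static void main(String[] args) throws Exception {
        File descriptor = new File("conf/descriptors/sandbox.json"); // hypothetical
        File topologiesDir = new File("conf/topologies");            // hypothetical
        Map<String, File> result = SimpleDescriptorHandler.handle(descriptor, topologiesDir);
        File topology = result.get("topology");       // generated <name>.xml topology
        File providerConfig = result.get("reference"); // resolved provider configuration
        System.out.println("Generated " + topology + " from " + providerConfig);
      }
    }

With the params support added here, each service element in the generated
topology carries <param><name>...</name><value>...</value></param> children
alongside its <url> elements.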

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
index 0ec7acf,0000000..4eb1954
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
@@@ -1,111 -1,0 +1,123 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import com.fasterxml.jackson.annotation.JsonProperty;
 +
 +import java.util.ArrayList;
 +import java.util.List;
++import java.util.Map;
 +
 +class SimpleDescriptorImpl implements SimpleDescriptor {
 +
 +    @JsonProperty("discovery-type")
 +    private String discoveryType;
 +
 +    @JsonProperty("discovery-address")
 +    private String discoveryAddress;
 +
 +    @JsonProperty("discovery-user")
 +    private String discoveryUser;
 +
 +    @JsonProperty("discovery-pwd-alias")
 +    private String discoveryPasswordAlias;
 +
 +    @JsonProperty("provider-config-ref")
 +    private String providerConfig;
 +
 +    @JsonProperty("cluster")
 +    private String cluster;
 +
 +    @JsonProperty("services")
 +    private List<ServiceImpl> services;
 +
 +    private String name = null;
 +
 +    void setName(String name) {
 +        this.name = name;
 +    }
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +    @Override
 +    public String getDiscoveryType() {
 +        return discoveryType;
 +    }
 +
 +    @Override
 +    public String getDiscoveryAddress() {
 +        return discoveryAddress;
 +    }
 +
 +    @Override
 +    public String getDiscoveryUser() {
 +        return discoveryUser;
 +    }
 +
 +    @Override
 +    public String getDiscoveryPasswordAlias() {
 +        return discoveryPasswordAlias;
 +    }
 +
 +    @Override
 +    public String getClusterName() {
 +        return cluster;
 +    }
 +
 +    @Override
 +    public String getProviderConfig() {
 +        return providerConfig;
 +    }
 +
 +    @Override
 +    public List<Service> getServices() {
 +        List<Service> result = new ArrayList<>();
 +        if (services != null) {
 +            result.addAll(services);
 +        }
 +        return result;
 +    }
 +
 +    public static class ServiceImpl implements Service {
++        @JsonProperty("name")
 +        private String name;
++
++        @JsonProperty("params")
++        private Map<String, String> params;
++
++        @JsonProperty("urls")
 +        private List<String> urls;
 +
 +        @Override
 +        public String getName() {
 +            return name;
 +        }
 +
 +        @Override
++        public Map<String, String> getParams() {
++            return params;
++        }
++
++        @Override
 +        public List<String> getURLs() {
 +            return urls;
 +        }
 +    }
 +
 +}
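
Putting the annotations together, a complete simple descriptor in JSON form
would bind to SimpleDescriptorImpl roughly as follows; the field names come
from the @JsonProperty annotations above, while the values are illustrative
only. SimpleDescriptorFactory.parse(...) is what produces the bound instance:

    // Illustrative descriptor JSON (values are made up):
    // {
    //   "discovery-type":      "AMBARI",
    //   "discovery-address":   "http://ambari-host:8080",
    //   "discovery-user":      "discovery-user-name",
    //   "discovery-pwd-alias": "ambari.discovery.password",
    //   "provider-config-ref": "sandbox-providers",
    //   "cluster":             "Sandbox",
    //   "services": [
    //     { "name": "NAMENODE" },
    //     { "name": "HIVE", "params": { "replayBufferSize": "8" } },
    //     { "name": "WEBHDFS", "urls": [ "http://webhdfs-host:50070/webhdfs" ] }
    //   ]
    // }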


[49/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
index 928c37e,0000000..a987433
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
@@@ -1,2154 -1,0 +1,2205 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.util;
 +
 +import java.io.BufferedReader;
 +import java.io.Console;
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.InputStreamReader;
 +import java.io.PrintStream;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.security.cert.Certificate;
 +import java.util.Arrays;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +import java.util.UUID;
 +import javax.net.ssl.SSLContext;
 +import javax.net.ssl.SSLException;
 +
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.knox.gateway.GatewayCommandLine;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentFactory;
 +import org.apache.knox.gateway.services.CLIGatewayServices;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.services.security.impl.X509CertificateUtil;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.apache.http.client.ClientProtocolException;
 +import org.apache.http.client.methods.CloseableHttpResponse;
 +import org.apache.http.client.methods.HttpGet;
 +import org.apache.http.conn.ssl.SSLContexts;
 +import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
 +import org.apache.http.impl.client.CloseableHttpClient;
 +import org.apache.http.impl.client.HttpClients;
 +import org.apache.log4j.PropertyConfigurator;
 +import org.apache.shiro.SecurityUtils;
 +import org.apache.shiro.authc.AuthenticationException;
 +import org.apache.shiro.authc.UsernamePasswordToken;
 +import org.apache.shiro.config.ConfigurationException;
 +import org.apache.shiro.config.Ini;
 +import org.apache.shiro.config.IniSecurityManagerFactory;
 +import org.apache.shiro.subject.Subject;
 +import org.apache.shiro.util.Factory;
 +import org.apache.shiro.util.ThreadContext;
 +import org.eclipse.persistence.oxm.MediaType;
 +import org.jboss.shrinkwrap.api.exporter.ExplodedExporter;
 +import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
 +
 +/**
 + * Command line utility for managing the Knox gateway: the master secret,
 + * credential aliases, certificates, topologies, and remote registry entries.
 + */
 +public class KnoxCLI extends Configured implements Tool {
 +
 +  private static final String USAGE_PREFIX = "KnoxCLI {cmd} [options]";
 +  static final private String COMMANDS =
 +      "   [--help]\n" +
 +      "   [" + VersionCommand.USAGE + "]\n" +
 +      "   [" + MasterCreateCommand.USAGE + "]\n" +
 +      "   [" + CertCreateCommand.USAGE + "]\n" +
 +      "   [" + CertExportCommand.USAGE + "]\n" +
 +      "   [" + AliasCreateCommand.USAGE + "]\n" +
 +      "   [" + AliasDeleteCommand.USAGE + "]\n" +
 +      "   [" + AliasListCommand.USAGE + "]\n" +
 +      "   [" + RedeployCommand.USAGE + "]\n" +
 +      "   [" + ListTopologiesCommand.USAGE + "]\n" +
 +      "   [" + ValidateTopologyCommand.USAGE + "]\n" +
 +      "   [" + LDAPAuthCommand.USAGE + "]\n" +
 +      "   [" + LDAPSysBindCommand.USAGE + "]\n" +
 +      "   [" + ServiceTestCommand.USAGE + "]\n" +
 +      "   [" + RemoteRegistryClientsListCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryListProviderConfigsCommand.USAGE + "]\n" +
 +      "   [" + RemoteRegistryUploadProviderConfigCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryListDescriptorsCommand.USAGE + "]\n" +
 +      "   [" + RemoteRegistryUploadDescriptorCommand.USAGE + "]\n" +
 +      "   [" + RemoteRegistryDeleteProviderConfigCommand.USAGE + "]\n" +
 +      "   [" + RemoteRegistryDeleteDescriptorCommand.USAGE + "]\n" +
 +      "   [" + RemoteRegistryGetACLCommand.USAGE + "]\n";
 +
 +  /** allows stdout to be captured if necessary */
 +  public PrintStream out = System.out;
 +  /** allows stderr to be captured if necessary */
 +  public PrintStream err = System.err;
 +
 +  private static GatewayServices services = new CLIGatewayServices();
 +  private Command command;
 +  private String value = null;
 +  private String cluster = null;
 +  private String path = null;
 +  private String generate = "false";
 +  private String hostname = null;
 +  private String port = null;
 +  private boolean force = false;
 +  private boolean debug = false;
 +  private String user = null;
 +  private String pass = null;
 +  private boolean groups = false;
 +
 +  private String remoteRegistryClient = null;
 +  private String remoteRegistryEntryName = null;
 +
 +  // For testing only
 +  private String master = null;
 +  private String type = null;
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.hadoop.util.Tool#run(java.lang.String[])
 +   */
 +  @Override
 +  public int run(String[] args) throws Exception {
 +    int exitCode = 0;
 +    try {
 +      exitCode = init(args);
 +      if (exitCode != 0) {
 +        return exitCode;
 +      }
 +      if (command != null && command.validate()) {
 +        initializeServices( command instanceof MasterCreateCommand );
 +        command.execute();
 +      } else if (!(command instanceof MasterCreateCommand)){
 +        out.println("ERROR: Invalid Command" + "\n" + "Unrecognized option:" +
 +            args[0] + "\n" +
 +            "A fatal exception has occurred. Program will exit.");
 +        exitCode = -2;
 +      }
 +    } catch (ServiceLifecycleException sle) {
 +      out.println("ERROR: Internal Error: Please refer to the knoxcli.log " +
 +          "file for details. " + sle.getMessage());
 +    } catch (Exception e) {
 +      e.printStackTrace( err );
 +      err.flush();
 +      return -3;
 +    }
 +    return exitCode;
 +  }
 +
 +  GatewayServices getGatewayServices() {
 +    return services;
 +  }
 +
 +  private void initializeServices(boolean persisting) throws ServiceLifecycleException {
 +    GatewayConfig config = getGatewayConfig();
 +    Map<String,String> options = new HashMap<>();
 +    options.put(GatewayCommandLine.PERSIST_LONG, Boolean.toString(persisting));
 +    if (master != null) {
 +      options.put("master", master);
 +    }
 +    services.init(config, options);
 +  }
 +
 +  /**
 +   * Parse the command line arguments and initialize the data
 +   * <pre>
 +   * % knoxcli version
 +   * % knoxcli list-topologies
 +   * % knoxcli master-create keyName [--size size] [--generate]
 +   * % knoxcli create-alias alias [--cluster clustername] [--generate] [--value v]
 +   * % knoxcli list-alias [--cluster clustername]
 +   * % knoxcli delete-alias alias [--cluster clustername]
 +   * % knoxcli create-cert alias [--hostname h]
 +   * % knoxcli redeploy [--cluster clustername]
 +   * % knoxcli validate-topology [--cluster clustername] | [--path <path/to/file>]
 +   * % knoxcli user-auth-test [--cluster clustername] [--u username] [--p password]
 +   * % knoxcli system-user-auth-test [--cluster clustername] [--d]
 +   * % knoxcli service-test [--u user] [--p password] [--cluster clustername] [--hostname name] [--port port]
 +   * % knoxcli list-registry-clients
 +   * % knoxcli get-registry-acl entryName --registry-client name
++   * % knoxcli list-provider-configs --registry-client name
 +   * % knoxcli upload-provider-config filePath --registry-client name [--entry-name entryName]
++   * % knoxcli list-descriptors --registry-client name
 +   * % knoxcli upload-descriptor filePath --registry-client name [--entry-name entryName]
 +   * % knoxcli delete-provider-config providerConfig --registry-client name
 +   * % knoxcli delete-descriptor descriptor --registry-client name
 +   * </pre>
 +   * @param args
 +   * @return
 +   * @throws IOException
 +   */
 +  private int init(String[] args) throws IOException {
 +    if (args.length == 0) {
 +      printKnoxShellUsage();
 +      return -1;
 +    }
 +    for (int i = 0; i < args.length; i++) { // parse command line
 +      if (args[i].equals("create-master")) {
 +        command = new MasterCreateCommand();
 +        if ((args.length > i + 1) && args[i + 1].equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("delete-alias")) {
 +        String alias = null;
 +        if (args.length >= 2) {
 +          alias = args[++i];
 +        }
 +        command = new AliasDeleteCommand(alias);
 +        if (alias == null || alias.equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("create-alias")) {
 +        String alias = null;
 +        if (args.length >= 2) {
 +          alias = args[++i];
 +        }
 +        command = new AliasCreateCommand(alias);
 +        if (alias == null || alias.equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("create-cert")) {
 +        command = new CertCreateCommand();
 +        if ((args.length > i + 1) && args[i + 1].equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("export-cert")) {
 +        command = new CertExportCommand();
 +        if ((args.length > i + 1) && args[i + 1].equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      }else if(args[i].equals("user-auth-test")) {
 +        if(i + 1 >= args.length) {
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new LDAPAuthCommand();
 +        }
 +      } else if(args[i].equals("system-user-auth-test")) {
 +        if (i + 1 >= args.length){
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new LDAPSysBindCommand();
 +        }
 +      } else if (args[i].equals("list-alias")) {
 +        command = new AliasListCommand();
 +      } else if (args[i].equals("--value")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.value = args[++i];
 +        if ( command != null && command instanceof MasterCreateCommand ) {
 +          this.master = this.value;
 +        }
 +      } else if ( args[i].equals("version") ) {
 +        command = new VersionCommand();
 +      } else if ( args[i].equals("redeploy") ) {
 +        command = new RedeployCommand();
 +      } else if ( args[i].equals("validate-topology") ) {
 +        if(i + 1 >= args.length) {
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new ValidateTopologyCommand();
 +        }
 +      } else if( args[i].equals("list-topologies") ){
 +        command = new ListTopologiesCommand();
 +      }else if ( args[i].equals("--cluster") || args[i].equals("--topology") ) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.cluster = args[++i];
 +      } else if (args[i].equals("service-test")) {
 +        if( i + 1 >= args.length) {
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new ServiceTestCommand();
 +        }
 +      } else if (args[i].equals("--generate")) {
 +        if ( command != null && command instanceof MasterCreateCommand ) {
 +          this.master = UUID.randomUUID().toString();
 +        } else {
 +          this.generate = "true";
 +        }
 +      } else if(args[i].equals("--type")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.type = args[++i];
 +      } else if(args[i].equals("--path")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.path = args[++i];
 +      }else if (args[i].equals("--hostname")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.hostname = args[++i];
 +      } else if (args[i].equals("--port")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.port = args[++i];
 +      } else if (args[i].equals("--master")) {
 +        // For testing only
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.master = args[++i];
 +      } else if (args[i].equals("--force")) {
 +        this.force = true;
 +      } else if (args[i].equals("--help")) {
 +        printKnoxShellUsage();
 +        return -1;
 +      } else if(args[i].equals("--d")) {
 +        this.debug = true;
 +      } else if(args[i].equals("--u")) {
 +        if(i + 1 <= args.length) {
 +          this.user = args[++i];
 +        } else{
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if(args[i].equals("--p")) {
 +        if(i + 1 <= args.length) {
 +          this.pass = args[++i];
 +        } else{
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("--g")) {
 +        this.groups = true;
 +      } else if (args[i].equals("list-registry-clients")) {
 +        command = new RemoteRegistryClientsListCommand();
 +      } else if (args[i].equals("--registry-client")) {
 +        if (i + 1 >= args.length || args[i + 1].startsWith("-")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.remoteRegistryClient = args[++i];
++      } else if (args[i].equalsIgnoreCase("list-provider-configs")) {
++        command = new RemoteRegistryListProviderConfigsCommand();
++      } else if (args[i].equalsIgnoreCase("list-descriptors")) {
++        command = new RemoteRegistryListDescriptorsCommand();
 +      } else if (args[i].equalsIgnoreCase("upload-provider-config")) {
 +        String fileName;
 +        if (i + 1 < args.length) { // a file name must follow the command
 +          fileName = args[++i];
 +          command = new RemoteRegistryUploadProviderConfigCommand(fileName);
 +        } else {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("upload-descriptor")) {
 +        String fileName;
 +        if (i + 1 < args.length) { // a file name must follow the command
 +          fileName = args[++i];
 +          command = new RemoteRegistryUploadDescriptorCommand(fileName);
 +        } else {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("--entry-name")) {
 +        if (i + 1 < args.length) { // an entry name must follow --entry-name
 +          remoteRegistryEntryName = args[++i];
 +        } else {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("delete-descriptor")) {
 +        if (i + 1 < args.length) { // an entry name must follow the command
 +          String entry = args[++i];
 +          command = new RemoteRegistryDeleteDescriptorCommand(entry);
 +        } else {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("delete-provider-config")) {
 +        if (i + 1 < args.length) { // an entry name must follow the command
 +          String entry = args[++i];
 +          command = new RemoteRegistryDeleteProviderConfigCommand(entry);
 +        } else {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equalsIgnoreCase("get-registry-acl")) {
 +        if (i + 1 < args.length) { // an entry name must follow the command
 +          String entry = args[++i];
 +          command = new RemoteRegistryGetACLCommand(entry);
 +        } else {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else {
 +        printKnoxShellUsage();
 +        //ToolRunner.printGenericCommandUsage(System.err);
 +        return -1;
 +      }
 +    }
 +    return 0;
 +  }
 +
 +  private void printKnoxShellUsage() {
 +    out.println( USAGE_PREFIX + "\n" + COMMANDS );
 +    if ( command != null ) {
 +      out.println(command.getUsage());
 +    } else {
 +      char[] chars = new char[79];
 +      Arrays.fill( chars, '=' );
 +      String div = new String( chars );
 +
 +      out.println( div );
 +      out.println( VersionCommand.USAGE + "\n\n" + VersionCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( MasterCreateCommand.USAGE + "\n\n" + MasterCreateCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( CertCreateCommand.USAGE + "\n\n" + CertCreateCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( CertExportCommand.USAGE + "\n\n" + CertExportCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( AliasCreateCommand.USAGE + "\n\n" + AliasCreateCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( AliasDeleteCommand.USAGE + "\n\n" + AliasDeleteCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( AliasListCommand.USAGE + "\n\n" + AliasListCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( RedeployCommand.USAGE + "\n\n" + RedeployCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println(ValidateTopologyCommand.USAGE + "\n\n" + ValidateTopologyCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(ListTopologiesCommand.USAGE + "\n\n" + ListTopologiesCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(LDAPAuthCommand.USAGE + "\n\n" + LDAPAuthCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(LDAPSysBindCommand.USAGE + "\n\n" + LDAPSysBindCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(ServiceTestCommand.USAGE + "\n\n" + ServiceTestCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(RemoteRegistryClientsListCommand.USAGE + "\n\n" + RemoteRegistryClientsListCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(RemoteRegistryGetACLCommand.USAGE + "\n\n" + RemoteRegistryGetACLCommand.DESC);
 +      out.println();
 +      out.println( div );
++      out.println(RemoteRegistryListProviderConfigsCommand.USAGE + "\n\n" + RemoteRegistryListProviderConfigsCommand.DESC);
++      out.println();
++      out.println( div );
++      out.println(RemoteRegistryListDescriptorsCommand.USAGE + "\n\n" + RemoteRegistryListDescriptorsCommand.DESC);
++      out.println();
++      out.println( div );
 +      out.println(RemoteRegistryUploadProviderConfigCommand.USAGE + "\n\n" + RemoteRegistryUploadProviderConfigCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(RemoteRegistryUploadDescriptorCommand.USAGE + "\n\n" + RemoteRegistryUploadDescriptorCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(RemoteRegistryDeleteProviderConfigCommand.USAGE + "\n\n" + RemoteRegistryDeleteProviderConfigCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(RemoteRegistryDeleteDescriptorCommand.USAGE + "\n\n" + RemoteRegistryDeleteDescriptorCommand.DESC);
 +      out.println();
 +      out.println( div );
 +    }
 +  }
 +
 +  private abstract class Command {
 +
 +    public boolean validate() {
 +      return true;
 +    }
 +
 +    protected Service getService(String serviceName) {
 +      // Default implementation: no service lookup is performed.
 +      return null;
 +    }
 +
 +    public abstract void execute() throws Exception;
 +
 +    public abstract String getUsage();
 +
 +    protected AliasService getAliasService() {
 +      return services.getService(GatewayServices.ALIAS_SERVICE);
 +    }
 +
 +    protected KeystoreService getKeystoreService() {
 +      return services.getService(GatewayServices.KEYSTORE_SERVICE);
 +    }
 +
 +    protected TopologyService getTopologyService() {
 +      return services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    }
 +
 +    protected RemoteConfigurationRegistryClientService getRemoteConfigRegistryClientService() {
 +      return services.getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
 +    }
 +
 +  }
 +
 + private class AliasListCommand extends Command {
 +
 +  public static final String USAGE = "list-alias [--cluster clustername]";
 +  public static final String DESC = "The list-alias command lists all of the aliases\n" +
 +                                    "for the given hadoop --cluster. The default\n" +
 +                                    "--cluster being the gateway itself.";
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     AliasService as = getAliasService();
 +      KeystoreService keystoreService = getKeystoreService();
 +
 +     if (cluster == null) {
 +       cluster = "__gateway";
 +     }
 +      boolean credentialStoreForClusterAvailable =
 +          keystoreService.isCredentialStoreForClusterAvailable(cluster);
 +      if (credentialStoreForClusterAvailable) {
 +        out.println("Listing aliases for: " + cluster);
 +        List<String> aliases = as.getAliasesForCluster(cluster);
 +        for (String alias : aliases) {
 +          out.println(alias);
 +        }
 +        out.println("\n" + aliases.size() + " items.");
 +      } else {
 +        out.println("Invalid cluster name provided: " + cluster);
 +      }
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 + }
 +
 + public class CertExportCommand extends Command {
 +
 +   public static final String USAGE = "export-cert";
 +   public static final String DESC = "The export-cert command exports the public certificate\n" +
 +                                     "from the a gateway.jks keystore with the alias of gateway-identity.";
 +   private static final String GATEWAY_CREDENTIAL_STORE_NAME = "__gateway";
 +   private static final String GATEWAY_IDENTITY_PASSPHRASE = "gateway-identity-passphrase";
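 +
 +   // Example invocation (assumes the standard bin/knoxcli.sh launcher; --type is optional):
 +   //   bin/knoxcli.sh export-cert --type PEM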
 +
 +    public CertExportCommand() {
 +    }
 +
 +    private GatewayConfig getGatewayConfig() {
 +      GatewayConfig result;
 +      Configuration conf = getConf();
 +      if( conf instanceof GatewayConfig ) { // instanceof is null-safe
 +        result = (GatewayConfig)conf;
 +      } else {
 +        result = new GatewayConfigImpl();
 +      }
 +      return result;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +     */
 +    @Override
 +    public void execute() throws Exception {
 +      KeystoreService ks = getKeystoreService();
 +
 +      AliasService as = getAliasService();
 +
 +      if (ks != null) {
 +        try {
 +          if (!ks.isKeystoreForGatewayAvailable()) {
 +            out.println("No keystore has been created for the gateway. Please use the create-cert command or populate with a CA signed cert of your own.");
 +          }
 +          char[] passphrase = as.getPasswordFromAliasForCluster(GATEWAY_CREDENTIAL_STORE_NAME, GATEWAY_IDENTITY_PASSPHRASE);
 +          if (passphrase == null) {
 +            MasterService ms = services.getService("MasterService");
 +            passphrase = ms.getMasterSecret();
 +          }
 +          Certificate cert = ks.getKeystoreForGateway().getCertificate("gateway-identity");
 +          String keyStoreDir = getGatewayConfig().getGatewaySecurityDir() + File.separator + "keystores" + File.separator;
 +          File ksd = new File(keyStoreDir);
 +          if (!ksd.exists()) {
 +            if( !ksd.mkdirs() ) {
 +              // certainly should not happen if the keystore is known to be available
 +              throw new ServiceLifecycleException("Unable to create keystores directory" + ksd.getAbsolutePath());
 +            }
 +          }
 +          if ("PEM".equals(type) || type == null) {
 +            X509CertificateUtil.writeCertificateToFile(cert, new File(keyStoreDir + "gateway-identity.pem"));
 +            out.println("Certificate gateway-identity has been successfully exported to: " + keyStoreDir + "gateway-identity.pem");
 +          }
 +          else if ("JKS".equals(type)) {
 +            X509CertificateUtil.writeCertificateToJKS(cert, new File(keyStoreDir + "gateway-client-trust.jks"));
 +            out.println("Certificate gateway-identity has been successfully exported to: " + keyStoreDir + "gateway-client-trust.jks");
 +          }
 +          else {
 +            out.println("Invalid type for export file provided. Export has not been done. Please use: [PEM|JKS] default value is PEM.");
 +          }
 +        } catch (KeystoreServiceException e) {
 +          throw new ServiceLifecycleException("Keystore was not loaded properly - the provided (or persisted) master secret may not match the password for the keystore.", e);
 +        }
 +      }
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +     */
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
 +
 + public class CertCreateCommand extends Command {
 +
 +  public static final String USAGE = "create-cert [--hostname h]";
 +  public static final String DESC = "The create-cert command creates and populates\n" +
 +                                    "a gateway.jks keystore with a self-signed certificate\n" +
 +                                    "to be used as the gateway identity. It also adds an alias\n" +
 +                                    "to the __gateway-credentials.jceks credential store for the\n" +
 +                                    "key passphrase.";
 +  private static final String GATEWAY_CREDENTIAL_STORE_NAME = "__gateway";
 +  private static final String GATEWAY_IDENTITY_PASSPHRASE = "gateway-identity-passphrase";
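 +
 +  // Example invocation (hostname is illustrative; assumes the standard bin/knoxcli.sh launcher):
 +  //   bin/knoxcli.sh create-cert --hostname gateway.example.com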
 +
 +   public CertCreateCommand() {
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     KeystoreService ks = getKeystoreService();
 +
 +     AliasService as = getAliasService();
 +
 +     if (ks != null) {
 +       try {
 +         if (!ks.isCredentialStoreForClusterAvailable(GATEWAY_CREDENTIAL_STORE_NAME)) {
 +//           log.creatingCredentialStoreForGateway();
 +           ks.createCredentialStoreForCluster(GATEWAY_CREDENTIAL_STORE_NAME);
 +         }
 +         else {
 +//           log.credentialStoreForGatewayFoundNotCreating();
 +         }
 +         // LET'S NOT GENERATE A DIFFERENT KEY PASSPHRASE BY DEFAULT ANYMORE
 +         // IF A DEPLOYMENT WANTS TO CHANGE THE KEY PASSPHRASE TO MAKE IT MORE SECURE THEN
 +         // THEY CAN ADD THE ALIAS EXPLICITLY WITH THE CLI
 +         //as.generateAliasForCluster(GATEWAY_CREDENTIAL_STORE_NAME, GATEWAY_IDENTITY_PASSPHRASE);
 +       } catch (KeystoreServiceException e) {
 +         throw new ServiceLifecycleException("Keystore was not loaded properly - the provided (or persisted) master secret may not match the password for the keystore.", e);
 +       }
 +
 +       try {
 +         if (!ks.isKeystoreForGatewayAvailable()) {
 +//           log.creatingKeyStoreForGateway();
 +           ks.createKeystoreForGateway();
 +         }
 +         else {
 +//           log.keyStoreForGatewayFoundNotCreating();
 +         }
 +         char[] passphrase = as.getPasswordFromAliasForCluster(GATEWAY_CREDENTIAL_STORE_NAME, GATEWAY_IDENTITY_PASSPHRASE);
 +         if (passphrase == null) {
 +           MasterService ms = services.getService("MasterService");
 +           passphrase = ms.getMasterSecret();
 +         }
 +         ks.addSelfSignedCertForGateway("gateway-identity", passphrase, hostname);
 +//         logAndValidateCertificate();
 +         out.println("Certificate gateway-identity has been successfully created.");
 +       } catch (KeystoreServiceException e) {
 +         throw new ServiceLifecycleException("Keystore was not loaded properly - the provided (or persisted) master secret may not match the password for the keystore.", e);
 +       }
 +     }
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 +
 + }
 +
 + public class AliasCreateCommand extends Command {
 +
 +  public static final String USAGE = "create-alias aliasname [--cluster clustername] " +
 +                                     "[ (--value v) | (--generate) ]";
 +  public static final String DESC = "The create-alias command will create an alias\n"
 +                                       + "and secret pair within the credential store for the\n"
 +                                       + "indicated --cluster otherwise within the gateway\n"
 +                                       + "credential store. The actual secret may be specified via\n"
 +                                       + "the --value option or --generate (will create a random secret\n"
 +                                       + "for you) or user will be prompt to provide password.";
 +
 +  private String name = null;
 +
 +  /**
 +    * @param alias the name of the alias to create
 +    */
 +   public AliasCreateCommand(String alias) {
 +     name = alias;
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     AliasService as = getAliasService();
 +     if (cluster == null) {
 +       cluster = "__gateway";
 +     }
 +     if (value != null) {
 +       as.addAliasForCluster(cluster, name, value);
 +       out.println(name + " has been successfully created.");
 +     }
 +     else {
 +       if ("true".equals(generate)) {
 +         as.generateAliasForCluster(cluster, name);
 +         out.println(name + " has been successfully generated.");
 +       }
 +       else {
 +          value = new String(promptUserForPassword());
 +          as.addAliasForCluster(cluster, name, value);
 +          out.println(name + " has been successfully created.");
 +       }
 +     }
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 +
 +    protected char[] promptUserForPassword() {
 +      char[] password = null;
 +      Console c = System.console();
 +      if (c == null) {
 +        System.err
 +            .println("No console to fetch password from user.Consider setting via --generate or --value.");
 +        System.exit(1);
 +      }
 +
 +      boolean noMatch;
 +      do {
 +        char[] newPassword1 = c.readPassword("Enter password: ");
 +        char[] newPassword2 = c.readPassword("Enter password again: ");
 +        noMatch = !Arrays.equals(newPassword1, newPassword2);
 +        if (noMatch) {
 +          c.format("Passwords don't match. Try again.%n");
 +        } else {
 +          password = Arrays.copyOf(newPassword1, newPassword1.length);
 +        }
 +        Arrays.fill(newPassword1, ' ');
 +        Arrays.fill(newPassword2, ' ');
 +      } while (noMatch);
 +      return password;
 +    }
 +
 + }
 +
 + /**
 +  * Command that deletes an alias from a cluster's credential store.
 +  */
 + public class AliasDeleteCommand extends Command {
 +  public static final String USAGE = "delete-alias aliasname [--cluster clustername]";
 +  public static final String DESC = "The delete-alias command removes the\n" +
 +                                    "indicated alias from the --cluster specific\n" +
 +                                    "credential store or the gateway credential store.";
 +
 +  private String name = null;
 +
 +  /**
 +    * @param alias the name of the alias to delete
 +    */
 +   public AliasDeleteCommand(String alias) {
 +     name = alias;
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     AliasService as = getAliasService();
 +      KeystoreService keystoreService = getKeystoreService();
 +     if (as != null) {
 +       if (cluster == null) {
 +         cluster = "__gateway";
 +       }
 +        boolean credentialStoreForClusterAvailable =
 +            keystoreService.isCredentialStoreForClusterAvailable(cluster);
 +        if (credentialStoreForClusterAvailable) {
 +          List<String> aliasesForCluster = as.getAliasesForCluster(cluster);
 +          if (null == aliasesForCluster || !aliasesForCluster.contains(name)) {
 +            out.println("Deletion of Alias: " + name + " from cluster: " + cluster + " Failed. "
 +                + "\n" + "No such alias exists in the cluster.");
 +          } else {
 +            as.removeAliasForCluster(cluster, name);
 +            out.println(name + " has been successfully deleted.");
 +          }
 +        } else {
 +          out.println("Invalid cluster name provided: " + cluster);
 +        }
 +     }
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 +
 + }
 +
 + /**
 +  * Command that creates and persists the gateway master secret.
 +  */
 + public class MasterCreateCommand extends Command {
 +  public static final String USAGE = "create-master [--force]";
 +  public static final String DESC = "The create-master command persists the\n" +
 +                                    "master secret in a file located at:\n" +
 +                                    "{GATEWAY_HOME}/data/security/master. It\n" +
 +                                    "will prompt the user for the secret to persist.\n" +
 +                                    "Use --force to overwrite the master secret.";
 +
 +   public MasterCreateCommand() {
 +   }
 +
 +   private GatewayConfig getGatewayConfig() {
 +     GatewayConfig result;
 +     Configuration conf = getConf();
 +     if( conf instanceof GatewayConfig ) { // instanceof is null-safe
 +       result = (GatewayConfig)conf;
 +     } else {
 +       result = new GatewayConfigImpl();
 +     }
 +     return result;
 +   }
 +
 +   @Override
 +   public boolean validate() {
 +     boolean valid = true;
 +     GatewayConfig config = getGatewayConfig();
 +     File dir = new File( config.getGatewaySecurityDir() );
 +     File file = new File( dir, "master" );
 +     if( file.exists() ) {
 +       if( force ) {
 +         if( !file.canWrite() ) {
 +           out.println(
 +               "This command requires write permissions on the master secret file: " +
 +                   file.getAbsolutePath() );
 +           valid = false;
 +         } else {
 +           valid = file.delete();
 +           if( !valid ) {
 +             out.println(
 +                 "Unable to delete the master secret file: " +
 +                     file.getAbsolutePath() );
 +           }
 +         }
 +       } else {
 +         out.println(
 +             "Master secret is already present on disk. " +
 +                 "Please be aware that overwriting it will require updating other security artifacts. " +
 +                 " Use --force to overwrite the existing master secret." );
 +         valid = false;
 +       }
 +     } else if( dir.exists() && !dir.canWrite() ) {
 +       out.println(
 +           "This command requires write permissions on the security directory: " +
 +               dir.getAbsolutePath() );
 +       valid = false;
 +     }
 +     return valid;
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     out.println("Master secret has been persisted to disk.");
 +   }
 +
 +   /* (non-Javadoc)
 +    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 + }
 +
 +  private class VersionCommand extends Command {
 +
 +    public static final String USAGE = "version";
 +    public static final String DESC = "Displays Knox version information.";
 +
 +    @Override
 +    public void execute() throws Exception {
 +      Properties buildProperties = loadBuildProperties();
 +      System.out.println(
 +          String.format(
 +              "Apache Knox: %s (%s)",
 +              buildProperties.getProperty( "build.version", "unknown" ),
 +              buildProperties.getProperty( "build.hash", "unknown" ) ) );
 +    }
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +  }
 +
 +  private class RedeployCommand extends Command {
 +
 +    public static final String USAGE = "redeploy [--cluster clustername]";
 +    public static final String DESC =
 +        "Redeploys one or all of the gateway's clusters (a.k.a topologies).";
 +
 +    @Override
 +    public void execute() throws Exception {
 +      TopologyService ts = getTopologyService();
 +      ts.reloadTopologies();
 +      if (cluster != null) {
 +        if (validateClusterName(cluster, ts)) {
 +          ts.redeployTopologies(cluster);
 +        }
 +        else {
 +          out.println("Invalid cluster name provided. Nothing to redeploy.");
 +        }
 +      }
 +    }
 +
 +    /**
 +     * @param cluster the cluster (topology) name to check
 +     * @param ts the topology service used to enumerate known topologies
 +     */
 +    private boolean validateClusterName(String cluster, TopologyService ts) {
 +      boolean valid = false;
 +      for (Topology t : ts.getTopologies() ) {
 +        if (t.getName().equals(cluster)) {
 +          valid = true;
 +          break;
 +        }
 +      }
 +      return valid;
 +    }
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +  }
 +
 +  private class ValidateTopologyCommand extends Command {
 +
 +    public static final String USAGE = "validate-topology [--cluster clustername] | [--path \"path/to/file\"]";
 +    public static final String DESC = "Ensures that a cluster's description (a.k.a topology) \n" +
 +        "follows the correct formatting rules.\n" +
 +        "use the list-topologies command to get a list of available cluster names";
 +    private String file = "";
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() throws Exception {
 +      GatewayConfig gc = getGatewayConfig();
 +      String topDir = gc.getGatewayTopologyDir();
 +
 +      if(path != null) {
 +        file = path;
 +      } else if(cluster == null) {
 +        // The following block of code retrieves the list of files in the topologies directory
 +        File tops = new File(topDir + "/topologies");
 +        if(tops.isDirectory()) {
 +          out.println("List of files available in the topologies directory");
 +          for (File f : tops.listFiles()) {
 +            if(f.getName().endsWith(".xml")) {
 +              String fName = f.getName().replace(".xml", "");
 +              out.println(fName);
 +            }
 +          }
 +          return;
 +        } else {
 +          out.println("Could not locate topologies directory");
 +          return;
 +        }
 +
 +      } else {
 +        file = topDir + "/" + cluster + ".xml";
 +      }
 +
 +      // The following block checks a topology against the XSD
 +      out.println();
 +      out.println("File to be validated: ");
 +      out.println(file);
 +      out.println("==========================================");
 +
 +      if(new File(file).exists()) {
 +        TopologyValidator tv = new TopologyValidator(file);
 +
 +        if(tv.validateTopology()) {
 +          out.println("Topology file validated successfully");
 +        } else {
 +          out.println(tv.getErrorString()) ;
 +          out.println("Topology validation unsuccessful");
 +        }
 +      } else {
 +        out.println("The topology file specified does not exist.");
 +      }
 +    }
 +
 +  }
 +
 +  private class ListTopologiesCommand extends Command {
 +
 +    public static final String USAGE = "list-topologies";
 +    public static final String DESC = "Retrieves a list of the available topologies within the\n" +
 +        "default topologies directory. Will return topologies that may not be deployed due\n" +
 +        "errors in file formatting.";
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +
 +      String confDir = getGatewayConfig().getGatewayConfDir();
 +      File tops = new File(confDir + "/topologies");
 +      out.println("List of files available in the topologies directory");
 +      out.println(tops.toString());
 +      if(tops.isDirectory()) {
 +        for (File f : tops.listFiles()) {
 +          if(f.getName().endsWith(".xml")) {
 +            String fName = f.getName().replace(".xml", "");
 +            out.println(fName);
 +          }
 +        }
 +      } else {
 +        out.println("ERR: Topologies directory does not exist.");
 +      }
 +
 +    }
 +
 +  }
 +
 +  private class LDAPCommand extends Command {
 +
 +    public static final String USAGE = "ldap-command";
 +    public static final String DESC = "This is an internal command. It should not be used.";
 +    protected String username = null;
 +    protected char[] password = null;
 +    protected static final String debugMessage = "For more information use --d for debug output.";
 +    protected Topology topology;
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +      out.println("This command does not have any functionality.");
 +    }
 +
 +
 +//    First define a few Exceptions
 +    protected class NoSuchTopologyException extends Exception {
 +      public NoSuchTopologyException() {}
 +      public NoSuchTopologyException(String message) { super(message); }
 +    }
 +    protected class MissingPasswordException extends Exception {
 +      public MissingPasswordException() {}
 +      public MissingPasswordException(String message) { super(message); }
 +    }
 +
 +    protected class MissingUsernameException extends Exception {
 +      public MissingUsernameException() {}
 +      public MissingUsernameException(String message) { super(message); }
 +    }
 +
 +    protected class BadSubjectException extends Exception {
 +      public BadSubjectException() {}
 +      public BadSubjectException(String message) { super(message); }
 +    }
 +
 +    protected class NoSuchProviderException extends Exception {
 +      public NoSuchProviderException() {}
 +      public NoSuchProviderException(String name, String role, String topology) {
 +        super("Could not find provider with role: " + role + ", name: " + name + " inside of topology: " + topology);
 +      }
 +    }
 +
 +    //    returns true if any errors were found (and printed)
 +    protected boolean hasShiroProviderErrors(Topology topology, boolean groupLookup) {
 +//      First let's define the variables that represent the ShiroProvider params
 +      String mainLdapRealm = "main.ldapRealm";
 +      String contextFactory = mainLdapRealm + ".contextFactory";
 +      String groupContextFactory = "main.ldapGroupContextFactory";
 +      String authorizationEnabled = mainLdapRealm + ".authorizationEnabled";
 +      String userSearchAttributeName = mainLdapRealm + ".userSearchAttributeName";
 +      String userObjectClass = mainLdapRealm + ".userObjectClass";
 +      String authenticationMechanism = mainLdapRealm + ".authenticationMechanism"; // (Should not be used up to v0.6.0)
 +      String searchBase = mainLdapRealm + ".searchBase";
 +      String groupSearchBase = mainLdapRealm + ".groupSearchBase";
 +      String userSearchBase = mainLdapRealm + ".userSearchBase";
 +      String groupObjectClass = mainLdapRealm + ".groupObjectClass";
 +      String memberAttribute = mainLdapRealm + ".memberAttribute";
 +      String memberAttributeValueTemplate = mainLdapRealm + ".memberAttributeValueTemplate";
 +      String systemUsername = contextFactory + ".systemUsername";
 +      String systemPassword = contextFactory + ".systemPassword";
 +      String url = contextFactory + ".url";
 +      String userDnTemplate = mainLdapRealm + ".userDnTemplate";
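 +
 +//      For illustration only (param values are hypothetical, in the style of the Knox demo LDAP):
 +//        main.ldapRealm.userDnTemplate = uid={0},ou=people,dc=hadoop,dc=apache,dc=org
 +//        main.ldapRealm.contextFactory.url = ldap://localhost:33389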
 +
 +
 +      Provider shiro = topology.getProvider("authentication", "ShiroProvider");
 +      if(shiro != null) {
 +        Map<String, String> params = shiro.getParams();
 +        int errs = 0;
 +        if(groupLookup) {
 +          int errors = 0;
 +          errors += hasParam(params, groupContextFactory, true) ? 0 : 1;
 +          errors += hasParam(params, groupObjectClass, true) ? 0 : 1;
 +          errors += hasParam(params, memberAttributeValueTemplate, true) ? 0 : 1;
 +          errors += hasParam(params, memberAttribute, true) ? 0 : 1;
 +          errors += hasParam(params, authorizationEnabled, true) ? 0 : 1;
 +          errors += hasParam(params, systemUsername, true) ? 0 : 1;
 +          errors += hasParam(params, systemPassword, true) ? 0 : 1;
 +          errors += hasParam(params, userSearchBase, true) ? 0 : 1;
 +          errors += hasParam(params, groupSearchBase, true) ? 0 : 1;
 +          errs += errors;
 +
 +        } else {
 +
 +//        Realm + Url is always required.
 +          errs += hasParam(params, mainLdapRealm, true) ? 0 : 1;
 +          errs += hasParam(params, url, true) ? 0 : 1;
 +
 +          if(hasParam(params, authorizationEnabled, false)) {
 +            int errors = 0;
 +            int searchBaseErrors = 0;
 +            errors += hasParam(params, systemUsername, true) ? 0 : 1;
 +            errors += hasParam(params, systemPassword, true) ? 0 : 1;
 +            searchBaseErrors += hasParam(params, searchBase, false) ? 0 : hasParam(params, userSearchBase, false) ? 0 : 1;
 +            if (searchBaseErrors > 0) {
 +              out.println("Warn: Both " + searchBase + " and " + userSearchBase + " are missing from the topology");
 +            }
 +            errors += searchBaseErrors;
 +            errs += errors;
 +          }
 +
 +//        If any one of these is present they must all be present
 +          if( hasParam(params, userSearchAttributeName, false) ||
 +              hasParam(params, userObjectClass, false) ||
 +              hasParam(params, searchBase, false) ||
 +              hasParam(params, userSearchBase, false)) {
 +
 +            int errors = 0;
 +            errors += hasParam(params, userSearchAttributeName, true) ? 0 : 1;
 +            errors += hasParam(params, userObjectClass, true) ? 0 : 1;
 +            errors += hasParam(params, searchBase, false) ? 0 : hasParam(params, userSearchBase, false) ? 0 : 1;
 +            errors += hasParam(params, systemUsername, true) ? 0 : 1;
 +            errors += hasParam(params, systemPassword, true) ? 0 : 1;
 +
 +            if(errors > 0) {
 +              out.println(userSearchAttributeName + " or " + userObjectClass + " or " + searchBase + " or " + userSearchBase + " was found in the topology");
 +              out.println("If any one of the above params is present then " + userSearchAttributeName + 
 +                  " and " + userObjectClass + " must both be present and either " + searchBase + " or " + userSearchBase + " must also be present.");
 +            }
 +            errs += errors;
 +          } else {
 +            errs += hasParam(params, userDnTemplate, true) ?  0 : 1;
 +
 +          }
 +        }
 +        return (errs > 0);
 +      } else {
 +        out.println("Could not obtain ShiroProvider");
 +        return true;
 +      }
 +    }
 +
 +    // Checks to see if the param name is present. If not, notify the user
 +    protected boolean hasParam(Map<String, String> params, String key, boolean notifyUser){
 +      if(params.get(key) == null){
 +        if(notifyUser) { out.println("Warn: " + key + " is not present in topology"); }
 +        return false;
 +      } else { return true; }
 +    }
 +
 +    /**
 +     *
 +     * @param ini - the path to the shiro.ini file within a topology deployment.
 +     * @param token - token for username and password
 +     * @return - true/false whether a user was successfully able to authenticate or not.
 +     */
 +    protected boolean authenticateUser(Ini ini, UsernamePasswordToken token){
 +      boolean result = false;
 +      try {
 +        Subject subject = getSubject(ini);
 +        try{
 +          subject.login(token);
 +          if(subject.isAuthenticated()){
 +            result = true;
 +          }
 +        } catch (AuthenticationException e){
 +          out.println(e.toString());
 +          out.println(e.getCause().getMessage());
 +          if (debug) {
 +            e.printStackTrace(out);
 +          } else {
 +            out.println(debugMessage);
 +          }
 +        } finally {
 +          subject.logout();
 +        }
 +      } catch (BadSubjectException e) {
 +        out.println(e.toString());
 +        if (debug){
 +          e.printStackTrace();
 +        } else {
 +          out.println(debugMessage);
 +        }
 +      } catch (ConfigurationException e) {
 +        out.println(e.toString());
 +      } catch ( Exception e ) {
 +        out.println(e.getCause());
 +        out.println(e.toString());
 +      }
 +      return result;
 +    }
 +
 +    protected boolean authenticateUser(String config, UsernamePasswordToken token) throws ConfigurationException {
 +      Ini ini = new Ini();
 +      ini.loadFromPath(config); // may throw ConfigurationException, which is propagated as-is
 +      return authenticateUser(ini, token);
 +    }
 +
 +    /**
 +     *
 +     * @param userDn - fully qualified userDn used for LDAP authentication
 +     * @return - returns the principal found in the userDn after "uid="
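 +     * e.g. (illustrative value) a userDn of "uid=guest,ou=people,dc=hadoop,dc=apache,dc=org" yields "guest".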
 +     */
 +    protected String getPrincipal(String userDn){
 +      String result = "";
 +
 +//      Need to determine whether we are using AD or LDAP.
 +//      An LDAP userDn usually starts with "uid="; an AD userDn usually starts with cn/CN.
 +//      Find the userDN template.
 +
 +      try {
 +        // Verify the topology exists before parsing the principal; getTopology throws otherwise.
 +        getTopology(cluster);
 +
 +//        We know everything between first "=" and "," will be part of the principal.
 +        int eq = userDn.indexOf("=");
 +        int com = userDn.indexOf(",");
 +        if(eq != -1 && com > eq) { // com > eq already implies com != -1
 +          result = userDn.substring(eq + 1, com);
 +        } else {
 +          result = "";
 +        }
 +      } catch (NoSuchTopologyException e) {
 +        out.println(e.toString());
 +        result = userDn;
 +      }
 +      // Do not return from a finally block; doing so can silently swallow a pending exception.
 +      return result;
 +    }
 +
 +    /**
 +     *
 +     * @param t - topology configuration to use
 +     * @param config - the path to the shiro.ini file from the topology deployment.
 +     * @return - true/false whether LDAP successfully authenticated with system credentials.
 +     */
 +    protected boolean testSysBind(Topology t, String config) {
 +      boolean result = false;
 +      String username;
 +      char[] password;
 +
 +      try {
 +//        Pull out contextFactory.url param for light shiro config
 +        Provider shiro = t.getProvider("authentication", "ShiroProvider");
 +        Map<String, String> params = shiro.getParams();
 +        String url = params.get("main.ldapRealm.contextFactory.url");
 +
 +//        Build the Ini with minimum requirements
 +        Ini ini = new Ini();
 +        ini.addSection("main");
 +        ini.setSectionProperty("main", "ldapRealm", "org.apache.knox.gateway.shirorealm.KnoxLdapRealm");
 +        ini.setSectionProperty("main", "ldapContextFactory", "org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory");
 +        ini.setSectionProperty("main", "ldapRealm.contextFactory.url", url);
 +
 +        username = getSystemUsername(t);
 +        password = getSystemPassword(t);
 +        result = authenticateUser(ini, new UsernamePasswordToken(username, password));
 +      } catch (MissingUsernameException | NoSuchProviderException | MissingPasswordException e) {
 +        out.println(e.toString());
 +      } catch (NullPointerException e) {
 +        out.println(e.toString());
 +      }
 +      return result;
 +    }
 +
 +    /**
 +     *
 +     * @param t - topology configuration to use
 +     * @return - the principal of the systemUsername specified in topology. null if non-existent
 +     */
 +    private String getSystemUsername(Topology t) throws MissingUsernameException, NoSuchProviderException {
 +      final String SYSTEM_USERNAME = "main.ldapRealm.contextFactory.systemUsername";
 +      String user = null;
 +      Provider shiroProvider = t.getProvider("authentication", "ShiroProvider");
 +      if(shiroProvider != null){
 +        Map<String, String> params = shiroProvider.getParams();
 +        String userDn = params.get(SYSTEM_USERNAME);
 +        user = userDn;
 +      } else {
 +        throw new NoSuchProviderException("ShiroProvider", "authentication", t.getName());
 +      }
 +      return user;
 +    }
 +
 +    /**
 +     *
 +     * @param t - topology configuration to use
 +     * @return - the systemPassword specified in topology. null if non-existent
 +     */
 +    private char[] getSystemPassword(Topology t) throws NoSuchProviderException, MissingPasswordException{
 +      final String SYSTEM_PASSWORD = "main.ldapRealm.contextFactory.systemPassword";
 +      String pass = null;
 +      Provider shiro = t.getProvider("authentication", "ShiroProvider");
 +      if(shiro != null){
 +        Map<String, String> params = shiro.getParams();
 +        pass = params.get(SYSTEM_PASSWORD);
 +      } else {
 +        throw new NoSuchProviderException("ShiroProvider", "authentication", t.getName());
 +      }
 +
 +      if(pass != null) {
 +        return pass.toCharArray();
 +      } else {
 +        throw new MissingPasswordException("ShiroProvider did not contain param: " + SYSTEM_PASSWORD);
 +      }
 +    }
 +
 +    /**
 +     *
 +     * @param config - the shiro.ini config file created in topology deployment.
 +     * @return returns the Subject given by the shiro config's settings.
 +     */
 +    protected Subject getSubject(Ini config) throws BadSubjectException {
 +      try {
 +        ThreadContext.unbindSubject();
 +        Factory<org.apache.shiro.mgt.SecurityManager> factory = new IniSecurityManagerFactory(config);
 +        org.apache.shiro.mgt.SecurityManager securityManager = factory.getInstance();
 +        SecurityUtils.setSecurityManager(securityManager);
 +        Subject subject = SecurityUtils.getSubject();
 +        if( subject != null) {
 +          return subject;
 +        } else {
 +          out.println("Error Creating Subject from config at: " + config);
 +        }
 +      } catch (Exception e){
 +        out.println(e.toString());
 +      }
 +      throw new BadSubjectException("Subject could not be created with Shiro Config at " + config);
 +    }
 +
 +    protected Subject getSubject(String config) throws ConfigurationException {
 +      Ini ini = new Ini();
 +      ini.loadFromPath(config);
 +      try {
 +        return getSubject(ini);
 +      } catch (BadSubjectException e) {
 +        throw new ConfigurationException("Could not get Subject with Ini at " + config);
 +      }
 +    }
 +
 +    /**
 +     * prompts the user for credentials in the command line if necessary
 +     * populates the username and password members.
 +     */
 +    protected void promptCredentials() {
 +      if(this.username == null){
 +        Console c = System.console();
 +        if( c != null) {
 +          this.username = c.readLine("Username: ");
 +        }else{
 +          try {
 +            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
 +            out.println("Username: ");
 +            this.username = reader.readLine();
 +            reader.close();
 +          } catch (IOException e){
 +            out.println(e.toString());
 +            this.username = "";
 +          }
 +        }
 +      }
 +
 +      if(this.password == null){
 +        Console c = System.console();
 +        if( c != null) {
 +          this.password = c.readPassword("Password: ");
 +        }else{
 +          try {
 +            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
 +            out.println("Password: ");
 +            String pw = reader.readLine();
 +            if(pw != null){
 +              this.password = pw.toCharArray();
 +            } else {
 +              this.password = new char[0];
 +            }
 +            reader.close();
 +          } catch (IOException e){
 +            out.println(e.toString());
 +            this.password = new char[0];
 +          }
 +        }
 +      }
 +    }
 +
 +    /**
 +     *
 +     * @param topologyName - the name of the topology to retrieve
 +     * @return - Topology object with specified name. null if topology doesn't exist in TopologyService
 +     */
 +    protected Topology getTopology(String topologyName) throws NoSuchTopologyException {
 +      TopologyService ts = getTopologyService();
 +      ts.reloadTopologies();
 +      for (Topology t : ts.getTopologies()) {
 +        if(t.getName().equals(topologyName)) {
 +          return t;
 +        }
 +      }
 +      throw new  NoSuchTopologyException("Topology " + topologyName + " does not" +
 +          " exist in the topologies directory.");
 +    }
 +
 +    /**
 +     *
 +     * @param t - Topology to use for config
 +     * @return - path of shiro.ini config file.
 +     */
 +    protected String getConfig(Topology t){
 +      File tmpDir = new File(System.getProperty("java.io.tmpdir"));
 +      DeploymentFactory.setGatewayServices(services);
 +      EnterpriseArchive archive = DeploymentFactory.createDeployment(getGatewayConfig(), t);
 +      File war = archive.as(ExplodedExporter.class).exportExploded(tmpDir, t.getName() + "_deploy.tmp");
 +      war.deleteOnExit();
 +      String config = war.getAbsolutePath() + "/%2F/WEB-INF/shiro.ini";
 +      try{
 +        FileUtils.forceDeleteOnExit(war);
 +      } catch (IOException e) {
 +        out.println(e.toString());
 +        war.deleteOnExit();
 +      }
 +      return config;
 +    }
 +
 +    /**
 +     * populates username and password if they were passed as arguments, if not will prompt user for them.
 +     */
 +    void acquireCredentials(){
 +      if(user != null){
 +        this.username = user;
 +      }
 +      if(pass != null){
 +        this.password = pass.toCharArray();
 +      }
 +      promptCredentials();
 +    }
 +
 +    /**
 +     *
 +     * @return - true or false if the topology was acquired from the topology service and populated in the topology
 +     * field.
 +     */
 +    protected boolean acquireTopology(){
 +      try {
 +        topology = getTopology(cluster);
 +      } catch (NoSuchTopologyException e) {
 +        out.println(e.toString());
 +        return false;
 +      }
 +      return true;
 +    }
 +  }
 +
 +  private class LDAPAuthCommand extends LDAPCommand {
 +
 +    public static final String USAGE = "user-auth-test [--cluster clustername] [--u username] [--p password] [--g]";
 +    public static final String DESC = "This command tests a cluster's configuration ability to\n " +
 +        "authenticate a user with a cluster's ShiroProvider settings.\n Use \"--g\" if you want to list the groups a" +
 +        " user is a member of. \nOptional: [--u username]: Provide a username argument to the command\n" +
 +        "Optional: [--p password]: Provide a password argument to the command.\n" +
 +        "If a username and password argument are not supplied, the terminal will prompt you for one.";
 +
 +    private static final String  SUBJECT_USER_GROUPS = "subject.userGroups";
 +    private HashSet<String> groupSet = new HashSet<>();
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +      if(!acquireTopology()){
 +        return;
 +      }
 +      acquireCredentials();
 +
 +      if(topology.getProvider("authentication", "ShiroProvider") == null) {
 +        out.println("ERR: This tool currently only works with Shiro as the authentication provider.");
 +        out.println("Please update the topology to use \"ShiroProvider\" as the authentication provider.");
 +        return;
 +      }
 +
 +      String config = getConfig(topology);
 +
 +      if(new File(config).exists()) {
 +          if(authenticateUser(config, new UsernamePasswordToken(username, password))) {
 +            out.println("LDAP authentication successful!");
 +            if(groups) {
 +              if(testSysBind(topology, config)) {
 +                groupSet = getGroups(topology, new UsernamePasswordToken(username, password));
 +                if(groupSet == null || groupSet.isEmpty()) {
 +                  out.println(username + " does not belong to any groups");
 +                  // The --g flag is already known to be set here, so no need to re-check it.
 +                  hasShiroProviderErrors(topology, true);
 +                  out.println("You were looking for this user's groups but this user does not belong to any.");
 +                  out.println("Your topology file may be incorrectly configured for group lookup.");
 +                } else {
 +                  for (Object o : groupSet.toArray()) {
 +                    out.println(username + " is a member of: " + o.toString());
 +                  }
 +                }
 +              }
 +            }
 +          } else {
 +            out.println("ERR: Unable to authenticate user: " + username);
 +          }
 +      } else {
 +        out.println("ERR: No shiro config file found.");
 +      }
 +    }
 +
 +    private HashSet<String> getGroups(Topology t, UsernamePasswordToken token){
 +      HashSet<String> groups = null;
 +      try {
 +        Subject subject = getSubject(getConfig(t));
 +        if(!subject.isAuthenticated()) {
 +          subject.login(token);
 +        }
 +        subject.hasRole(""); //Populate subject groups
 +        groups = (HashSet<String>) subject.getSession().getAttribute(SUBJECT_USER_GROUPS);
 +        subject.logout();
 +      } catch (AuthenticationException e) {
 +        out.println("Error retrieving groups");
 +        out.println(e.toString());
 +        if(debug) {
 +          e.printStackTrace();
 +        } else {
 +          out.println(debugMessage);
 +        }
 +      } catch (ConfigurationException e) {
 +        out.println(e.toString());
 +        if(debug){
 +          e.printStackTrace();
 +        }
 +      }
 +      return groups;
 +    }
 +
 +  }
 +
 +  public class LDAPSysBindCommand extends LDAPCommand {
 +
 +    public static final String USAGE = "system-user-auth-test [--cluster clustername] [--d]";
 +    public static final String DESC = "This command tests a cluster configuration's ability to\n " +
 +        "authenticate a user with a cluster's ShiroProvider settings.";
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +
 +      if(!acquireTopology()) {
 +        return;
 +      }
 +
 +      if(hasShiroProviderErrors(topology, false)) {
 +        out.println("Topology warnings present. SystemUser may not bind.");
 +      }
 +
 +      if(testSysBind(topology, getConfig(topology))) {
 +        out.println("System LDAP Bind successful.");
 +      } else {
 +        out.println("Unable to successfully bind to LDAP server with topology credentials. Are your parameters correct?");
 +      }
 +    }
 +  }
 +
 +  private GatewayConfig getGatewayConfig() {
 +    GatewayConfig result;
 +    Configuration conf = getConf();
 +    if(conf instanceof GatewayConfig) { // instanceof is null-safe
 +      result = (GatewayConfig) conf;
 +    } else {
 +      result = new GatewayConfigImpl();
 +    }
 +    return result;
 +  }
 +
 +  public class ServiceTestCommand extends Command {
 +    public static final String USAGE = "service-test [--u username] [--p password] [--cluster clustername] [--hostname name] " +
 +        "[--port port]";
 +    public static final String DESC =
 +                        "This command requires a running instance of Knox to be present on the same machine.\n" +
 +                        "It will execute a test to make sure all services are accessible through the gateway URLs.\n" +
 +                        "Errors are reported and suggestions to resolve any problems are returned. JSON formatted.\n";
 +
 +    private boolean ssl = true;
 +    private int attempts = 0;
 +
 +    @Override
 +    public String getUsage() { return USAGE + ":\n\n" + DESC; }
 +
 +    @Override
 +    public void execute() {
 +      attempts++;
 +      SSLContext ctx = null;
 +      CloseableHttpClient client;
 +      String http = "http://";
 +      String https = "https://";
 +      GatewayConfig conf = getGatewayConfig();
 +      String gatewayPort;
 +      String host;
 +
 +
 +      if(cluster == null) {
 +        printKnoxShellUsage();
 +        out.println("A --cluster argument is required.");
 +        return;
 +      }
 +
 +      if(hostname != null) {
 +        host = hostname;
 +      } else {
 +        try {
 +          host = InetAddress.getLocalHost().getHostAddress();
 +        } catch (UnknownHostException e) {
 +          out.println(e.toString());
 +          out.println("Defaulting address to localhost. Use --hostname option to specify a different hostname");
 +          host = "localhost";
 +        }
 +      }
 +
 +      if (port != null) {
 +        gatewayPort = port;
 +      } else if (conf.getGatewayPort() > -1) {
 +        gatewayPort = Integer.toString(conf.getGatewayPort());
 +      } else {
 +        out.println("Could not get port. Please supply it using the --port option");
 +        return;
 +      }
 +
 +
 +      String path = "/" + conf.getGatewayPath();
 +      String topology = "/" + cluster;
 +      String httpServiceTestURL = http + host + ":" + gatewayPort + path + topology + "/service-test";
 +      String httpsServiceTestURL = https + host + ":" + gatewayPort + path + topology + "/service-test";
 +
 +      String authString = "";
 +//    Create Authorization String
 +      if( user != null && pass != null) {
 +        authString = "Basic " + Base64.encodeBase64String((user + ":" + pass).getBytes());
 +      } else {
 +        out.println("Username and/or password not supplied. Expect HTTP 401 Unauthorized responses.");
 +      }
 +
 +//    Attempt to build SSL context for HTTP client.
 +      try {
 +        ctx = SSLContexts.custom().loadTrustMaterial(null, new TrustSelfSignedStrategy()).build();
 +      } catch (Exception e) {
 +        out.println(e.toString());
 +      }
 +
 +//    Initialize the HTTP client
 +      if(ctx == null) {
 +        client = HttpClients.createDefault();
 +      } else {
 +        client = HttpClients.custom().setSslcontext(ctx).build();
 +      }
 +
 +      HttpGet request;
 +      if(ssl) {
 +        request = new HttpGet(httpsServiceTestURL);
 +      } else {
 +        request = new HttpGet(httpServiceTestURL);
 +      }
 +
 +
 +      request.setHeader("Authorization", authString);
 +      request.setHeader("Accept", MediaType.APPLICATION_JSON.getMediaType());
 +      try {
 +        out.println(request.toString());
 +        CloseableHttpResponse response = client.execute(request);
 +
 +        switch (response.getStatusLine().getStatusCode()) {
 +
 +          case 200:
 +            response.getEntity().writeTo(out);
 +            break;
 +          case 404:
 +            out.println("Could not find service-test resource");
 +            out.println("Make sure you have configured the SERVICE-TEST service in your topology.");
 +            break;
 +          case 500:
 +            out.println("HTTP 500 Server error");
 +            break;
 +
 +          default:
 +            out.println("Unexpected HTTP response code.");
 +            out.println(response.getStatusLine().toString());
 +            response.getEntity().writeTo(out);
 +            break;
 +        }
 +
 +        response.close();
 +        request.releaseConnection();
 +
 +      } catch (ClientProtocolException e) {
 +        out.println(e.toString());
 +        if (debug) {
 +          e.printStackTrace(out);
 +        }
 +      } catch (SSLException e) {
 +        out.println(e.toString());
 +        retryRequest();
 +      } catch (IOException e) {
 +        out.println(e.toString());
 +        retryRequest();
 +        if(debug) {
 +          e.printStackTrace(out);
 +        }
 +      } finally {
 +        try {
 +          client.close();
 +        } catch (IOException e) {
 +          out.println(e.toString());
 +        }
 +      }
 +
 +    }
 +
 +    public void retryRequest(){
 +      if(attempts < 2) {
 +        if(ssl) {
 +          ssl = false;
 +          out.println("Attempting request without SSL.");
 +        } else {
 +          ssl = true;
 +          out.println("Attempting request with SSL ");
 +        }
 +        execute();
 +      } else {
 +        out.println("Unable to successfully make request. Try using the API with cURL.");
 +      }
 +    }
 +
 +  }
 +
 +  public class RemoteRegistryClientsListCommand extends Command {
 +
 +    static final String USAGE = "list-registry-clients";
 +    static final String DESC = "Lists all of the remote configuration registry clients defined in gateway-site.xml.\n";
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +     */
 +    @Override
 +    public void execute() throws Exception {
 +      GatewayConfig config = getGatewayConfig();
 +      List<String> remoteConfigRegistryClientNames = config.getRemoteRegistryConfigurationNames();
 +      if (!remoteConfigRegistryClientNames.isEmpty()) {
 +        out.println("Listing remote configuration registry clients:");
 +        for (String name : remoteConfigRegistryClientNames) {
 +          out.println(name);
 +        }
 +      }
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +     */
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 + }
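
The client names this command prints are defined in gateway-site.xml. For orientation, a ZooKeeper-backed client is declared with a gateway.remote.config.registry.<name> property; the snippet below assumes that property convention from the Knox remote-registry documentation, with a placeholder client name and address:

    <property>
        <name>gateway.remote.config.registry.sandbox-zookeeper-client</name>
        <value>type=ZooKeeper;address=localhost:2181</value>
    </property>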
 +
++  private abstract class RemoteRegistryCommand extends Command {
++    static final String ROOT_ENTRY = "/knox";
++    static final String CONFIG_ENTRY = ROOT_ENTRY + "/config";
++    static final String PROVIDER_CONFIG_ENTRY = CONFIG_ENTRY + "/shared-providers";
++    static final String DESCRIPTORS_ENTRY = CONFIG_ENTRY + "/descriptors";
++
++    protected RemoteConfigurationRegistryClient getClient() {
++      RemoteConfigurationRegistryClient client = null;
++      if (remoteRegistryClient != null) {
++        RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
++        client = cs.get(remoteRegistryClient);
++        if (client == null) {
++          out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
++        }
++      } else {
++        out.println("Missing required argument : --registry-client\n");
++      }
++      return client;
++    }
++  }
++
++
++  public class RemoteRegistryListProviderConfigsCommand extends RemoteRegistryCommand {
++    static final String USAGE = "list-provider-configs --registry-client name";
++    static final String DESC = "Lists the provider configurations present in the specified remote registry\n";
++
++    @Override
++    public void execute() {
++      RemoteConfigurationRegistryClient client = getClient();
++      if (client != null) {
++        out.println("Provider Configurations (@" + client.getAddress() + ")");
++        List<String> entries = client.listChildEntries(PROVIDER_CONFIG_ENTRY);
++        for (String entry : entries) {
++          out.println(entry);
++        }
++        out.println();
++      }
++    }
++
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
++
++
++  public class RemoteRegistryListDescriptorsCommand extends RemoteRegistryCommand {
++    static final String USAGE = "list-descriptors --registry-client name";
++    static final String DESC = "Lists the descriptors present in the specified remote registry\n";
++
++    @Override
++    public void execute() {
++      RemoteConfigurationRegistryClient client = getClient();
++      if (client != null) {
++        out.println("Descriptors (@" + client.getAddress() + ")");
++        List<String> entries = client.listChildEntries(DESCRIPTORS_ENTRY);
++        for (String entry : entries) {
++          out.println(entry);
++        }
++        out.println();
++      }
++    }
++
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
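
Combined with a client definition like the one sketched earlier, the two list commands are invoked in exactly the shape their USAGE strings describe, for example (client name is a placeholder):

    bin/knoxcli.sh list-provider-configs --registry-client sandbox-zookeeper-client
    bin/knoxcli.sh list-descriptors --registry-client sandbox-zookeeper-client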
++
 +
 +  /**
 +   * Base class for remote config registry upload commands
 +   */
-   public abstract class RemoteRegistryUploadCommand extends Command {
-     protected static final String ROOT_ENTRY = "/knox";
-     protected static final String CONFIG_ENTRY = ROOT_ENTRY + "/config";
-     protected static final String PROVIDER_CONFIG_ENTRY = CONFIG_ENTRY + "/shared-providers";
-     protected static final String DESCRIPTORS__ENTRY = CONFIG_ENTRY + "/descriptors";
- 
++  public abstract class RemoteRegistryUploadCommand extends RemoteRegistryCommand {
 +    private File sourceFile = null;
 +    protected String filename = null;
 +
 +    protected RemoteRegistryUploadCommand(String sourceFileName) {
 +      this.filename = sourceFileName;
 +    }
 +
 +    private void upload(RemoteConfigurationRegistryClient client, String entryPath, File source) throws Exception {
 +      String content = FileUtils.readFileToString(source);
 +      if (client.entryExists(entryPath)) {
 +        // If it exists, then we're going to set the data
 +        client.setEntryData(entryPath, content);
 +      } else {
 +        // If it does not exist, then create it and set the data
 +        client.createEntry(entryPath, content);
 +      }
 +    }
 +
 +    File getSourceFile() {
 +      if (sourceFile == null) {
 +        sourceFile = new File(filename);
 +      }
 +      return sourceFile;
 +    }
 +
 +    String getEntryName(String prefixPath) {
 +      String entryName = remoteRegistryEntryName;
 +      if (entryName == null) {
 +        File sourceFile = getSourceFile();
 +        if (sourceFile.exists()) {
 +          String path = sourceFile.getAbsolutePath();
 +          entryName = path.substring(path.lastIndexOf(File.separator) + 1);
 +        } else {
 +          out.println("Could not locate source file: " + filename);
 +        }
 +      }
 +      return prefixPath + "/" + entryName;
 +    }
 +
 +    protected void execute(String entryName, File sourceFile) throws Exception {
-       if (remoteRegistryClient != null) {
-         RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
-         RemoteConfigurationRegistryClient client = cs.get(remoteRegistryClient);
-         if (client != null) {
-           if (entryName != null) {
-             upload(client, entryName, sourceFile);
-           }
-         } else {
-           out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
++      RemoteConfigurationRegistryClient client = getClient();
++      if (client != null) {
++        if (entryName != null) {
++          upload(client, entryName, sourceFile);
 +        }
-       } else {
-         out.println("Missing required argument : --registry-client\n");
 +      }
 +    }
- 
 +  }
 +
 +
 +  public class RemoteRegistryUploadProviderConfigCommand extends RemoteRegistryUploadCommand {
 +
 +    static final String USAGE = "upload-provider-config providerConfigFile --registry-client name [--entry-name entryName]";
 +    static final String DESC = "Uploads a provider configuration to the specified remote registry client, optionally " +
 +                               "renaming the entry.\nIf the entry name is not specified, the name of the uploaded " +
 +                               "file is used.\n";
 +
 +    RemoteRegistryUploadProviderConfigCommand(String fileName) {
 +      super(fileName);
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +     */
 +    @Override
 +    public void execute() throws Exception {
 +      super.execute(getEntryName(PROVIDER_CONFIG_ENTRY), getSourceFile());
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +     */
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
 +
 +
 +  public class RemoteRegistryUploadDescriptorCommand extends RemoteRegistryUploadCommand {
 +
 +    static final String USAGE = "upload-descriptor descriptorFile --registry-client name [--entry-name entryName]";
 +    static final String DESC = "Uploads a simple descriptor using the specified remote registry client, optionally " +
 +                               "renaming the entry.\nIf the entry name is not specified, the name of the uploaded " +
 +                               "file is used.\n";
 +
 +    RemoteRegistryUploadDescriptorCommand(String fileName) {
 +      super(fileName);
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +     */
 +    @Override
 +    public void execute() throws Exception {
-       super.execute(getEntryName(DESCRIPTORS__ENTRY), getSourceFile());
++      super.execute(getEntryName(DESCRIPTORS_ENTRY), getSourceFile());
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +     */
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
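
Matching invocations for the two upload commands, again with placeholder file and client names; as the DESC strings note, omitting --entry-name makes the uploaded file's own name the entry name:

    bin/knoxcli.sh upload-provider-config sandbox-providers.xml --registry-client sandbox-zookeeper-client
    bin/knoxcli.sh upload-descriptor sandbox.json --registry-client sandbox-zookeeper-client --entry-name sandbox.json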
 +
 +
-   public class RemoteRegistryGetACLCommand extends Command {
++  public class RemoteRegistryGetACLCommand extends RemoteRegistryCommand {
 +
 +    static final String USAGE = "get-registry-acl entry --registry-client name";
 +    static final String DESC = "Presents the ACL settings for the specified remote registry entry.\n";
 +
 +    private String entry = null;
 +
 +    RemoteRegistryGetACLCommand(String entry) {
 +      this.entry = entry;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +     */
 +    @Override
 +    public void execute() throws Exception {
-       if (remoteRegistryClient != null) {
-         RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
-         RemoteConfigurationRegistryClient client = cs.get(remoteRegistryClient);
-         if (client != null) {
-           if (entry != null) {
-             List<RemoteConfigurationRegistryClient.EntryACL> acls = client.getACL(entry);
-             for (RemoteConfigurationRegistryClient.EntryACL acl : acls) {
-               out.println(acl.getType() + ":" + acl.getId() + ":" + acl.getPermissions());
-             }
++      RemoteConfigurationRegistryClient client = getClient();
++      if (client != null) {
++        if (entry != null) {
++          List<RemoteConfigurationRegistryClient.EntryACL> acls = client.getACL(entry);
++          for (RemoteConfigurationRegistryClient.EntryACL acl : acls) {
++            out.println(acl.getType() + ":" + acl.getId() + ":" + acl.getPermissions());
 +          }
-         } else {
-           out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
 +        }
-       } else {
-         out.println("Missing required argument : --registry-client\n");
 +      }
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +     */
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
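
Each ACL entry is printed as a type:id:permissions triple. Against a ZooKeeper-backed registry the output would resemble the following; these are representative ZooKeeper scheme/id values, not output captured from a real deployment:

    world:anyone:r
    sasl:knox:cdrwa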
 +
 +
 +  /**
 +   * Base class for remote config registry delete commands
 +   */
-   public abstract class RemoteRegistryDeleteCommand extends Command {
-     protected static final String ROOT_ENTRY = "/knox";
-     protected static final String CONFIG_ENTRY = ROOT_ENTRY + "/config";
-     protected static final String PROVIDER_CONFIG_ENTRY = CONFIG_ENTRY + "/shared-providers";
-     protected static final String DESCRIPTORS__ENTRY = CONFIG_ENTRY + "/descriptors";
- 
++  public abstract class RemoteRegistryDeleteCommand extends RemoteRegistryCommand {
 +    protected String entryName = null;
 +
 +    protected RemoteRegistryDeleteCommand(String entryName) {
 +      this.entryName = entryName;
 +    }
 +
 +    private void delete(RemoteConfigurationRegistryClient client, String entryPath) throws Exception {
 +      if (client.entryExists(entryPath)) {
 +        // If it exists, then delete it
 +        client.deleteEntry(entryPath);
 +      }
 +    }
 +
 +    protected void execute(String entryName) throws Exception {
-       if (remoteRegistryClient != null) {
-         RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
-         RemoteConfigurationRegistryClient client = cs.get(remoteRegistryClient);
-         if (client != null) {
-           if (entryName != null) {
-             delete(client, entryName);
-           }
-         } else {
-           out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
++      RemoteConfigurationRegistryClient client = getClient();
++      if (client != null) {
++        if (entryName != null) {
++          delete(client, entryName);
 +        }
-       } else {
-         out.println("Missing required argument : --registry-client\n");
 +      }
 +    }
 +  }
 +
 +
 +  public class RemoteRegistryDeleteProviderConfigCommand extends RemoteRegistryDeleteCommand {
 +    static final String USAGE = "delete-provider-config providerConfig --registry-client name";
 +    static final String DESC = "Deletes a shared provider configuration from the specified remote registry.\n";
 +
 +    public RemoteRegistryDeleteProviderConfigCommand(String entryName) {
 +      super(entryName);
 +    }
 +
 +    @Override
 +    public void execute() throws Exception {
 +      execute(PROVIDER_CONFIG_ENTRY + "/" + entryName);
 +    }
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
 +
 +
 +  public class RemoteRegistryDeleteDescriptorCommand extends RemoteRegistryDeleteCommand {
 +    static final String USAGE = "delete-descriptor descriptor --registry-client name";
 +    static final String DESC = "Deletes a simple descriptor from the specified remote registry.\n";
 +
 +    public RemoteRegistryDeleteDescriptorCommand(String entryName) {
 +      super(entryName);
 +    }
 +
 +    @Override
 +    public void execute() throws Exception {
-       execute(DESCRIPTORS__ENTRY + "/" + entryName);
++      execute(DESCRIPTORS_ENTRY + "/" + entryName);
 +    }
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
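
The delete commands follow the same invocation shape; removing the entries uploaded earlier would look like this (placeholder names as before), and deleting a nonexistent entry is a quiet no-op since delete() first checks entryExists():

    bin/knoxcli.sh delete-provider-config sandbox-providers.xml --registry-client sandbox-zookeeper-client
    bin/knoxcli.sh delete-descriptor sandbox.json --registry-client sandbox-zookeeper-client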
 +
 +
 +  private static Properties loadBuildProperties() {
 +    Properties properties = new Properties();
 +    InputStream inputStream = KnoxCLI.class.getClassLoader().getResourceAsStream( "build.properties" );
 +    if( inputStream != null ) {
 +      try {
 +        properties.load( inputStream );
 +        inputStream.close();
 +      } catch( IOException e ) {
 +        // Ignore.
 +      }
 +    }
 +    return properties;
 +  }
 +
 +  /**
 +   * @param args
 +   * @throws Exception
 +   */
 +  public static void main(String[] args) throws Exception {
 +    PropertyConfigurator.configure( System.getProperty( "log4j.configuration" ) );
 +    int res = ToolRunner.run(new GatewayConfigImpl(), new KnoxCLI(), args);
 +    System.exit(res);
 +  }
 +}


[29/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
Merge branch 'master' into KNOX-998-Package_Restructuring

# Conflicts:
#	gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
#	gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/2c69152f
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/2c69152f
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/2c69152f

Branch: refs/heads/master
Commit: 2c69152f49a22dbd6c9947a26965e3041a4f92d9
Parents: 1451428 d4b0dc6
Author: Sandeep More <mo...@apache.org>
Authored: Mon Nov 13 09:44:22 2017 -0500
Committer: Sandeep More <mo...@apache.org>
Committed: Mon Nov 13 09:44:22 2017 -0500

----------------------------------------------------------------------
 CHANGES                                         |  85 +++
 build.xml                                       |   2 +-
 gateway-applications/pom.xml                    |   2 +-
 gateway-demo-ldap-launcher/pom.xml              |   2 +-
 gateway-demo-ldap/pom.xml                       |   2 +-
 gateway-discovery-ambari/pom.xml                |   2 +-
 gateway-i18n-logging-log4j/pom.xml              |   2 +-
 gateway-i18n-logging-sl4j/pom.xml               |   2 +-
 gateway-i18n/pom.xml                            |   2 +-
 gateway-provider-ha/pom.xml                     |   2 +-
 .../ha/provider/impl/DefaultURLManagerTest.java |  19 +
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 gateway-provider-jersey/pom.xml                 |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 .../pom.xml                                     |   2 +-
 gateway-provider-rewrite/pom.xml                |   2 +-
 gateway-provider-security-authc-anon/pom.xml    |   2 +-
 gateway-provider-security-authz-acls/pom.xml    |   2 +-
 gateway-provider-security-hadoopauth/pom.xml    |   2 +-
 gateway-provider-security-jwt/pom.xml           |   2 +-
 gateway-provider-security-pac4j/pom.xml         |   2 +-
 gateway-provider-security-preauth/pom.xml       |   2 +-
 gateway-provider-security-shiro/pom.xml         |   2 +-
 gateway-provider-security-webappsec/pom.xml     |   2 +-
 gateway-release/pom.xml                         |   6 +-
 gateway-server-launcher/pom.xml                 |   2 +-
 gateway-server-xforwarded-filter/pom.xml        |   2 +-
 gateway-server/pom.xml                          |   2 +-
 .../knox/gateway/deploy/DeploymentFactory.java  |  23 +
 .../ServiceDefinitionDeploymentContributor.java |  14 +-
 .../instr/InstrHttpClientBuilderProvider.java   |   3 +-
 .../builder/BeanPropertyTopologyBuilder.java    |  11 +
 .../topology/simple/SimpleDescriptor.java       |  10 +
 .../simple/SimpleDescriptorHandler.java         |  51 +-
 .../topology/simple/SimpleDescriptorImpl.java   |  42 +-
 .../xml/KnoxFormatXmlTopologyRules.java         |   2 +
 .../src/main/resources/conf/topology-v1.xsd     |   1 +
 .../simple/SimpleDescriptorFactoryTest.java     | 631 +++++++++++++------
 .../simple/SimpleDescriptorHandlerTest.java     |   8 +
 gateway-service-admin/pom.xml                   |   4 +-
 .../service/admin/TopologiesResource.java       |  25 +-
 .../service/admin/beans/BeanConverter.java      |   2 +
 .../gateway/service/admin/beans/Topology.java   |  11 +
 gateway-service-as/pom.xml                      |   2 +-
 gateway-service-definitions/pom.xml             |   2 +-
 .../service/definition/CustomDispatch.java      |  11 +
 .../resources/services/livy/0.4.0/rewrite.xml   |  33 +
 .../resources/services/livy/0.4.0/service.xml   |  28 +
 .../resources/services/nifi/1.4.0/rewrite.xml   |  27 +
 .../resources/services/nifi/1.4.0/service.xml   |  30 +
 .../services/oozieui/4.2.0/rewrite.xml          |  27 +-
 gateway-service-hbase/pom.xml                   |   2 +-
 gateway-service-health/pom.xml                  |   4 +-
 gateway-service-hive/pom.xml                    |   2 +-
 gateway-service-knoxsso/pom.xml                 |   4 +-
 gateway-service-knoxssout/pom.xml               |   4 +-
 gateway-service-knoxtoken/pom.xml               |   4 +-
 gateway-service-nifi/pom.xml                    |  38 ++
 .../hadoop/gateway/dispatch/NiFiDispatch.java   | 106 ++++
 .../hadoop/gateway/dispatch/NiFiHaDispatch.java | 111 ++++
 .../hadoop/gateway/dispatch/NiFiHeaders.java    |  26 +
 .../gateway/dispatch/NiFiRequestUtil.java       |  89 +++
 .../gateway/dispatch/NiFiResponseUtil.java      |  89 +++
 gateway-service-rm/pom.xml                      |   2 +-
 gateway-service-storm/pom.xml                   |   2 +-
 gateway-service-test/pom.xml                    |   4 +-
 gateway-service-tgs/pom.xml                     |   2 +-
 gateway-service-vault/pom.xml                   |   4 +-
 gateway-service-webhdfs/pom.xml                 |   2 +-
 gateway-shell-launcher/pom.xml                  |   2 +-
 gateway-shell-release/pom.xml                   |   2 +-
 gateway-shell-samples/pom.xml                   |   2 +-
 gateway-shell/pom.xml                           |   2 +-
 gateway-spi/pom.xml                             |   6 +-
 .../dispatch/DefaultHttpClientFactory.java      |  55 +-
 .../knox/gateway/i18n/GatewaySpiMessages.java   |   3 +
 .../apache/knox/gateway/topology/Topology.java  |   9 +
 gateway-test-release-utils/pom.xml              |   2 +-
 gateway-test-release/pom.xml                    |   2 +-
 gateway-test-release/webhdfs-kerb-test/pom.xml  |   4 +-
 gateway-test-release/webhdfs-test/pom.xml       |   4 +-
 gateway-test-utils/pom.xml                      |   2 +-
 gateway-test/pom.xml                            |   2 +-
 .../deploy/DeploymentFactoryFuncTest.java       |   5 +
 gateway-util-common/pom.xml                     |   2 +-
 gateway-util-configinjector/pom.xml             |   2 +-
 gateway-util-launcher/pom.xml                   |   2 +-
 gateway-util-urltemplate/pom.xml                |   2 +-
 hadoop-examples/pom.xml                         |   2 +-
 knox-cli-launcher/pom.xml                       |   2 +-
 pom.xml                                         |  10 +-
 99 files changed, 1504 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-demo-ldap-launcher/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-demo-ldap/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/DefaultURLManagerTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/DefaultURLManagerTest.java
index c8b6c58,0000000..a2cfa54
mode 100644,000000..100644
--- a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/DefaultURLManagerTest.java
+++ b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/DefaultURLManagerTest.java
@@@ -1,73 -1,0 +1,92 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.ha.provider.impl;
 +
 +import org.junit.Test;
 +
 +import java.util.ArrayList;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +public class DefaultURLManagerTest {
 +
 +   @Test
 +   public void testActiveURLManagement() {
 +      ArrayList<String> urls = new ArrayList<>();
 +      String url1 = "http://host1";
 +      urls.add(url1);
 +      String url2 = "http://host2";
 +      urls.add(url2);
 +      DefaultURLManager manager = new DefaultURLManager();
 +      manager.setURLs(urls);
 +      assertTrue(manager.getURLs().containsAll(urls));
 +      assertEquals(url1, manager.getActiveURL());
 +      manager.markFailed(url1);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url2);
 +      assertEquals(url1, manager.getActiveURL());
 +   }
 +
++   /**
++    * KNOX-1104
++    * Verify that a service with HaProvider configuration, but only a single URL, does not break the HaProvider.
++    */
++   @Test
++   public void testSingleURLManagement() {
++      ArrayList<String> urls = new ArrayList<>();
++      String url1 = "http://host1";
++      urls.add(url1);
++      DefaultURLManager manager = new DefaultURLManager();
++      manager.setURLs(urls);
++      assertTrue(manager.getURLs().containsAll(urls));
++      assertEquals(url1, manager.getActiveURL());
++      manager.markFailed(url1);
++      assertEquals(url1, manager.getActiveURL());
++      manager.markFailed(url1);
++      assertEquals(url1, manager.getActiveURL());
++   }
++
 +   @Test
 +   public void testMarkingFailedURL() {
 +      ArrayList<String> urls = new ArrayList<>();
 +      String url1 = "http://host1:4555";
 +      urls.add(url1);
 +      String url2 = "http://host2:1234";
 +      urls.add(url2);
 +      String url3 = "http://host1:1234";
 +      urls.add(url3);
 +      String url4 = "http://host2:4555";
 +      urls.add(url4);
 +      DefaultURLManager manager = new DefaultURLManager();
 +      manager.setURLs(urls);
 +      assertTrue(manager.getURLs().containsAll(urls));
 +      assertEquals(url1, manager.getActiveURL());
 +      manager.markFailed(url1);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url1);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url3);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url4);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url2);
 +      assertEquals(url3, manager.getActiveURL());
 +   }
 +
 +}
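
Read together, these tests pin down the failover contract: marking the active URL failed rotates selection to the next URL in list order, wrapping around; marking a non-active URL failed leaves the selection alone; and with a single URL the rotation wraps back to that same URL (the KNOX-1104 case). The following is a minimal illustration of that contract, not the actual DefaultURLManager implementation:

    // Sketch of the rotate-on-failure behavior exercised by the tests above.
    // Illustrative only; the real DefaultURLManager may differ in details.
    class RoundRobinUrls {
      private final java.util.List<String> urls;
      private int active = 0;

      RoundRobinUrls(java.util.List<String> urls) { this.urls = urls; }

      synchronized String getActiveURL() { return urls.get(active); }

      synchronized void markFailed(String url) {
        // Rotate only when the failed URL is the active one; with a single
        // URL the modulo wraps back to the same entry, so it stays active.
        if (url.equals(urls.get(active))) {
          active = (active + 1) % urls.size();
        }
      }
    }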

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-release/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server-launcher/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/deploy/DeploymentFactory.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/deploy/DeploymentFactory.java
index bb8f1f2,0000000..b3eabb2
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/deploy/DeploymentFactory.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/deploy/DeploymentFactory.java
@@@ -1,772 -1,0 +1,795 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.deploy;
 +
 +import java.beans.Statement;
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.StringWriter;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.LinkedHashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.ServiceLoader;
 +import java.util.Set;
 +import java.util.TreeMap;
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.GatewayServlet;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.deploy.impl.ApplicationDeploymentContributor;
 +import org.apache.knox.gateway.descriptor.GatewayDescriptor;
 +import org.apache.knox.gateway.descriptor.GatewayDescriptorFactory;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.registry.ServiceRegistry;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.Version;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.apache.knox.gateway.util.Urls;
 +import org.jboss.shrinkwrap.api.ShrinkWrap;
 +import org.jboss.shrinkwrap.api.asset.Asset;
 +import org.jboss.shrinkwrap.api.asset.StringAsset;
 +import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
 +import org.jboss.shrinkwrap.api.spec.WebArchive;
 +import org.jboss.shrinkwrap.descriptor.api.Descriptors;
 +import org.jboss.shrinkwrap.descriptor.api.webapp30.WebAppDescriptor;
 +import org.jboss.shrinkwrap.descriptor.api.webcommon30.FilterType;
 +import org.jboss.shrinkwrap.descriptor.api.webcommon30.ServletType;
 +
 +public abstract class DeploymentFactory {
 +
 +  private static final String SERVLET_NAME_SUFFIX = "-knox-gateway-servlet";
 +  private static final String FILTER_NAME_SUFFIX = "-knox-gateway-filter";
 +  private static final GatewayMessages log = MessagesFactory.get( GatewayMessages.class );
 +  private static GatewayServices gatewayServices = null;
 +
 +  private static Map<String,Map<String,Map<Version, ServiceDeploymentContributor>>> SERVICE_CONTRIBUTOR_MAP;
 +  static {
 +    loadServiceContributors();
 +  }
 +
 +  private static Set<ProviderDeploymentContributor> PROVIDER_CONTRIBUTORS;
 +  private static Map<String,Map<String,ProviderDeploymentContributor>> PROVIDER_CONTRIBUTOR_MAP;
 +  static {
 +    loadProviderContributors();
 +  }
 +
 +  public static void setGatewayServices(GatewayServices services) {
 +    DeploymentFactory.gatewayServices = services;
 +  }
 +
 +  static List<Application> findApplicationsByUrl( Topology topology, String url ) {
 +    List<Application> foundApps = new ArrayList<Application>();
 +    if( topology != null ) {
 +      url = Urls.trimLeadingAndTrailingSlash( url );
 +      Collection<Application> searchApps = topology.getApplications();
 +      if( searchApps != null ) {
 +        for( Application searchApp : searchApps ) {
 +          List<String> searchUrls = searchApp.getUrls();
 +          if( searchUrls == null || searchUrls.isEmpty() ) {
 +            searchUrls = new ArrayList<String>(1);
 +            searchUrls.add( searchApp.getName() );
 +          }
 +          for( String searchUrl : searchUrls ) {
 +            if( url.equalsIgnoreCase( Urls.trimLeadingAndTrailingSlash( searchUrl ) ) ) {
 +              foundApps.add( searchApp );
 +              break;
 +            }
 +          }
 +        }
 +      }
 +    }
 +    return foundApps;
 +  }
 +
 +  // Verify that there are no two apps with duplicate urls.
 +  static void validateNoAppsWithDuplicateUrlsInTopology( Topology topology ) {
 +    if( topology != null ) {
 +      Collection<Application> apps = topology.getApplications();
 +      if( apps != null ) {
 +        for( Application app : apps ) {
 +          List<String> urls = app.getUrls();
 +          if( urls == null || urls.isEmpty() ) {
 +            urls = new ArrayList<String>(1);
 +            urls.add( app.getName() );
 +          }
 +          for( String url : urls ) {
 +            List<Application> dups = findApplicationsByUrl( topology, url );
 +            if( dups != null ) {
 +              for( Application dup : dups ) {
 +                if( dup != app ) {
 +                  throw new DeploymentException( "Topology " + topology.getName() + " contains applications " + app.getName() + " and " + dup.getName() + " with the same url: " + url );
 +                }
 +              }
 +            }
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  // Verify that if there are services that there are no applications with a root url.
 +  static void validateNoAppsWithRootUrlsInServicesTopology( Topology topology ) {
 +    if( topology != null ) {
 +      if( topology.getServices() != null && !topology.getServices().isEmpty() ) {
 +        List<Application> dups = findApplicationsByUrl( topology, "/" );
 +        if( dups != null && !dups.isEmpty() ) {
 +          throw new DeploymentException( "Topology " + topology.getName() + " contains both services and an application " + dups.get( 0 ).getName() + " with a root url." );
 +        }
 +      }
 +    }
 +  }
 +
 +  static void validateTopology( Topology topology ) {
 +    validateNoAppsWithRootUrlsInServicesTopology( topology );
 +    validateNoAppsWithDuplicateUrlsInTopology( topology );
 +  }
 +
 +  public static EnterpriseArchive createDeployment( GatewayConfig config, Topology topology ) {
 +    validateTopology( topology );
 +    loadStacksServiceContributors( config );
 +    Map<String,List<ProviderDeploymentContributor>> providers = selectContextProviders( topology );
 +    Map<String,List<ServiceDeploymentContributor>> services = selectContextServices( topology );
 +    Map<String,ServiceDeploymentContributor> applications = selectContextApplications( config, topology );
 +    EnterpriseArchive ear = ShrinkWrap.create( EnterpriseArchive.class, topology.getName() );
 +    ear.addAsResource( toStringAsset( topology ), "topology.xml" );
 +    if( !services.isEmpty() ) {
 +      WebArchive war = createServicesDeployment( config, topology, providers, services );
 +      ear.addAsModule( war );
 +    }
 +    if( !applications.isEmpty() ) {
 +      for( Map.Entry<String, ServiceDeploymentContributor> application : applications.entrySet() ) {
 +        WebArchive war = createApplicationDeployment( config, topology, providers, application );
 +        ear.addAsModule( war );
 +      }
 +    }
 +    return ear;
 +  }
 +
 +  private static WebArchive createServicesDeployment(
 +      GatewayConfig config,
 +      Topology topology,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services ) {
 +    DeploymentContext context = createDeploymentContext( config, "/", topology, providers );
 +    initialize( context, providers, services, null );
 +    contribute( context, providers, services, null );
 +    finalize( context, providers, services, null );
 +    return context.getWebArchive();
 +  }
 +
 +  public static WebArchive createApplicationDeployment(
 +      GatewayConfig config,
 +      Topology topology,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map.Entry<String,ServiceDeploymentContributor> application ) {
 +    String appPath = "/" + Urls.trimLeadingAndTrailingSlash( application.getKey() );
 +    DeploymentContext context = createDeploymentContext( config, appPath, topology, providers );
 +    initialize( context, providers, null, application );
 +    contribute( context, providers, null, application );
 +    finalize( context, providers, null, application );
 +    return context.getWebArchive();
 +  }
 +
 +  private static Asset toStringAsset( Topology topology ) {
 +    StringWriter writer = new StringWriter();
 +    String xml;
 +    try {
 +      Map<String,Object> properties = new HashMap<>(2);
 +      properties.put( "eclipselink-oxm-xml",
 +          "org/apache/knox/gateway/topology/topology_binding-xml.xml");
 +      properties.put( "eclipselink.media-type", "application/xml" );
 +      JAXBContext jaxbContext = JAXBContext.newInstance( Topology.class.getPackage().getName(), Topology.class.getClassLoader() , properties );
 +      Marshaller marshaller = jaxbContext.createMarshaller();
 +      marshaller.setProperty( Marshaller.JAXB_FORMATTED_OUTPUT, true );
 +      marshaller.marshal( topology, writer );
 +      writer.close();
 +      xml = writer.toString();
 +    } catch (IOException e) {
 +      throw new DeploymentException( "Failed to marshall topology.", e );
 +    } catch (JAXBException e) {
 +      throw new DeploymentException( "Failed to marshall topology.", e );
 +    }
 +    StringAsset asset = new StringAsset( xml );
 +    return asset;
 +  }
 +
 +  private static DeploymentContext createDeploymentContext(
 +      GatewayConfig config,
 +      String archivePath,
 +      Topology topology,
 +      Map<String,List<ProviderDeploymentContributor>> providers ) {
 +    archivePath = Urls.encode( archivePath );
 +    WebArchive webArchive = ShrinkWrap.create( WebArchive.class, archivePath );
 +    WebAppDescriptor webAppDesc = Descriptors.create( WebAppDescriptor.class );
 +    GatewayDescriptor gateway = GatewayDescriptorFactory.create();
 +    DeploymentContext context = new DeploymentContextImpl(
 +        config, topology, gateway, webArchive, webAppDesc, providers );
 +    return context;
 +  }
 +
 +  // Scan through the providers in the topology.  Collect any named providers in their roles list.
 +  // Scan through all of the loaded providers.  For each that doesn't have an existing provider in the role
 +  // list, add it.
 +  private static Map<String,List<ProviderDeploymentContributor>> selectContextProviders( Topology topology ) {
 +    Map<String,List<ProviderDeploymentContributor>> providers = new LinkedHashMap<String, List<ProviderDeploymentContributor>>();
++    addMissingDefaultProviders(topology);
 +    collectTopologyProviders( topology, providers );
 +    collectDefaultProviders( providers );
 +    return providers;
 +  }
 +
++  private static void addMissingDefaultProviders(Topology topology) {
++    Collection<Provider> providers = topology.getProviders();
++    HashMap<String, String> providerMap = new HashMap<>();
++    for (Provider provider : providers) {
++      providerMap.put(provider.getRole(), provider.getName());
++    }
++    // first make sure that the required provider is available from the serviceloaders
++    // for some tests the number of providers is limited to the classpath of the module
++    // and exceptions will be thrown as topologies are deployed even though they will
++    // work fine at actual server runtime.
++    if (PROVIDER_CONTRIBUTOR_MAP.get("identity-assertion") != null) {
++      // check for required providers and add the defaults if missing
++      if (!providerMap.containsKey("identity-assertion")) {
++        Provider idassertion = new Provider();
++        idassertion.setRole("identity-assertion");
++        idassertion.setName("Default");
++        idassertion.setEnabled(true);
++        providers.add(idassertion);
++      }
++    }
++  }
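
The default injected here is exactly what a topology author would otherwise declare by hand; in topology XML terms the added provider corresponds to the following (illustrative snippet):

    <provider>
        <role>identity-assertion</role>
        <name>Default</name>
        <enabled>true</enabled>
    </provider>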
++
 +  private static void collectTopologyProviders(
 +      Topology topology, Map<String, List<ProviderDeploymentContributor>> defaults ) {
 +    for( Provider provider : topology.getProviders() ) {
 +      String name = provider.getName();
 +      if( name != null ) {
 +        String role = provider.getRole();
 +        Map<String,ProviderDeploymentContributor> nameMap = PROVIDER_CONTRIBUTOR_MAP.get( role );
 +        if( nameMap != null ) {
 +          ProviderDeploymentContributor contributor = nameMap.get( name );
 +          // If there isn't a contributor with this role/name try to find a "*" contributor.
 +          if( contributor == null ) {
 +            nameMap = PROVIDER_CONTRIBUTOR_MAP.get( "*" );
 +            if( nameMap != null ) {
 +              contributor = nameMap.get( name );
 +            }
 +          }
 +          if( contributor != null ) {
 +            List list = defaults.get( role );
 +            if( list == null ) {
 +              list = new ArrayList( 1 );
 +              defaults.put( role, list );
 +            }
 +            if( !list.contains( contributor ) ) {
 +              list.add( contributor );
 +            }
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void collectDefaultProviders( Map<String,List<ProviderDeploymentContributor>> defaults ) {
 +    for( ProviderDeploymentContributor contributor : PROVIDER_CONTRIBUTORS ) {
 +      String role = contributor.getRole();
 +      List<ProviderDeploymentContributor> list = defaults.get( role );
 +      if( list == null ) {
 +        list = new ArrayList<ProviderDeploymentContributor>();
 +        defaults.put( role, list );
 +      }
 +      if( list.isEmpty() ) {
 +        list.add( contributor );
 +      }
 +    }
 +  }
 +
 +  // Scan through the services in the topology.
 +  // For each that we find add it to the list of service roles included in the topology.
 +  private static Map<String,List<ServiceDeploymentContributor>> selectContextServices( Topology topology ) {
 +    Map<String,List<ServiceDeploymentContributor>> defaults
 +        = new HashMap<>();
 +    for( Service service : topology.getServices() ) {
 +      String role = service.getRole();
 +      ServiceDeploymentContributor contributor = getServiceContributor( role, service.getName(), service.getVersion() );
 +      if( contributor != null ) {
 +        List<ServiceDeploymentContributor> list = defaults.get( role );
 +        if( list == null ) {
 +          list = new ArrayList<ServiceDeploymentContributor>( 1 );
 +          defaults.put( role, list );
 +        }
 +        if( !list.contains( contributor ) ) {
 +          list.add( contributor );
 +        }
 +      }
 +    }
 +    return defaults;
 +  }
 +
 +  private static Map<String,ServiceDeploymentContributor> selectContextApplications(
 +      GatewayConfig config, Topology topology ) {
 +    Map<String,ServiceDeploymentContributor> contributors = new HashMap<>();
 +    if( topology != null ) {
 +      for( Application application : topology.getApplications() ) {
 +        String name = application.getName();
 +        if( name == null || name.isEmpty() ) {
 +          throw new DeploymentException( "Topologies cannot contain an application without a name." );
 +        }
 +        ApplicationDeploymentContributor contributor = new ApplicationDeploymentContributor( config, application );
 +        List<String> urls = application.getUrls();
 +        if( urls == null || urls.isEmpty() ) {
 +          urls = new ArrayList<String>( 1 );
 +          urls.add( "/" + name );
 +        }
 +        for( String url : urls ) {
 +          if( url == null || url.isEmpty() || url.equals( "/" ) ) {
 +            if( !topology.getServices().isEmpty() ) {
 +              throw new DeploymentException( String.format(
 +                  "Topologies with services cannot contain an application (%s) with a root url.", name ) );
 +            }
 +          }
 +          contributors.put( url, contributor );
 +        }
 +      }
 +    }
 +    return contributors;
 +  }
 +
 +  private static void initialize(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services,
 +      Map.Entry<String,ServiceDeploymentContributor> applications ) {
 +    WebAppDescriptor wad = context.getWebAppDescriptor();
 +    String topoName = context.getTopology().getName();
 +    if( applications == null ) {
 +      String servletName = topoName + SERVLET_NAME_SUFFIX;
 +      wad.createServlet().servletName( servletName ).servletClass( GatewayServlet.class.getName() );
 +      wad.createServletMapping().servletName( servletName ).urlPattern( "/*" );
 +    } else {
 +      String filterName = topoName + FILTER_NAME_SUFFIX;
 +      wad.createFilter().filterName( filterName ).filterClass( GatewayServlet.class.getName() );
 +      wad.createFilterMapping().filterName( filterName ).urlPattern( "/*" );
 +    }
 +    if (gatewayServices != null) {
 +      gatewayServices.initializeContribution(context);
 +    } else {
 +      log.gatewayServicesNotInitialized();
 +    }
 +    initializeProviders( context, providers );
 +    initializeServices( context, services );
 +    initializeApplications( context, applications );
 +  }
 +
 +  private static void initializeProviders(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers ) {
 +    if( providers != null ) {
 +      for( Entry<String, List<ProviderDeploymentContributor>> entry : providers.entrySet() ) {
 +        for( ProviderDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            injectServices( contributor );
 +            log.initializeProvider( contributor.getName(), contributor.getRole() );
 +            contributor.initializeContribution( context );
 +          } catch( Exception e ) {
 +            log.failedToInitializeContribution( e );
 +            throw new DeploymentException( "Failed to initialize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void initializeServices( DeploymentContext context, Map<String, List<ServiceDeploymentContributor>> services ) {
 +    if( services != null ) {
 +      for( Entry<String, List<ServiceDeploymentContributor>> entry : services.entrySet() ) {
 +        for( ServiceDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            injectServices( contributor );
 +            log.initializeService( contributor.getName(), contributor.getRole() );
 +            contributor.initializeContribution( context );
 +          } catch( Exception e ) {
 +            log.failedToInitializeContribution( e );
 +            throw new DeploymentException( "Failed to initialize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void initializeApplications( DeploymentContext context, Map.Entry<String, ServiceDeploymentContributor> application ) {
 +    if( application != null ) {
 +      ServiceDeploymentContributor contributor = application.getValue();
 +      if( contributor != null ) {
 +        try {
 +          injectServices( contributor );
 +          log.initializeApplication( contributor.getName() );
 +          contributor.initializeContribution( context );
 +        } catch( Exception e ) {
 +          log.failedToInitializeContribution( e );
 +          throw new DeploymentException( "Failed to initialize application contribution.", e );
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void injectServices(Object contributor) {
 +    if (gatewayServices != null) {
 +      Statement stmt = null;
 +      for(String serviceName : gatewayServices.getServiceNames()) {
 +
 +        try {
 +          // TODO: this is just a temporary injection solution
 +          // TODO: test for the existence of the setter before attempting it
 +          // TODO: avoid exception throwing when there is no setter
 +          stmt = new Statement(contributor, "set" + serviceName, new Object[]{gatewayServices.getService(serviceName)});
 +          stmt.execute();
 +        } catch (NoSuchMethodException e) {
 +          // TODO: eliminate the possibility of this being thrown up front
 +        } catch (Exception e) {
 +          // Maybe it makes sense to throw exception
 +          log.failedToInjectService( serviceName, e );
 +          throw new DeploymentException("Failed to inject service.", e);
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void contribute(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services,
 +      Map.Entry<String,ServiceDeploymentContributor> applications ) {
 +    Topology topology = context.getTopology();
 +    contributeProviders( context, topology, providers );
 +    contributeServices( context, topology, services );
 +    contributeApplications( context, topology, applications );
 +  }
 +
 +  private static void contributeProviders( DeploymentContext context, Topology topology, Map<String, List<ProviderDeploymentContributor>> providers ) {
 +    for( Provider provider : topology.getProviders() ) {
 +      ProviderDeploymentContributor contributor = getProviderContributor( providers, provider.getRole(), provider.getName() );
 +      if( contributor != null && provider.isEnabled() ) {
 +        try {
 +          log.contributeProvider( provider.getName(), provider.getRole() );
 +          contributor.contributeProvider( context, provider );
 +        } catch( Exception e ) {
 +          // Maybe it makes sense to throw exception
 +          log.failedToContributeProvider( provider.getName(), provider.getRole(), e );
 +          throw new DeploymentException("Failed to contribute provider.", e);
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void contributeServices( DeploymentContext context, Topology topology, Map<String, List<ServiceDeploymentContributor>> services ) {
 +    if( services != null ) {
 +      for( Service service : topology.getServices() ) {
 +        ServiceDeploymentContributor contributor = getServiceContributor( service.getRole(), service.getName(), service.getVersion() );
 +        if( contributor != null ) {
 +          try {
 +            log.contributeService( service.getName(), service.getRole() );
 +            contributor.contributeService( context, service );
 +            if( gatewayServices != null ) {
 +              ServiceRegistry sr = gatewayServices.getService( GatewayServices.SERVICE_REGISTRY_SERVICE );
 +              if( sr != null ) {
 +                String regCode = sr.getRegistrationCode( topology.getName() );
 +                sr.registerService( regCode, topology.getName(), service.getRole(), service.getUrls() );
 +              }
 +            }
 +          } catch( Exception e ) {
 +            // Maybe it makes sense to throw exception
 +            log.failedToContributeService( service.getName(), service.getRole(), e );
 +            throw new DeploymentException( "Failed to contribute service.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void contributeApplications( DeploymentContext context, Topology topology, Map.Entry<String, ServiceDeploymentContributor> applications ) {
 +    if( applications != null ) {
 +      ServiceDeploymentContributor contributor = applications.getValue();
 +      if( contributor != null ) {
 +        try {
 +          log.contributeApplication( contributor.getName() );
 +          Application applicationDesc = topology.getApplication( applications.getKey() );
 +          contributor.contributeService( context, applicationDesc );
 +        } catch( Exception e ) {
 +          log.failedToInitializeContribution( e );
 +          throw new DeploymentException( "Failed to contribution application.", e );
 +        }
 +      }
 +    }
 +  }
 +
 +  public static ProviderDeploymentContributor getProviderContributor( String role, String name ) {
 +    ProviderDeploymentContributor contributor = null;
 +    Map<String,ProviderDeploymentContributor> nameMap = PROVIDER_CONTRIBUTOR_MAP.get( role );
 +    if( nameMap != null ) {
 +      if( name != null ) {
 +        contributor = nameMap.get( name );
 +      } else if ( !nameMap.isEmpty() ) {
 +        contributor = nameMap.values().iterator().next();
 +      }
 +    }
 +    return contributor;
 +  }
 +
 +  public static ServiceDeploymentContributor getServiceContributor( String role, String name, Version version ) {
 +    ServiceDeploymentContributor contributor = null;
 +    Map<String,Map<Version, ServiceDeploymentContributor>> nameMap = SERVICE_CONTRIBUTOR_MAP.get( role );
 +    if( nameMap != null && !nameMap.isEmpty()) {
 +      Map<Version, ServiceDeploymentContributor> versionMap = null;
 +      if ( name == null ) {
 +        versionMap = nameMap.values().iterator().next();
 +      } else {
 +        versionMap = nameMap.get( name );
 +      }
 +      if ( versionMap != null && !versionMap.isEmpty()) {
 +        if( version == null ) {
 +          contributor = ((TreeMap<Version, ServiceDeploymentContributor>) versionMap).firstEntry().getValue();
 +        } else {
 +          contributor = versionMap.get( version );
 +        }
 +      }
 +    }
 +    return contributor;
 +  }
 +
 +  private static void finalize(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services,
 +      Map.Entry<String,ServiceDeploymentContributor> application ) {
 +    try {
 +      // Write the gateway descriptor (gateway.xml) into the war.
 +      StringWriter writer = new StringWriter();
 +      GatewayDescriptorFactory.store( context.getGatewayDescriptor(), "xml", writer );
 +      context.getWebArchive().addAsWebInfResource(
 +          new StringAsset( writer.toString() ),
 +          GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_DEFAULT );
 +
 +      // Set the location of the gateway descriptor as a servlet init param.
 +      if( application == null ) {
 +        String servletName = context.getTopology().getName() + SERVLET_NAME_SUFFIX;
 +        ServletType<WebAppDescriptor> servlet = findServlet( context, servletName );
 +        // Coverity CID 1352314
 +        if( servlet == null ) {
 +          throw new DeploymentException( "Missing servlet " + servletName );
 +        } else {
 +          servlet.createInitParam()
 +              .paramName( GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_PARAM )
 +              .paramValue( "/WEB-INF/" + GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_DEFAULT );
 +        }
 +      } else {
 +        String servletName = context.getTopology().getName() + FILTER_NAME_SUFFIX;
 +        FilterType<WebAppDescriptor> filter = findFilter( context, servletName );
 +        // Coverity CID 1352313
 +        if( filter == null ) {
 +          throw new DeploymentException( "Missing filter " + servletName );
 +        } else {
 +          filter.createInitParam()
 +              .paramName( GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_PARAM )
 +              .paramValue( "/WEB-INF/" + GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_DEFAULT );
 +        }
 +      }
 +      if (gatewayServices != null) {
 +        gatewayServices.finalizeContribution(context);
 +      }
 +      finalizeProviders( context, providers );
 +      finalizeServices( context, services );
 +      finalizeApplications( context, application );
 +      writeDeploymentDescriptor( context, application != null );
 +    } catch ( IOException e ) {
 +      throw new RuntimeException( e );
 +    }
 +  }
 +
 +  private static void finalizeProviders( DeploymentContext context, Map<String, List<ProviderDeploymentContributor>> providers ) {
 +    if( providers != null ) {
 +      for( Entry<String, List<ProviderDeploymentContributor>> entry : providers.entrySet() ) {
 +        for( ProviderDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            log.finalizeProvider( contributor.getName(), contributor.getRole() );
 +            contributor.finalizeContribution( context );
 +          } catch( Exception e ) {
 +            // Maybe it makes sense to throw exception
 +            log.failedToFinalizeContribution( e );
 +            throw new DeploymentException( "Failed to finalize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void finalizeServices( DeploymentContext context, Map<String, List<ServiceDeploymentContributor>> services ) {
 +    if( services != null ) {
 +      for( Entry<String, List<ServiceDeploymentContributor>> entry : services.entrySet() ) {
 +        for( ServiceDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            log.finalizeService( contributor.getName(), contributor.getRole() );
 +            contributor.finalizeContribution( context );
 +          } catch( Exception e ) {
 +            // Maybe it makes sense to throw exception
 +            log.failedToFinalizeContribution( e );
 +            throw new DeploymentException( "Failed to finalize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void finalizeApplications( DeploymentContext context, Map.Entry<String, ServiceDeploymentContributor> application ) {
 +    if( application != null ) {
 +      ServiceDeploymentContributor contributor = application.getValue();
 +      if( contributor != null ) {
 +        try {
 +          log.finalizeApplication( contributor.getName() );
 +          contributor.finalizeContribution( context );
 +        } catch( Exception e ) {
 +          log.failedToInitializeContribution( e );
 +          throw new DeploymentException( "Failed to contribute application.", e );
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void writeDeploymentDescriptor( DeploymentContext context, boolean override ) {
 +    // Write the web.xml into the war.
 +    Asset webXmlAsset = new StringAsset( context.getWebAppDescriptor().exportAsString() );
 +    if( override ) {
 +      context.getWebArchive().addAsWebInfResource( webXmlAsset, "override-web.xml" );
 +    } else {
 +      context.getWebArchive().setWebXML( webXmlAsset );
 +    }
 +  }
 +
 +  public static ServletType<WebAppDescriptor> findServlet( DeploymentContext context, String name ) {
 +    List<ServletType<WebAppDescriptor>> servlets = context.getWebAppDescriptor().getAllServlet();
 +    for( ServletType<WebAppDescriptor> servlet : servlets ) {
 +      if( name.equals( servlet.getServletName() ) ) {
 +        return servlet;
 +      }
 +    }
 +    return null;
 +  }
 +
 +  public static FilterType<WebAppDescriptor> findFilter( DeploymentContext context, String name ) {
 +    List<FilterType<WebAppDescriptor>> filters = context.getWebAppDescriptor().getAllFilter();
 +    for( FilterType<WebAppDescriptor> filter : filters ) {
 +      if( name.equals( filter.getFilterName() ) ) {
 +        return filter;
 +      }
 +    }
 +    return null;
 +  }
 +
 +  private static void loadStacksServiceContributors( GatewayConfig config ) {
 +    String stacks = config.getGatewayServicesDir();
 +    log.usingServicesDirectory(stacks);
 +    File stacksDir = new File(stacks);
 +    Set<ServiceDeploymentContributor> deploymentContributors = ServiceDefinitionsLoader
 +        .loadServiceDefinitions(stacksDir);
 +    addServiceDeploymentContributors(deploymentContributors.iterator());
 +  }
 +
 +  private static void loadServiceContributors() {
 +    SERVICE_CONTRIBUTOR_MAP = new HashMap<>();
 +    ServiceLoader<ServiceDeploymentContributor> loader = ServiceLoader.load( ServiceDeploymentContributor.class );
 +    Iterator<ServiceDeploymentContributor> contributors = loader.iterator();
 +    addServiceDeploymentContributors(contributors);
 +  }
 +
 +  private static void addServiceDeploymentContributors(Iterator<ServiceDeploymentContributor> contributors) {
 +    while( contributors.hasNext() ) {
 +      ServiceDeploymentContributor contributor = contributors.next();
 +      if( contributor.getName() == null ) {
 +        log.ignoringServiceContributorWithMissingName( contributor.getClass().getName() );
 +        continue;
 +      }
 +      if( contributor.getRole() == null ) {
 +        log.ignoringServiceContributorWithMissingRole( contributor.getClass().getName() );
 +        continue;
 +      }
 +      if( contributor.getVersion() == null ) {
 +        log.ignoringServiceContributorWithMissingVersion(contributor.getClass().getName());
 +        continue;
 +      }
 +      Map<String,Map<Version, ServiceDeploymentContributor>> nameMap = SERVICE_CONTRIBUTOR_MAP.get( contributor.getRole() );
 +      if( nameMap == null ) {
 +        nameMap = new HashMap<>();
 +        SERVICE_CONTRIBUTOR_MAP.put( contributor.getRole(), nameMap );
 +      }
 +      Map<Version, ServiceDeploymentContributor> versionMap = nameMap.get(contributor.getName());
 +      if (versionMap == null) {
 +        versionMap = new TreeMap<>();
 +        nameMap.put(contributor.getName(), versionMap);
 +      }
 +      versionMap.put( contributor.getVersion(), contributor );
 +    }
 +  }
 +
 +  private static void loadProviderContributors() {
 +    Set<ProviderDeploymentContributor> set = new HashSet<>();
 +    Map<String,Map<String,ProviderDeploymentContributor>> roleMap
 +        = new HashMap<>();
 +
 +    ServiceLoader<ProviderDeploymentContributor> loader = ServiceLoader.load( ProviderDeploymentContributor.class );
 +    Iterator<ProviderDeploymentContributor> contributors = loader.iterator();
 +    while( contributors.hasNext() ) {
 +      ProviderDeploymentContributor contributor = contributors.next();
 +      if( contributor.getName() == null ) {
 +        log.ignoringProviderContributorWithMissingName( contributor.getClass().getName() );
 +        continue;
 +      }
 +      if( contributor.getRole() == null ) {
 +        log.ignoringProviderContributorWithMissingRole( contributor.getClass().getName() );
 +        continue;
 +      }
 +      set.add( contributor );
 +      Map<String,ProviderDeploymentContributor> nameMap = roleMap.get( contributor.getRole() );
 +      if( nameMap == null ) {
 +        nameMap = new HashMap<>();
 +        roleMap.put( contributor.getRole(), nameMap );
 +      }
 +      nameMap.put( contributor.getName(), contributor );
 +    }
 +    PROVIDER_CONTRIBUTORS = set;
 +    PROVIDER_CONTRIBUTOR_MAP = roleMap;
 +  }
 +
 +  static ProviderDeploymentContributor getProviderContributor(
 +      Map<String,List<ProviderDeploymentContributor>> providers, String role, String name ) {
 +    ProviderDeploymentContributor contributor = null;
 +    if( name == null ) {
 +      List<ProviderDeploymentContributor> list = providers.get( role );
 +      if( list != null && !list.isEmpty() ) {
 +        contributor = list.get( 0 );
 +      }
 +    } else {
 +      contributor = getProviderContributor( role, name );
 +      // Explicit configuration that is wrong should just fail
 +      // rather than randomly select a provider. Implicit default
 +      // providers can be selected when no name is provided.
 +      if (contributor == null || !contributor.getRole().equals(role) ||
 +          !contributor.getName().equals(name)) {
 +        throw new DeploymentException(
 +            "Failed to contribute provider. Role: " + role + " Name: " + name +
 +            ". Please check the topology for errors in name and role" +
 +            " and that the provider is on the classpath." );
 +      }
 +    }
 +    return contributor;
 +  }
 +}
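
The lookup above is asymmetric on purpose. When a topology omits the provider name,
the first contributor registered for the role is used as an implicit default; when a
name is given explicitly, it must match exactly or the deployment fails. A minimal,
self-contained sketch of those rules (the class, map and names below are illustrative
stand-ins, not the Knox API):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical illustration of the resolution rules in getProviderContributor:
// a null name selects an implicit default; a wrong explicit name fails fast
// (Knox throws DeploymentException; IllegalStateException stands in here).
public class ProviderResolutionSketch {

  static String resolve(Map<String, List<String>> providersByRole, String role, String name) {
    List<String> candidates = providersByRole.get(role);
    if (name == null) {
      // Implicit default: the first contributor registered for the role, if any.
      return (candidates != null && !candidates.isEmpty()) ? candidates.get(0) : null;
    }
    if (candidates == null || !candidates.contains(name)) {
      // Explicit configuration that is wrong fails rather than selecting randomly.
      throw new IllegalStateException("Failed to contribute provider. Role: " + role + " Name: " + name);
    }
    return name;
  }

  public static void main(String[] args) {
    Map<String, List<String>> providers = new HashMap<String, List<String>>();
    providers.put("authentication", Arrays.asList("ShiroProvider", "HadoopAuth"));
    System.out.println(resolve(providers, "authentication", null));         // ShiroProvider
    System.out.println(resolve(providers, "authentication", "HadoopAuth")); // HadoopAuth
    // resolve(providers, "authentication", "Typo") throws IllegalStateException.
  }
}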

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/deploy/impl/ServiceDefinitionDeploymentContributor.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/deploy/impl/ServiceDefinitionDeploymentContributor.java
index a056ac7,0000000..7e69af5
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/deploy/impl/ServiceDefinitionDeploymentContributor.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/deploy/impl/ServiceDefinitionDeploymentContributor.java
@@@ -1,256 -1,0 +1,264 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.deploy.impl;
 +
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentContext;
 +import org.apache.knox.gateway.deploy.ServiceDeploymentContributorBase;
 +import org.apache.knox.gateway.descriptor.FilterDescriptor;
 +import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
 +import org.apache.knox.gateway.descriptor.ResourceDescriptor;
 +import org.apache.knox.gateway.dispatch.GatewayDispatchFilter;
 +import org.apache.knox.gateway.filter.XForwardedHeaderFilter;
 +import org.apache.knox.gateway.filter.rewrite.api.CookieScopeServletFilter;
 +import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteRulesDescriptor;
 +import org.apache.knox.gateway.service.definition.CustomDispatch;
 +import org.apache.knox.gateway.service.definition.Policy;
 +import org.apache.knox.gateway.service.definition.Rewrite;
 +import org.apache.knox.gateway.service.definition.Route;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Version;
 +
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class ServiceDefinitionDeploymentContributor extends ServiceDeploymentContributorBase {
 +
 +  private static final String DISPATCH_ROLE = "dispatch";
 +
 +  private static final String DISPATCH_IMPL_PARAM = "dispatch-impl";
 +
 +  private static final String HTTP_CLIENT_FACTORY_PARAM = "httpClientFactory";
 +
 +  private static final String SERVICE_ROLE_PARAM = "serviceRole";
 +
 +  private static final String XFORWARDED_FILTER_NAME = "XForwardedHeaderFilter";
 +
 +  private static final String XFORWARDED_FILTER_ROLE = "xforwardedheaders";
 +
 +  private static final String DEFAULT_HA_DISPATCH_CLASS = "org.apache.knox.gateway.ha.dispatch.DefaultHaDispatch";
 +
 +  private static final String COOKIE_SCOPING_FILTER_NAME = "CookieScopeServletFilter";
 +
 +  private static final String COOKIE_SCOPING_FILTER_ROLE = "cookiescopef";
 +
 +  private ServiceDefinition serviceDefinition;
 +
 +  private UrlRewriteRulesDescriptor serviceRules;
 +
 +  public ServiceDefinitionDeploymentContributor(ServiceDefinition serviceDefinition, UrlRewriteRulesDescriptor serviceRules) {
 +    this.serviceDefinition = serviceDefinition;
 +    this.serviceRules = serviceRules;
 +  }
 +
 +  @Override
 +  public String getRole() {
 +    return serviceDefinition.getRole();
 +  }
 +
 +  @Override
 +  public String getName() {
 +    return serviceDefinition.getName();
 +  }
 +
 +  @Override
 +  public Version getVersion() {
 +    return new Version(serviceDefinition.getVersion());
 +  }
 +
 +  @Override
 +  public void contributeService(DeploymentContext context, Service service) throws Exception {
 +    contributeRewriteRules(context, service);
 +    contributeResources(context, service);
 +  }
 +
 +  private void contributeRewriteRules(DeploymentContext context, Service service) {
 +    if ( serviceRules != null ) {
 +      UrlRewriteRulesDescriptor clusterRules = context.getDescriptor("rewrite");
 +      clusterRules.addRules(serviceRules);
 +    }
 +  }
 +
 +  private void contributeResources(DeploymentContext context, Service service) {
 +    Map<String, String> filterParams = new HashMap<>();
 +    List<Route> bindings = serviceDefinition.getRoutes();
 +    for ( Route binding : bindings ) {
 +      List<Rewrite> filters = binding.getRewrites();
 +      if ( filters != null && !filters.isEmpty() ) {
 +        filterParams.clear();
 +        for ( Rewrite filter : filters ) {
 +          filterParams.put(filter.getTo(), filter.getApply());
 +        }
 +      }
 +      try {
 +        contributeResource(context, service, binding, filterParams);
 +      } catch ( URISyntaxException e ) {
 +        // A malformed route path should fail the deployment rather than being
 +        // silently printed and skipped.
 +        throw new IllegalArgumentException( "Invalid route path in service definition for role " + serviceDefinition.getRole(), e );
 +      }
 +    }
 +
 +  }
 +
 +  private void contributeResource(DeploymentContext context, Service service, Route binding, Map<String, String> filterParams) throws URISyntaxException {
 +    List<FilterParamDescriptor> params = new ArrayList<>();
 +    ResourceDescriptor resource = context.getGatewayDescriptor().addResource();
 +    resource.role(service.getRole());
 +    resource.pattern(binding.getPath());
 +    //add x-forwarded filter if enabled in config
 +    if (context.getGatewayConfig().isXForwardedEnabled()) {
 +      resource.addFilter().name(XFORWARDED_FILTER_NAME).role(XFORWARDED_FILTER_ROLE).impl(XForwardedHeaderFilter.class);
 +    }
 +    if (context.getGatewayConfig().isCookieScopingToPathEnabled()) {
 +      FilterDescriptor filter = resource.addFilter().name(COOKIE_SCOPING_FILTER_NAME).role(COOKIE_SCOPING_FILTER_ROLE).impl(CookieScopeServletFilter.class);
 +      filter.param().name(GatewayConfigImpl.HTTP_PATH).value(context.getGatewayConfig().getGatewayPath());
 +    }
 +    List<Policy> policyBindings = binding.getPolicies();
 +    if ( policyBindings == null ) {
 +      policyBindings = serviceDefinition.getPolicies();
 +    }
 +    if ( policyBindings == null ) {
 +      //add default set
 +      addDefaultPolicies(context, service, filterParams, params, resource);
 +    } else {
 +      addPolicies(context, service, filterParams, params, resource, policyBindings);
 +    }
 +    addDispatchFilter(context, service, resource, binding);
 +  }
 +
 +  private void addPolicies(DeploymentContext context, Service service, Map<String, String> filterParams, List<FilterParamDescriptor> params, ResourceDescriptor resource, List<Policy> policyBindings) throws URISyntaxException {
 +    for ( Policy policyBinding : policyBindings ) {
 +      String role = policyBinding.getRole();
 +      if ( role == null ) {
 +        throw new IllegalArgumentException("Policy defined for service " + service.getName() + " has no role");
 +      }
 +      role = role.trim().toLowerCase();
 +      if ( "rewrite".equals(role) ) {
 +        addRewriteFilter(context, service, filterParams, params, resource);
 +      } else if ( topologyContainsProviderType(context, role) ) {
 +        context.contributeFilter(service, resource, role, policyBinding.getName(), null);
 +      }
 +    }
 +  }
 +
 +  private void addDefaultPolicies(DeploymentContext context, Service service, Map<String, String> filterParams, List<FilterParamDescriptor> params, ResourceDescriptor resource) throws URISyntaxException {
 +    addWebAppSecFilters(context, service, resource);
 +    addAuthenticationFilter(context, service, resource);
 +    addRewriteFilter(context, service, filterParams, params, resource);
 +    addIdentityAssertionFilter(context, service, resource);
 +    addAuthorizationFilter(context, service, resource);
 +  }
 +
 +  private void addRewriteFilter(DeploymentContext context, Service service, Map<String, String> filterParams, List<FilterParamDescriptor> params, ResourceDescriptor resource) throws URISyntaxException {
 +    if ( !filterParams.isEmpty() ) {
 +      for ( Map.Entry<String, String> filterParam : filterParams.entrySet() ) {
 +        params.add(resource.createFilterParam().name(filterParam.getKey()).value(filterParam.getValue()));
 +      }
 +    }
 +    addRewriteFilter(context, service, resource, params);
 +  }
 +
 +  private void addDispatchFilter(DeploymentContext context, Service service, ResourceDescriptor resource, Route binding) {
 +    CustomDispatch customDispatch = binding.getDispatch();
 +    if ( customDispatch == null ) {
 +      customDispatch = serviceDefinition.getDispatch();
 +    }
 +    boolean isHaEnabled = isHaEnabled(context);
 +    if ( customDispatch != null ) {
 +      String haContributorName = customDispatch.getHaContributorName();
 +      String haClassName = customDispatch.getHaClassName();
 +      String httpClientFactory = customDispatch.getHttpClientFactory();
++      boolean useTwoWaySsl = customDispatch.getUseTwoWaySsl();
 +      if ( isHaEnabled) {
 +        if (haContributorName != null) {
 +          addDispatchFilter(context, service, resource, DISPATCH_ROLE, haContributorName);
 +        } else if (haClassName != null) {
-           addDispatchFilterForClass(context, service, resource, haClassName, httpClientFactory);
++          addDispatchFilterForClass(context, service, resource, haClassName, httpClientFactory, useTwoWaySsl);
 +        } else {
 +          addDefaultHaDispatchFilter(context, service, resource);
 +        }
 +      } else {
 +        String contributorName = customDispatch.getContributorName();
 +        if ( contributorName != null ) {
 +          addDispatchFilter(context, service, resource, DISPATCH_ROLE, contributorName);
 +        } else {
 +          String className = customDispatch.getClassName();
 +          if ( className != null ) {
-             addDispatchFilterForClass(context, service, resource, className, httpClientFactory);
++            addDispatchFilterForClass(context, service, resource, className, httpClientFactory, useTwoWaySsl);
 +          } else {
 +            //final fallback to the default dispatch
 +            addDispatchFilter(context, service, resource, DISPATCH_ROLE, "http-client");
 +          }
 +        }
 +      }
 +    } else if (isHaEnabled) {
 +      addDefaultHaDispatchFilter(context, service, resource);
 +    } else {
 +      addDispatchFilter(context, service, resource, DISPATCH_ROLE, "http-client");
 +    }
 +  }
 +
 +  private void addDefaultHaDispatchFilter(DeploymentContext context, Service service, ResourceDescriptor resource) {
 +    FilterDescriptor filter = addDispatchFilterForClass(context, service, resource, DEFAULT_HA_DISPATCH_CLASS, null);
 +    filter.param().name(SERVICE_ROLE_PARAM).value(service.getRole());
 +  }
 +
-   private FilterDescriptor addDispatchFilterForClass(DeploymentContext context, Service service, ResourceDescriptor resource, String dispatchClass, String httpClientFactory) {
++  private FilterDescriptor addDispatchFilterForClass(DeploymentContext context, Service service, ResourceDescriptor resource, String dispatchClass, String httpClientFactory, boolean useTwoWaySsl) {
 +    FilterDescriptor filter = resource.addFilter().name(getName()).role(DISPATCH_ROLE).impl(GatewayDispatchFilter.class);
 +    filter.param().name(DISPATCH_IMPL_PARAM).value(dispatchClass);
 +    if (httpClientFactory != null) {
 +      filter.param().name(HTTP_CLIENT_FACTORY_PARAM).value(httpClientFactory);
 +    }
++    // Start with the useTwoWaySsl value derived from the service definition,
++    // then let service params from the topology (written below) override it.
++    filter.param().name("useTwoWaySsl").value(Boolean.toString(useTwoWaySsl));
 +    for ( Map.Entry<String, String> serviceParam : service.getParams().entrySet() ) {
 +      filter.param().name(serviceParam.getKey()).value(serviceParam.getValue());
 +    }
 +    if ( context.getGatewayConfig().isHadoopKerberosSecured() ) {
 +      filter.param().name("kerberos").value("true");
 +    } else {
 +      //TODO: [sumit] Get rid of special case. Add config/param capabilities to service definitions?
 +      //special case for hive
 +      filter.param().name("basicAuthPreemptive").value("true");
 +    }
 +    return filter;
 +  }
 +
++  private FilterDescriptor addDispatchFilterForClass(DeploymentContext context, Service service, ResourceDescriptor resource, String dispatchClass, String httpClientFactory) {
++    return addDispatchFilterForClass(context, service, resource, dispatchClass, httpClientFactory, false);
++  }
++
 +  private boolean isHaEnabled(DeploymentContext context) {
 +    Provider provider = getProviderByRole(context, "ha");
 +    if ( provider != null && provider.isEnabled() ) {
 +      Map<String, String> params = provider.getParams();
 +      if ( params != null ) {
 +        if ( params.containsKey(getRole()) ) {
 +          return true;
 +        }
 +      }
 +    }
 +    return false;
 +  }
 +
 +}
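
The dispatch parameters here depend on write order: useTwoWaySsl is first set from the
service definition, and the topology's service params, written immediately afterwards,
may override it, as the in-line comment states. A small sketch of that precedence,
assuming a later param of the same name wins (illustrative class, not the Knox API):

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative precedence check: the service-definition value is written first,
// then topology-level service params overwrite any param of the same name.
public class DispatchParamPrecedenceSketch {
  public static void main(String[] args) {
    Map<String, String> filterParams = new LinkedHashMap<String, String>();
    filterParams.put("useTwoWaySsl", "false");              // from the service definition

    Map<String, String> topologyServiceParams = new LinkedHashMap<String, String>();
    topologyServiceParams.put("useTwoWaySsl", "true");      // from the topology <service> element

    filterParams.putAll(topologyServiceParams);             // later writes win
    System.out.println(filterParams.get("useTwoWaySsl"));   // prints: true
  }
}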

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/services/metrics/impl/instr/InstrHttpClientBuilderProvider.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/metrics/impl/instr/InstrHttpClientBuilderProvider.java
index 073adcd,0000000..1299d6f
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/metrics/impl/instr/InstrHttpClientBuilderProvider.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/metrics/impl/instr/InstrHttpClientBuilderProvider.java
@@@ -1,71 -1,0 +1,70 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.metrics.impl.instr;
 +
 +import com.codahale.metrics.MetricRegistry;
 +import com.codahale.metrics.httpclient.HttpClientMetricNameStrategy;
 +import com.codahale.metrics.httpclient.InstrumentedHttpRequestExecutor;
 +import org.apache.knox.gateway.services.metrics.InstrumentationProvider;
 +import org.apache.knox.gateway.services.metrics.MetricsContext;
 +import org.apache.knox.gateway.services.metrics.impl.DefaultMetricsService;
 +import org.apache.http.Header;
 +import org.apache.http.HttpRequest;
 +import org.apache.http.RequestLine;
 +import org.apache.http.client.utils.URIBuilder;
 +import org.apache.http.impl.client.HttpClientBuilder;
 +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
 +
 +import java.net.URISyntaxException;
 +
 +public class InstrHttpClientBuilderProvider implements
 +    InstrumentationProvider<HttpClientBuilder> {
 +
 +  @Override
 +  public HttpClientBuilder getInstrumented(MetricsContext metricsContext) {
 +    MetricRegistry registry = (MetricRegistry) metricsContext.getProperty(DefaultMetricsService.METRICS_REGISTRY);
-     return  HttpClientBuilder.create().setRequestExecutor(new InstrumentedHttpRequestExecutor(registry, TOPOLOGY_URL_AND_METHOD)).
-         setConnectionManager(new PoolingHttpClientConnectionManager());
++    return  HttpClientBuilder.create().setRequestExecutor(new InstrumentedHttpRequestExecutor(registry, TOPOLOGY_URL_AND_METHOD));
 +  }
 +
 +  @Override
 +  public HttpClientBuilder getInstrumented(HttpClientBuilder instanceClass, MetricsContext metricsContext) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  private static final HttpClientMetricNameStrategy TOPOLOGY_URL_AND_METHOD = new HttpClientMetricNameStrategy() {
 +    public String getNameFor(String name, HttpRequest request) {
 +      try {
 +        String context = "";
 +        Header header = request.getFirstHeader("X-Forwarded-Context");
 +        if (header != null) {
 +          context = header.getValue();
 +        }
 +        RequestLine requestLine = request.getRequestLine();
 +        URIBuilder uriBuilder = new URIBuilder(requestLine.getUri());
 +        String resourcePath = InstrUtils.getResourcePath(uriBuilder.removeQuery().build().toString());
 +        return MetricRegistry.name("service", new String[]{name, context + resourcePath, methodNameString(request)});
 +      } catch (URISyntaxException e) {
 +        throw new IllegalArgumentException(e);
 +      }
 +    }
 +
 +    private String methodNameString(HttpRequest request) {
 +      return request.getRequestLine().getMethod().toLowerCase() + "-requests";
 +    }
 +  };
 +}
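
The strategy above builds each metric name from the X-Forwarded-Context value, the
request path with its query string removed, and the lowercased HTTP method, joined
with dots by MetricRegistry.name. A rough, self-contained approximation of the
resulting shape (the helper class is hypothetical, and InstrUtils.getResourcePath is
approximated by simply dropping the query):

// Hypothetical sketch of the metric-name shape produced by TOPOLOGY_URL_AND_METHOD.
public class MetricNameSketch {

  static String metricName(String name, String forwardedContext, String uri, String method) {
    String path = uri.split("\\?", 2)[0]; // crude stand-in for removeQuery + InstrUtils.getResourcePath
    return String.join(".", "service", name, forwardedContext + path,
        method.toLowerCase() + "-requests");
  }

  public static void main(String[] args) {
    System.out.println(metricName("gateway", "/gateway/sandbox", "/webhdfs/v1/tmp?op=LISTSTATUS", "GET"));
    // service.gateway./gateway/sandbox/webhdfs/v1/tmp.get-requests
  }
}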

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
index a1a2609,0000000..afeade0
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
@@@ -1,105 -1,0 +1,116 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.builder;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +
 +public class BeanPropertyTopologyBuilder implements TopologyBuilder {
 +
 +    private String name;
 +    private String defaultService;
++    private boolean isGenerated;
 +    private List<Provider> providers;
 +    private List<Service> services;
 +    private List<Application> applications;
 +
 +    public BeanPropertyTopologyBuilder() {
 +        providers = new ArrayList<>();
 +        services = new ArrayList<>();
 +        applications = new ArrayList<>();
 +    }
 +
 +    public BeanPropertyTopologyBuilder name(String name) {
 +        this.name = name;
 +        return this;
 +    }
 +
 +    public String name() {
 +        return name;
 +    }
 +
++    public BeanPropertyTopologyBuilder generated(String isGenerated) {
++        this.isGenerated = Boolean.valueOf(isGenerated);
++        return this;
++    }
++
++    public boolean isGenerated() {
++        return isGenerated;
++    }
++
 +    public BeanPropertyTopologyBuilder defaultService(String defaultService) {
 +      this.defaultService = defaultService;
 +      return this;
 +    }
 +
 +    public String defaultService() {
 +      return defaultService;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addProvider(Provider provider) {
 +        providers.add(provider);
 +        return this;
 +    }
 +
 +    public List<Provider> providers() {
 +        return providers;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addService(Service service) {
 +        services.add(service);
 +        return this;
 +    }
 +
 +    public List<Service> services() {
 +        return services;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addApplication( Application application ) {
 +        applications.add(application);
 +        return this;
 +    }
 +
 +    public List<Application> applications() {
 +        return applications;
 +    }
 +
 +    public Topology build() {
 +        Topology topology = new Topology();
 +        topology.setName(name);
 +        topology.setDefaultServicePath(defaultService);
++        topology.setGenerated(isGenerated);
 +
 +        for (Provider provider : providers) {
 +            topology.addProvider(provider);
 +        }
 +
 +        for (Service service : services) {
 +            topology.addService(service);
 +        }
 +
 +        for (Application application : applications) {
 +            topology.addApplication(application);
 +        }
 +
 +        return topology;
 +    }
 +}
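
A short usage sketch of the builder including the new generated flag (names and
values are illustrative):

import org.apache.knox.gateway.topology.Topology;
import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;

// Illustrative only: builds a topology marked as auto-generated; build()
// propagates the flag via topology.setGenerated(...).
public class TopologyBuilderUsageSketch {
  public static void main(String[] args) {
    Topology topology = new BeanPropertyTopologyBuilder()
        .name("sandbox")              // topology name (illustrative)
        .generated("true")            // parsed to boolean by the builder
        .defaultService("WEBHDFS")    // default service path (illustrative)
        .build();
    System.out.println(topology.getName());
  }
}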

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
index 25997b1,0000000..7d25286
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
@@@ -1,48 -1,0 +1,58 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.util.List;
 +import java.util.Map;
 +
 +public interface SimpleDescriptor {
 +
 +    String getName();
 +
 +    String getDiscoveryType();
 +
 +    String getDiscoveryAddress();
 +
 +    String getDiscoveryUser();
 +
 +    String getDiscoveryPasswordAlias();
 +
 +    String getClusterName();
 +
 +    String getProviderConfig();
 +
 +    List<Service> getServices();
 +
++    List<Application> getApplications();
++
 +
 +    interface Service {
 +        String getName();
 +
 +        Map<String, String> getParams();
 +
 +        List<String> getURLs();
 +    }
++
++    interface Application {
++        String getName();
++
++        Map<String, String> getParams();
++
++        List<String> getURLs();
++    }
 +}
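
Since this hunk only extends the interface, a throwaway in-memory implementation can
make the new Application contract concrete, e.g. in a unit test (hypothetical, not
part of the patch):

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.knox.gateway.topology.simple.SimpleDescriptor;

// Hypothetical stand-in for a parsed application entry.
public class ApplicationStubSketch {
  public static void main(String[] args) {
    SimpleDescriptor.Application app = new SimpleDescriptor.Application() {
      public String getName() { return "knoxauth"; }
      public Map<String, String> getParams() { return Collections.emptyMap(); }
      public List<String> getURLs() { return Collections.emptyList(); }
    };
    System.out.println(app.getName());
  }
}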

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
index b54432d,0000000..2e3214d
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
@@@ -1,267 -1,0 +1,316 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.topology.discovery.DefaultServiceDiscoveryConfig;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileWriter;
 +import java.io.InputStreamReader;
 +import java.io.IOException;
 +
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +
 +
 +/**
 + * Processes simple topology descriptors, producing full topology files, which can subsequently be deployed to the
 + * gateway.
 + */
 +public class SimpleDescriptorHandler {
 +
 +    private static final Service[] NO_GATEWAY_SERVICES = new Service[]{};
 +
 +    private static final SimpleDescriptorMessages log = MessagesFactory.get(SimpleDescriptorMessages.class);
 +
 +    public static Map<String, File> handle(File desc) throws IOException {
 +        return handle(desc, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, Service...gatewayServices) throws IOException {
 +        return handle(desc, desc.getParentFile(), gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory) throws IOException {
 +        return handle(desc, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory, Service...gatewayServices) throws IOException {
 +        return handle(SimpleDescriptorFactory.parse(desc.getAbsolutePath()), desc.getParentFile(), destDirectory, gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory) {
 +        return handle(desc, srcDirectory, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory, Service...gatewayServices) {
 +        Map<String, File> result = new HashMap<>();
 +
 +        File topologyDescriptor;
 +
 +        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
 +        sdc.setUser(desc.getDiscoveryUser());
 +        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
-         ServiceDiscovery sd = ServiceDiscoveryFactory.get(desc.getDiscoveryType(), gatewayServices);
++
++        // Use the discovery type from the descriptor. If it's unspecified, employ the default type.
++        String discoveryType = desc.getDiscoveryType();
++        if (discoveryType == null) {
++            discoveryType = "AMBARI";
++        }
++
++        ServiceDiscovery sd = ServiceDiscoveryFactory.get(discoveryType, gatewayServices);
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, desc.getClusterName());
 +
 +        List<String> validServiceNames = new ArrayList<>();
 +
 +        Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, List<String>>        serviceURLs   = new HashMap<>();
 +
 +        if (cluster != null) {
 +            for (SimpleDescriptor.Service descService : desc.getServices()) {
 +                String serviceName = descService.getName();
 +
 +                List<String> descServiceURLs = descService.getURLs();
 +                if (descServiceURLs == null || descServiceURLs.isEmpty()) {
 +                    descServiceURLs = cluster.getServiceURLs(serviceName);
 +                }
 +
 +                // Validate the discovered service URLs
 +                List<String> validURLs = new ArrayList<>();
 +                if (descServiceURLs != null && !descServiceURLs.isEmpty()) {
 +                    // Validate the URL(s)
 +                    for (String descServiceURL : descServiceURLs) {
 +                        if (validateURL(serviceName, descServiceURL)) {
 +                            validURLs.add(descServiceURL);
 +                        }
 +                    }
 +
 +                    if (!validURLs.isEmpty()) {
 +                        validServiceNames.add(serviceName);
 +                    }
 +                }
 +
 +                // If there is at least one valid URL associated with the service, then add it to the map
 +                if (!validURLs.isEmpty()) {
 +                    serviceURLs.put(serviceName, validURLs);
 +                } else {
 +                    log.failedToDiscoverClusterServiceURLs(serviceName, cluster.getName());
 +                }
 +
 +                // Service params
 +                if (descService.getParams() != null) {
 +                    serviceParams.put(serviceName, descService.getParams());
 +                    if (!validServiceNames.contains(serviceName)) {
 +                        validServiceNames.add(serviceName);
 +                    }
 +                }
 +            }
 +        } else {
 +            log.failedToDiscoverClusterServices(desc.getClusterName());
 +        }
 +
 +        BufferedWriter fw = null;
 +        topologyDescriptor = null;
 +        File providerConfig;
 +        try {
 +            // Verify that the referenced provider configuration exists before attempting to read it
 +            providerConfig = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
 +            if (providerConfig == null) {
 +                log.failedToResolveProviderConfigRef(desc.getProviderConfig());
 +                throw new IllegalArgumentException("Unresolved provider configuration reference: " +
 +                                                   desc.getProviderConfig() + " ; Topology update aborted!");
 +            }
 +            result.put("reference", providerConfig);
 +
 +            // TODO: Should the contents of the provider config be validated before incorporating it into the topology?
 +
 +            String topologyFilename = desc.getName();
 +            if (topologyFilename == null) {
 +                topologyFilename = desc.getClusterName();
 +            }
 +            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
++
 +            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
 +
++            fw.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
++
++            fw.write("<!--==============================================-->\n");
++            fw.write("<!-- DO NOT EDIT. This is an auto-generated file. -->\n");
++            fw.write("<!--==============================================-->\n");
++
 +            fw.write("<topology>\n");
 +
++            // KNOX-1105 Indicate that this topology was auto-generated
++            fw.write("    <generated>true</generated>\n");
++
 +            // Copy the externalized provider configuration content into the topology descriptor in-line
 +            InputStreamReader policyReader = new InputStreamReader(new FileInputStream(providerConfig));
 +            char[] buffer = new char[1024];
 +            int count;
 +            while ((count = policyReader.read(buffer)) > 0) {
 +                fw.write(buffer, 0, count);
 +            }
 +            policyReader.close();
 +
++            // Services
 +            // Sort the service names to write the services alphabetically
 +            List<String> serviceNames = new ArrayList<>(validServiceNames);
 +            Collections.sort(serviceNames);
 +
 +            // Write the service declarations
 +            for (String serviceName : serviceNames) {
 +                fw.write("    <service>\n");
 +                fw.write("        <role>" + serviceName + "</role>\n");
 +
 +                // URLs
 +                List<String> urls = serviceURLs.get(serviceName);
 +                if (urls != null) {
 +                    for (String url : urls) {
 +                        fw.write("        <url>" + url + "</url>\n");
 +                    }
 +                }
 +
 +                // Params
 +                Map<String, String> svcParams = serviceParams.get(serviceName);
 +                if (svcParams != null) {
 +                    for (String paramName : svcParams.keySet()) {
 +                        fw.write("        <param>\n");
 +                        fw.write("            <name>" + paramName + "</name>\n");
 +                        fw.write("            <value>" + svcParams.get(paramName) + "</value>\n");
 +                        fw.write("        </param>\n");
 +                    }
 +                }
 +
 +                fw.write("    </service>\n");
 +            }
 +
++            // Applications
++            List<SimpleDescriptor.Application> apps = desc.getApplications();
++            if (apps != null) {
++                for (SimpleDescriptor.Application app : apps) {
++                    fw.write("    <application>\n");
++                    fw.write("        <name>" + app.getName() + "</name>\n");
++
++                    // URLs
++                    List<String> urls = app.getURLs();
++                    if (urls != null) {
++                        for (String url : urls) {
++                            fw.write("        <url>" + url + "</url>\n");
++                        }
++                    }
++
++                    // Params
++                    Map<String, String> appParams = app.getParams();
++                    if (appParams != null) {
++                        for (String paramName : appParams.keySet()) {
++                            fw.write("        <param>\n");
++                            fw.write("            <name>" + paramName + "</name>\n");
++                            fw.write("            <value>" + appParams.get(paramName) + "</value>\n");
++                            fw.write("        </param>\n");
++                        }
++                    }
++
++                    fw.write("    </application>\n");
++                }
++            }
++
 +            fw.write("</topology>\n");
 +
 +            fw.flush();
 +        } catch (IOException e) {
 +            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
 +            topologyDescriptor.delete();
 +        } finally {
 +            if (fw != null) {
 +                try {
 +                    fw.close();
 +                } catch (IOException e) {
 +                    // ignore
 +                }
 +            }
 +        }
 +
 +        result.put("topology", topologyDescriptor);
 +        return result;
 +    }
 +
++
 +    private static boolean validateURL(String serviceName, String url) {
 +        boolean result = false;
 +
 +        if (url != null && !url.isEmpty()) {
 +            try {
 +                new URI(url);
 +                result = true;
 +            } catch (URISyntaxException e) {
 +                log.serviceURLValidationFailed(serviceName, url, e);
 +            }
 +        }
 +
 +        return result;
 +    }
 +
 +
 +    private static File resolveProviderConfigurationReference(String reference, File srcDirectory) {
 +        File providerConfig;
 +
 +        // If the reference includes a path
 +        if (reference.contains(File.separator)) {
 +            // Check if it's an absolute path
 +            providerConfig = new File(reference);
 +            if (!providerConfig.exists()) {
 +                // If it's not an absolute path, try treating it as a relative path
 +                providerConfig = new File(srcDirectory, reference);
 +                if (!providerConfig.exists()) {
 +                    providerConfig = null;
 +                }
 +            }
 +        } else { // No file path, just a name
 +            // Check if it's co-located with the referencing descriptor
 +            providerConfig = new File(srcDirectory, reference);
 +            if (!providerConfig.exists()) {
 +                // Check the shared-providers config location
 +                File sharedProvidersDir = new File(srcDirectory, "../shared-providers");
 +                if (sharedProvidersDir.exists()) {
 +                    providerConfig = new File(sharedProvidersDir, reference);
 +                    if (!providerConfig.exists()) {
 +                        // Check if it's a valid name without the extension
 +                        providerConfig = new File(sharedProvidersDir, reference + ".xml");
 +                        if (!providerConfig.exists()) {
 +                            providerConfig = null;
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        return providerConfig;
 +    }
 +
 +}
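
Taken end to end, the handler turns a simple descriptor plus a resolvable provider
configuration into a full topology XML. A sketch of a typical invocation (the paths
are assumptions for illustration; the result-map keys match the code above):

import java.io.File;
import java.io.IOException;
import java.util.Map;

import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;

// Illustrative invocation; the file locations are assumptions, not Knox defaults.
public class SimpleDescriptorHandlerUsageSketch {
  public static void main(String[] args) throws IOException {
    File descriptor = new File("conf/descriptors/sandbox.json");
    File topologiesDir = new File("conf/topologies");

    Map<String, File> result = SimpleDescriptorHandler.handle(descriptor, topologiesDir);
    File providerConfig = result.get("reference"); // the resolved provider configuration
    File topology = result.get("topology");        // e.g. conf/topologies/sandbox.xml
    System.out.println("Generated " + topology.getAbsolutePath());
  }
}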


[21/53] [abbrv] knox git commit: KNOX-998 - Merge from master

Posted by mo...@apache.org.
KNOX-998 - Merge from master


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/46109ad8
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/46109ad8
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/46109ad8

Branch: refs/heads/master
Commit: 46109ad8563ea2286ca7e4756410e7753b2fd9cb
Parents: c754cc0
Author: Sandeep More <mo...@apache.org>
Authored: Thu Nov 2 10:37:58 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Thu Nov 2 10:37:58 2017 -0400

----------------------------------------------------------------------
 .../security/ldap/BaseDirectoryService.java     |   0
 .../ldap/BaseDirectoryServiceFactory.java       |   0
 .../ldap/SimpleDirectoryServiceFactory.java     |   0
 .../ambari/AmbariServiceDiscoveryMessages.java  |   2 +-
 .../webappsec/filter/StrictTranportFilter.java  | 137 ----------
 .../webappsec/deploy/WebAppSecContributor.java  |   2 +-
 .../webappsec/filter/StrictTranportFilter.java  | 137 ++++++++++
 .../webappsec/StrictTranportFilterTest.java     | 164 ------------
 .../webappsec/StrictTranportFilterTest.java     | 164 ++++++++++++
 .../org/apache/knox/gateway/GatewayFilter.java  |   2 +-
 .../impl/DefaultTokenAuthorityServiceTest.java  | 254 -------------------
 .../apache/knox/gateway/GatewayFilterTest.java  |   2 +-
 .../impl/DefaultTokenAuthorityServiceTest.java  | 254 +++++++++++++++++++
 .../topology/DefaultTopologyServiceTest.java    |  20 +-
 .../simple/SimpleDescriptorHandlerTest.java     |   2 +-
 .../topology/file/provider-config-one.xml       |  74 ------
 .../topology/file/simple-descriptor-five.json   |  14 -
 .../topology/file/simple-descriptor-six.json    |  18 --
 .../topology/file/ambari-cluster-policy.xml     |   4 +-
 .../topology/file/provider-config-one.xml       |  74 ++++++
 .../topology/file/simple-descriptor-five.json   |  14 +
 .../topology/file/simple-descriptor-six.json    |  18 ++
 .../service/admin/HrefListingMarshaller.java    |  75 ------
 .../service/admin/HrefListingMarshaller.java    |  75 ++++++
 .../service/admin/TopologiesResource.java       |   2 +-
 .../services/ambariui/2.2.1/service.xml         |   2 +-
 26 files changed, 755 insertions(+), 755 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java
----------------------------------------------------------------------
diff --git a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java
----------------------------------------------------------------------
diff --git a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java
----------------------------------------------------------------------
diff --git a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
index d91edef..2bdc94b 100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
@@ -21,7 +21,7 @@ import org.apache.knox.gateway.i18n.messages.MessageLevel;
 import org.apache.knox.gateway.i18n.messages.Messages;
 import org.apache.knox.gateway.i18n.messages.StackTrace;
 
-@Messages(logger="org.apache.hadoop.gateway.topology.discovery.ambari")
+@Messages(logger="org.apache.knox.gateway.topology.discovery.ambari")
 public interface AmbariServiceDiscoveryMessages {
 
     @Message(level = MessageLevel.ERROR,

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-provider-security-webappsec/src/main/java/org/apache/hadoop/gateway/webappsec/filter/StrictTranportFilter.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-webappsec/src/main/java/org/apache/hadoop/gateway/webappsec/filter/StrictTranportFilter.java b/gateway-provider-security-webappsec/src/main/java/org/apache/hadoop/gateway/webappsec/filter/StrictTranportFilter.java
deleted file mode 100644
index 28ac18a..0000000
--- a/gateway-provider-security-webappsec/src/main/java/org/apache/hadoop/gateway/webappsec/filter/StrictTranportFilter.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.webappsec.filter;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.http.HttpServletResponseWrapper;
-
-/**
- * This filter protects proxied webapps from protocol downgrade attacks 
- * and cookie hijacking.
- */
-public class StrictTranportFilter implements Filter {
-  private static final String STRICT_TRANSPORT = "Strict-Transport-Security";
-  private static final String CUSTOM_HEADER_PARAM = "strict.transport";
-
-  private String option = "max-age=31536000";
-
-  /* (non-Javadoc)
-   * @see javax.servlet.Filter#destroy()
-   */
-  @Override
-  public void destroy() {
-  }
-
-  /* (non-Javadoc)
-   * @see javax.servlet.Filter#doFilter(javax.servlet.ServletRequest, javax.servlet.ServletResponse, javax.servlet.FilterChain)
-   */
-  @Override
-  public void doFilter(ServletRequest req, ServletResponse res,
-      FilterChain chain) throws IOException, ServletException {
-    ((HttpServletResponse) res).setHeader(STRICT_TRANSPORT, option);
-    chain.doFilter(req, new StrictTranportResponseWrapper((HttpServletResponse) res));
-  }
-
-  /* (non-Javadoc)
-   * @see javax.servlet.Filter#init(javax.servlet.FilterConfig)
-   */
-  @Override
-  public void init(FilterConfig config) throws ServletException {
-    String customOption = config.getInitParameter(CUSTOM_HEADER_PARAM);
-    if (customOption != null) {
-      option = customOption;
-    }
-  }
-
-  public class StrictTranportResponseWrapper extends HttpServletResponseWrapper {
-    @Override
-    public void addHeader(String name, String value) {
-      // don't allow additional values to be added to
-      // the configured options value in topology
-      if (!name.equals(STRICT_TRANSPORT)) {
-        super.addHeader(name, value);
-      }
-    }
-
-    @Override
-    public void setHeader(String name, String value) {
-      // don't allow overwriting of configured value
-      if (!name.equals(STRICT_TRANSPORT)) {
-        super.setHeader(name, value);
-      }
-    }
-
-    /**
-     * construct a wrapper for this request
-     * 
-     * @param request
-     */
-    public StrictTranportResponseWrapper(HttpServletResponse response) {
-        super(response);
-    }
-
-    @Override
-    public String getHeader(String name) {
-        String headerValue = null;
-        if (name.equals(STRICT_TRANSPORT)) {
-            headerValue = option;
-        }
-        else {
-          headerValue = super.getHeader(name);
-        }
-        return headerValue;
-    }
-
-    /**
-     * get the Header names
-     */
-    @Override
-    public Collection<String> getHeaderNames() {
-        List<String> names = (List<String>) super.getHeaderNames();
-        if (names == null) {
-          names = new ArrayList<String>();
-        }
-        names.add(STRICT_TRANSPORT);
-        return names;
-    }
-
-    @Override
-    public Collection<String> getHeaders(String name) {
-        List<String> values = (List<String>) super.getHeaders(name);
-        if (name.equals(STRICT_TRANSPORT)) {
-          if (values == null) {
-            values = new ArrayList<String>();
-          }
-          values.add(option);
-        }
-        return values;
-    }
-  }
-
-}
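
This deletion is only the org.apache.hadoop side of the package move; the file is
re-added under org.apache.knox below. The wrapper's behavior is easy to miss, though:
once configured, the Strict-Transport-Security value is pinned, and later writes to
that header are ignored. A simplified stand-in without the servlet API (hypothetical
class, mirroring the guards in the wrapper):

import java.util.HashMap;
import java.util.Map;

// Simplified model of StrictTranportResponseWrapper's pinning behavior:
// writes to the protected header are dropped, reads return the configured option.
public class PinnedHeaderSketch {
  static final String STRICT_TRANSPORT = "Strict-Transport-Security";
  private final Map<String, String> headers = new HashMap<String, String>();
  private final String option = "max-age=31536000";

  void setHeader(String name, String value) {
    if (!name.equals(STRICT_TRANSPORT)) {   // mirror the wrapper's guard
      headers.put(name, value);
    }
  }

  String getHeader(String name) {
    return name.equals(STRICT_TRANSPORT) ? option : headers.get(name);
  }

  public static void main(String[] args) {
    PinnedHeaderSketch res = new PinnedHeaderSketch();
    res.setHeader(STRICT_TRANSPORT, "max-age=1");        // ignored
    System.out.println(res.getHeader(STRICT_TRANSPORT)); // max-age=31536000
  }
}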

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java b/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
index 17fb8c2..71a5af9 100644
--- a/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
+++ b/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
@@ -43,7 +43,7 @@ public class WebAppSecContributor extends
   private static final String XFRAME_OPTIONS_FILTER_CLASSNAME = "org.apache.knox.gateway.webappsec.filter.XFrameOptionsFilter";
   private static final String XFRAME_OPTIONS_ENABLED = "xframe.options.enabled";
   private static final String STRICT_TRANSPORT_SUFFIX = "_STRICTTRANSPORT";
-  private static final String STRICT_TRANSPORT_FILTER_CLASSNAME = "org.apache.hadoop.gateway.webappsec.filter.StrictTranportFilter";
+  private static final String STRICT_TRANSPORT_FILTER_CLASSNAME = "org.apache.knox.gateway.webappsec.filter.StrictTranportFilter";
   private static final String STRICT_TRANSPORT_ENABLED = "strict.transport.enabled";
 
 

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/filter/StrictTranportFilter.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/filter/StrictTranportFilter.java b/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/filter/StrictTranportFilter.java
new file mode 100644
index 0000000..0856297
--- /dev/null
+++ b/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/filter/StrictTranportFilter.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.webappsec.filter;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+
+/**
+ * This filter protects proxied webapps from protocol downgrade attacks 
+ * and cookie hijacking.
+ */
+public class StrictTranportFilter implements Filter {
+  private static final String STRICT_TRANSPORT = "Strict-Transport-Security";
+  private static final String CUSTOM_HEADER_PARAM = "strict.transport";
+
+  private String option = "max-age=31536000";
+
+  /* (non-Javadoc)
+   * @see javax.servlet.Filter#destroy()
+   */
+  @Override
+  public void destroy() {
+  }
+
+  /* (non-Javadoc)
+   * @see javax.servlet.Filter#doFilter(javax.servlet.ServletRequest, javax.servlet.ServletResponse, javax.servlet.FilterChain)
+   */
+  @Override
+  public void doFilter(ServletRequest req, ServletResponse res,
+      FilterChain chain) throws IOException, ServletException {
+    ((HttpServletResponse) res).setHeader(STRICT_TRANSPORT, option);
+    chain.doFilter(req, new StrictTranportResponseWrapper((HttpServletResponse) res));
+  }
+
+  /* (non-Javadoc)
+   * @see javax.servlet.Filter#init(javax.servlet.FilterConfig)
+   */
+  @Override
+  public void init(FilterConfig config) throws ServletException {
+    String customOption = config.getInitParameter(CUSTOM_HEADER_PARAM);
+    if (customOption != null) {
+      option = customOption;
+    }
+  }
+
+  public class StrictTranportResponseWrapper extends HttpServletResponseWrapper {
+    @Override
+    public void addHeader(String name, String value) {
+      // don't allow additional values to be added to
+      // the configured options value in topology
+      if (!name.equals(STRICT_TRANSPORT)) {
+        super.addHeader(name, value);
+      }
+    }
+
+    @Override
+    public void setHeader(String name, String value) {
+      // don't allow overwriting of configured value
+      if (!name.equals(STRICT_TRANSPORT)) {
+        super.setHeader(name, value);
+      }
+    }
+
+    /**
+     * Construct a wrapper for the given response.
+     *
+     * @param response the response to wrap
+     */
+    public StrictTranportResponseWrapper(HttpServletResponse response) {
+        super(response);
+    }
+
+    @Override
+    public String getHeader(String name) {
+        String headerValue = null;
+        if (name.equals(STRICT_TRANSPORT)) {
+            headerValue = option;
+        }
+        else {
+          headerValue = super.getHeader(name);
+        }
+        return headerValue;
+    }
+
+    /**
+     * Get the header names, ensuring Strict-Transport-Security is included.
+     */
+    @Override
+    public Collection<String> getHeaderNames() {
+        List<String> names = (List<String>) super.getHeaderNames();
+        if (names == null) {
+          names = new ArrayList<String>();
+        }
+        names.add(STRICT_TRANSPORT);
+        return names;
+    }
+
+    @Override
+    public Collection<String> getHeaders(String name) {
+        List<String> values = (List<String>) super.getHeaders(name);
+        if (name.equals(STRICT_TRANSPORT)) {
+          if (values == null) {
+            values = new ArrayList<String>();
+          }
+          values.add(option);
+        }
+        return values;
+    }
+  }
+
+}
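
The wrapper above makes the configured Strict-Transport-Security value authoritative for the response. A minimal sketch of the resulting behavior, assuming a filter initialized as in doFilter (illustrative only; filterConfig and response stand for the servlet objects in scope):

    StrictTranportFilter filter = new StrictTranportFilter();
    filter.init(filterConfig); // throws ServletException; reads "strict.transport" or keeps the default max-age=31536000

    // Inner-class instantiation needs the enclosing filter instance.
    HttpServletResponse wrapped =
        filter.new StrictTranportResponseWrapper(response);

    wrapped.setHeader("Strict-Transport-Security", "max-age=0");   // ignored by the wrapper
    wrapped.addHeader("Strict-Transport-Security", "max-age=0");   // ignored by the wrapper
    String value = wrapped.getHeader("Strict-Transport-Security"); // returns the configured option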

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-provider-security-webappsec/src/test/java/org/apache/hadoop/gateway/webappsec/StrictTranportFilterTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-webappsec/src/test/java/org/apache/hadoop/gateway/webappsec/StrictTranportFilterTest.java b/gateway-provider-security-webappsec/src/test/java/org/apache/hadoop/gateway/webappsec/StrictTranportFilterTest.java
deleted file mode 100644
index 0c63d7f..0000000
--- a/gateway-provider-security-webappsec/src/test/java/org/apache/hadoop/gateway/webappsec/StrictTranportFilterTest.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.webappsec;
-
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Enumeration;
-import java.util.Properties;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.gateway.webappsec.filter.StrictTranportFilter;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- *
- */
-public class StrictTranportFilterTest {
-  /**
-   * 
-   */
-  private static final String STRICT_TRANSPORT = "Strict-Transport-Security";
-  String options = null;
-  Collection<String> headerNames = null;
-  Collection<String> headers = null;
-
-  @Test
-  public void testDefaultOptionsValue() throws Exception {
-    try {
-      StrictTranportFilter filter = new StrictTranportFilter();
-      Properties props = new Properties();
-      props.put("strict.transport.enabled", "true");
-      filter.init(new TestFilterConfig(props));
-
-      HttpServletRequest request = EasyMock.createNiceMock(
-          HttpServletRequest.class);
-      HttpServletResponse response = EasyMock.createNiceMock(
-          HttpServletResponse.class);
-      EasyMock.replay(request);
-      EasyMock.replay(response);
-
-      TestFilterChain chain = new TestFilterChain();
-      filter.doFilter(request, response, chain);
-      Assert.assertTrue("doFilterCalled should not be false.",
-          chain.doFilterCalled );
-      Assert.assertTrue("Options value incorrect should be max-age=31536000 but is: "
-          + options, "max-age=31536000".equals(options));
-
-      Assert.assertTrue("Strict-Transport-Security count not equal to 1.", headers.size() == 1);
-    } catch (ServletException se) {
-      fail("Should NOT have thrown a ServletException.");
-    }
-  }
-
-  @Test
-  public void testConfiguredOptionsValue() throws Exception {
-    try {
-      StrictTranportFilter filter = new StrictTranportFilter();
-      Properties props = new Properties();
-      props.put("strict.transport.enabled", "true");
-      props.put("strict.transport", "max-age=31536010; includeSubDomains");
-      filter.init(new TestFilterConfig(props));
-
-      HttpServletRequest request = EasyMock.createNiceMock(
-          HttpServletRequest.class);
-      HttpServletResponse response = EasyMock.createNiceMock(
-          HttpServletResponse.class);
-      EasyMock.replay(request);
-      EasyMock.replay(response);
-
-      TestFilterChain chain = new TestFilterChain();
-      filter.doFilter(request, response, chain);
-      Assert.assertTrue("doFilterCalled should not be false.",
-          chain.doFilterCalled );
-      Assert.assertTrue("Options value incorrect should be max-age=31536010; includeSubDomains but is: "
-          + options, "max-age=31536010; includeSubDomains".equals(options));
-
-      Assert.assertTrue("Strict-Transport-Security count not equal to 1.", headers.size() == 1);
-    } catch (ServletException se) {
-      fail("Should NOT have thrown a ServletException.");
-    }
-  }
-
-  class TestFilterConfig implements FilterConfig {
-    Properties props = null;
-
-    public TestFilterConfig(Properties props) {
-      this.props = props;
-    }
-
-    @Override
-    public String getFilterName() {
-      return null;
-    }
-
-    /* (non-Javadoc)
-     * @see javax.servlet.FilterConfig#getServletContext()
-     */
-    @Override
-    public ServletContext getServletContext() {
-      return null;
-    }
-
-    /* (non-Javadoc)
-     * @see javax.servlet.FilterConfig#getInitParameter(java.lang.String)
-     */
-    @Override
-    public String getInitParameter(String name) {
-      return props.getProperty(name, null);
-    }
-
-    /* (non-Javadoc)
-     * @see javax.servlet.FilterConfig#getInitParameterNames()
-     */
-    @Override
-    public Enumeration<String> getInitParameterNames() {
-      return null;
-    }
-    
-  }
-
-  class TestFilterChain implements FilterChain {
-    boolean doFilterCalled = false;
-
-    /* (non-Javadoc)
-     * @see javax.servlet.FilterChain#doFilter(javax.servlet.ServletRequest, javax.servlet.ServletResponse)
-     */
-    @Override
-    public void doFilter(ServletRequest request, ServletResponse response)
-        throws IOException, ServletException {
-      doFilterCalled = true;
-      options = ((HttpServletResponse)response).getHeader(STRICT_TRANSPORT);
-      headerNames = ((HttpServletResponse)response).getHeaderNames();
-      headers = ((HttpServletResponse)response).getHeaders(STRICT_TRANSPORT);
-    }
-    
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-provider-security-webappsec/src/test/java/org/apache/knox/gateway/webappsec/StrictTranportFilterTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-webappsec/src/test/java/org/apache/knox/gateway/webappsec/StrictTranportFilterTest.java b/gateway-provider-security-webappsec/src/test/java/org/apache/knox/gateway/webappsec/StrictTranportFilterTest.java
new file mode 100644
index 0000000..fa0b5b6
--- /dev/null
+++ b/gateway-provider-security-webappsec/src/test/java/org/apache/knox/gateway/webappsec/StrictTranportFilterTest.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.webappsec;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Enumeration;
+import java.util.Properties;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.knox.gateway.webappsec.filter.StrictTranportFilter;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit tests for the StrictTranportFilter response header behavior.
+ */
+public class StrictTranportFilterTest {
+  /**
+   * The HTTP response header set by the filter under test.
+   */
+  private static final String STRICT_TRANSPORT = "Strict-Transport-Security";
+  String options = null;
+  Collection<String> headerNames = null;
+  Collection<String> headers = null;
+
+  @Test
+  public void testDefaultOptionsValue() throws Exception {
+    try {
+      StrictTranportFilter filter = new StrictTranportFilter();
+      Properties props = new Properties();
+      props.put("strict.transport.enabled", "true");
+      filter.init(new TestFilterConfig(props));
+
+      HttpServletRequest request = EasyMock.createNiceMock(
+          HttpServletRequest.class);
+      HttpServletResponse response = EasyMock.createNiceMock(
+          HttpServletResponse.class);
+      EasyMock.replay(request);
+      EasyMock.replay(response);
+
+      TestFilterChain chain = new TestFilterChain();
+      filter.doFilter(request, response, chain);
+      Assert.assertTrue("doFilterCalled should not be false.",
+          chain.doFilterCalled );
+      Assert.assertTrue("Options value incorrect; should be max-age=31536000 but is: "
+          + options, "max-age=31536000".equals(options));
+
+      Assert.assertTrue("Strict-Transport-Security count not equal to 1.", headers.size() == 1);
+    } catch (ServletException se) {
+      fail("Should NOT have thrown a ServletException.");
+    }
+  }
+
+  @Test
+  public void testConfiguredOptionsValue() throws Exception {
+    try {
+      StrictTranportFilter filter = new StrictTranportFilter();
+      Properties props = new Properties();
+      props.put("strict.transport.enabled", "true");
+      props.put("strict.transport", "max-age=31536010; includeSubDomains");
+      filter.init(new TestFilterConfig(props));
+
+      HttpServletRequest request = EasyMock.createNiceMock(
+          HttpServletRequest.class);
+      HttpServletResponse response = EasyMock.createNiceMock(
+          HttpServletResponse.class);
+      EasyMock.replay(request);
+      EasyMock.replay(response);
+
+      TestFilterChain chain = new TestFilterChain();
+      filter.doFilter(request, response, chain);
+      Assert.assertTrue("doFilterCalled should not be false.",
+          chain.doFilterCalled );
+      Assert.assertTrue("Options value incorrect; should be max-age=31536010; includeSubDomains but is: "
+          + options, "max-age=31536010; includeSubDomains".equals(options));
+
+      Assert.assertTrue("Strict-Transport-Security count not equal to 1.", headers.size() == 1);
+    } catch (ServletException se) {
+      fail("Should NOT have thrown a ServletException.");
+    }
+  }
+
+  class TestFilterConfig implements FilterConfig {
+    Properties props = null;
+
+    public TestFilterConfig(Properties props) {
+      this.props = props;
+    }
+
+    @Override
+    public String getFilterName() {
+      return null;
+    }
+
+    /* (non-Javadoc)
+     * @see javax.servlet.FilterConfig#getServletContext()
+     */
+    @Override
+    public ServletContext getServletContext() {
+      return null;
+    }
+
+    /* (non-Javadoc)
+     * @see javax.servlet.FilterConfig#getInitParameter(java.lang.String)
+     */
+    @Override
+    public String getInitParameter(String name) {
+      return props.getProperty(name, null);
+    }
+
+    /* (non-Javadoc)
+     * @see javax.servlet.FilterConfig#getInitParameterNames()
+     */
+    @Override
+    public Enumeration<String> getInitParameterNames() {
+      return null;
+    }
+    
+  }
+
+  class TestFilterChain implements FilterChain {
+    boolean doFilterCalled = false;
+
+    /* (non-Javadoc)
+     * @see javax.servlet.FilterChain#doFilter(javax.servlet.ServletRequest, javax.servlet.ServletResponse)
+     */
+    @Override
+    public void doFilter(ServletRequest request, ServletResponse response)
+        throws IOException, ServletException {
+      doFilterCalled = true;
+      options = ((HttpServletResponse)response).getHeader(STRICT_TRANSPORT);
+      headerNames = ((HttpServletResponse)response).getHeaderNames();
+      headers = ((HttpServletResponse)response).getHeaders(STRICT_TRANSPORT);
+    }
+    
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java b/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
index 8dd29bf..25d4f75 100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
@@ -127,7 +127,7 @@ public class GatewayFilter implements Filter {
 
     // if there was no match then look for a default service for the topology
     if (match == null) {
-      Topology topology = (Topology) servletRequest.getServletContext().getAttribute("org.apache.hadoop.gateway.topology");
+      Topology topology = (Topology) servletRequest.getServletContext().getAttribute("org.apache.knox.gateway.topology");
       if (topology != null) {
         String defaultServicePath = topology.getDefaultServicePath();
         if (defaultServicePath != null) {

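For context, org.apache.knox.gateway.topology is the ServletContext attribute under which the deployed Topology is published; the fallback above re-resolves unmatched requests against that topology's default service path. A minimal sketch of how a test can satisfy the lookup, mirroring the GatewayFilterTest change below (mock names are illustrative):

    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
    EasyMock.expect(context.getAttribute("org.apache.knox.gateway.topology"))
        .andReturn(topology).anyTimes();
    EasyMock.replay(context);
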
http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java b/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
deleted file mode 100644
index da55422..0000000
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.knox.gateway.services.token.impl;
-
-import java.io.File;
-import java.security.Principal;
-import java.util.HashMap;
-
-import org.apache.knox.gateway.config.GatewayConfig;
-import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.knox.gateway.services.security.KeystoreService;
-import org.apache.knox.gateway.services.security.MasterService;
-import org.apache.knox.gateway.services.security.impl.DefaultKeystoreService;
-import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
-import org.apache.knox.gateway.services.security.token.impl.JWT;
-import org.apache.knox.gateway.services.security.token.TokenServiceException;
-
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-/**
- * Some unit tests for the DefaultTokenAuthorityService.
- */
-public class DefaultTokenAuthorityServiceTest extends org.junit.Assert {
-
-  @Test
-  public void testTokenCreation() throws Exception {
-
-    Principal principal = EasyMock.createNiceMock(Principal.class);
-    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
-
-    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-    String basedir = System.getProperty("basedir");
-    if (basedir == null) {
-      basedir = new File(".").getCanonicalPath();
-    }
-
-    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
-    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
-    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
-
-    MasterService ms = EasyMock.createNiceMock(MasterService.class);
-    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
-
-    AliasService as = EasyMock.createNiceMock(AliasService.class);
-    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
-
-    EasyMock.replay(principal, config, ms, as);
-
-    KeystoreService ks = new DefaultKeystoreService();
-    ((DefaultKeystoreService)ks).setMasterService(ms);
-
-    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
-
-    JWTokenAuthority ta = new DefaultTokenAuthorityService();
-    ((DefaultTokenAuthorityService)ta).setAliasService(as);
-    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
-
-    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
-
-    JWT token = ta.issueToken(principal, "RS256");
-    assertEquals("KNOXSSO", token.getIssuer());
-    assertEquals("john.doe@example.com", token.getSubject());
-
-    assertTrue(ta.verifyToken(token));
-  }
-
-  @Test
-  public void testTokenCreationAudience() throws Exception {
-
-    Principal principal = EasyMock.createNiceMock(Principal.class);
-    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
-
-    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-    String basedir = System.getProperty("basedir");
-    if (basedir == null) {
-      basedir = new File(".").getCanonicalPath();
-    }
-
-    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
-    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
-    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
-
-    MasterService ms = EasyMock.createNiceMock(MasterService.class);
-    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
-
-    AliasService as = EasyMock.createNiceMock(AliasService.class);
-    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
-
-    EasyMock.replay(principal, config, ms, as);
-
-    KeystoreService ks = new DefaultKeystoreService();
-    ((DefaultKeystoreService)ks).setMasterService(ms);
-
-    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
-
-    JWTokenAuthority ta = new DefaultTokenAuthorityService();
-    ((DefaultTokenAuthorityService)ta).setAliasService(as);
-    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
-
-    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
-
-    JWT token = ta.issueToken(principal, "https://login.example.com", "RS256");
-    assertEquals("KNOXSSO", token.getIssuer());
-    assertEquals("john.doe@example.com", token.getSubject());
-    assertEquals("https://login.example.com", token.getAudience());
-
-    assertTrue(ta.verifyToken(token));
-  }
-
-  @Test
-  public void testTokenCreationNullAudience() throws Exception {
-
-    Principal principal = EasyMock.createNiceMock(Principal.class);
-    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
-
-    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-    String basedir = System.getProperty("basedir");
-    if (basedir == null) {
-      basedir = new File(".").getCanonicalPath();
-    }
-
-    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
-    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
-    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
-
-    MasterService ms = EasyMock.createNiceMock(MasterService.class);
-    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
-
-    AliasService as = EasyMock.createNiceMock(AliasService.class);
-    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
-
-    EasyMock.replay(principal, config, ms, as);
-
-    KeystoreService ks = new DefaultKeystoreService();
-    ((DefaultKeystoreService)ks).setMasterService(ms);
-
-    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
-
-    JWTokenAuthority ta = new DefaultTokenAuthorityService();
-    ((DefaultTokenAuthorityService)ta).setAliasService(as);
-    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
-
-    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
-
-    JWT token = ta.issueToken(principal, null, "RS256");
-    assertEquals("KNOXSSO", token.getIssuer());
-    assertEquals("john.doe@example.com", token.getSubject());
-
-    assertTrue(ta.verifyToken(token));
-  }
-
-  @Test
-  public void testTokenCreationSignatureAlgorithm() throws Exception {
-
-    Principal principal = EasyMock.createNiceMock(Principal.class);
-    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
-
-    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-    String basedir = System.getProperty("basedir");
-    if (basedir == null) {
-      basedir = new File(".").getCanonicalPath();
-    }
-
-    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
-    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
-    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
-
-    MasterService ms = EasyMock.createNiceMock(MasterService.class);
-    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
-
-    AliasService as = EasyMock.createNiceMock(AliasService.class);
-    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
-
-    EasyMock.replay(principal, config, ms, as);
-
-    KeystoreService ks = new DefaultKeystoreService();
-    ((DefaultKeystoreService)ks).setMasterService(ms);
-
-    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
-
-    JWTokenAuthority ta = new DefaultTokenAuthorityService();
-    ((DefaultTokenAuthorityService)ta).setAliasService(as);
-    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
-
-    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
-
-    JWT token = ta.issueToken(principal, "RS512");
-    assertEquals("KNOXSSO", token.getIssuer());
-    assertEquals("john.doe@example.com", token.getSubject());
-    assertTrue(token.getHeader().contains("RS512"));
-
-    assertTrue(ta.verifyToken(token));
-  }
-
-  @Test
-  public void testTokenCreationBadSignatureAlgorithm() throws Exception {
-
-    Principal principal = EasyMock.createNiceMock(Principal.class);
-    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
-
-    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-    String basedir = System.getProperty("basedir");
-    if (basedir == null) {
-      basedir = new File(".").getCanonicalPath();
-    }
-
-    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
-    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
-    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
-
-    MasterService ms = EasyMock.createNiceMock(MasterService.class);
-    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
-
-    AliasService as = EasyMock.createNiceMock(AliasService.class);
-    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
-
-    EasyMock.replay(principal, config, ms, as);
-
-    KeystoreService ks = new DefaultKeystoreService();
-    ((DefaultKeystoreService)ks).setMasterService(ms);
-
-    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
-
-    JWTokenAuthority ta = new DefaultTokenAuthorityService();
-    ((DefaultTokenAuthorityService)ta).setAliasService(as);
-    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
-
-    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
-
-    try {
-      ta.issueToken(principal, "none");
-      fail("Failure expected on a bad signature algorithm");
-    } catch (TokenServiceException ex) {
-        // expected
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
index ac22400..2fe1f1a 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
@@ -196,7 +196,7 @@ public class GatewayFilterTest {
         "Custom-Forwarded-For").anyTimes();
     EasyMock.expect( request.getRequestURL() ).andReturn( new StringBuffer("http://host:8443/gateway/sandbox/test-path/test-resource/") ).anyTimes();
 
-    EasyMock.expect( context.getAttribute( "org.apache.hadoop.gateway.topology" ) ).andReturn( topology ).anyTimes();
+    EasyMock.expect( context.getAttribute( "org.apache.knox.gateway.topology" ) ).andReturn( topology ).anyTimes();
     EasyMock.replay( request );
     EasyMock.replay( context );
     EasyMock.replay( topology );

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
new file mode 100644
index 0000000..da55422
--- /dev/null
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.services.token.impl;
+
+import java.io.File;
+import java.security.Principal;
+import java.util.HashMap;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.services.security.KeystoreService;
+import org.apache.knox.gateway.services.security.MasterService;
+import org.apache.knox.gateway.services.security.impl.DefaultKeystoreService;
+import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
+import org.apache.knox.gateway.services.security.token.impl.JWT;
+import org.apache.knox.gateway.services.security.token.TokenServiceException;
+
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+/**
+ * Some unit tests for the DefaultTokenAuthorityService.
+ */
+public class DefaultTokenAuthorityServiceTest extends org.junit.Assert {
+
+  @Test
+  public void testTokenCreation() throws Exception {
+
+    Principal principal = EasyMock.createNiceMock(Principal.class);
+    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
+
+    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+    String basedir = System.getProperty("basedir");
+    if (basedir == null) {
+      basedir = new File(".").getCanonicalPath();
+    }
+
+    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
+    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
+    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
+
+    MasterService ms = EasyMock.createNiceMock(MasterService.class);
+    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
+
+    AliasService as = EasyMock.createNiceMock(AliasService.class);
+    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
+
+    EasyMock.replay(principal, config, ms, as);
+
+    KeystoreService ks = new DefaultKeystoreService();
+    ((DefaultKeystoreService)ks).setMasterService(ms);
+
+    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
+
+    JWTokenAuthority ta = new DefaultTokenAuthorityService();
+    ((DefaultTokenAuthorityService)ta).setAliasService(as);
+    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
+
+    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
+
+    JWT token = ta.issueToken(principal, "RS256");
+    assertEquals("KNOXSSO", token.getIssuer());
+    assertEquals("john.doe@example.com", token.getSubject());
+
+    assertTrue(ta.verifyToken(token));
+  }
+
+  @Test
+  public void testTokenCreationAudience() throws Exception {
+
+    Principal principal = EasyMock.createNiceMock(Principal.class);
+    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
+
+    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+    String basedir = System.getProperty("basedir");
+    if (basedir == null) {
+      basedir = new File(".").getCanonicalPath();
+    }
+
+    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
+    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
+    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
+
+    MasterService ms = EasyMock.createNiceMock(MasterService.class);
+    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
+
+    AliasService as = EasyMock.createNiceMock(AliasService.class);
+    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
+
+    EasyMock.replay(principal, config, ms, as);
+
+    KeystoreService ks = new DefaultKeystoreService();
+    ((DefaultKeystoreService)ks).setMasterService(ms);
+
+    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
+
+    JWTokenAuthority ta = new DefaultTokenAuthorityService();
+    ((DefaultTokenAuthorityService)ta).setAliasService(as);
+    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
+
+    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
+
+    JWT token = ta.issueToken(principal, "https://login.example.com", "RS256");
+    assertEquals("KNOXSSO", token.getIssuer());
+    assertEquals("john.doe@example.com", token.getSubject());
+    assertEquals("https://login.example.com", token.getAudience());
+
+    assertTrue(ta.verifyToken(token));
+  }
+
+  @Test
+  public void testTokenCreationNullAudience() throws Exception {
+
+    Principal principal = EasyMock.createNiceMock(Principal.class);
+    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
+
+    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+    String basedir = System.getProperty("basedir");
+    if (basedir == null) {
+      basedir = new File(".").getCanonicalPath();
+    }
+
+    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
+    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
+    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
+
+    MasterService ms = EasyMock.createNiceMock(MasterService.class);
+    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
+
+    AliasService as = EasyMock.createNiceMock(AliasService.class);
+    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
+
+    EasyMock.replay(principal, config, ms, as);
+
+    KeystoreService ks = new DefaultKeystoreService();
+    ((DefaultKeystoreService)ks).setMasterService(ms);
+
+    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
+
+    JWTokenAuthority ta = new DefaultTokenAuthorityService();
+    ((DefaultTokenAuthorityService)ta).setAliasService(as);
+    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
+
+    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
+
+    JWT token = ta.issueToken(principal, null, "RS256");
+    assertEquals("KNOXSSO", token.getIssuer());
+    assertEquals("john.doe@example.com", token.getSubject());
+
+    assertTrue(ta.verifyToken(token));
+  }
+
+  @Test
+  public void testTokenCreationSignatureAlgorithm() throws Exception {
+
+    Principal principal = EasyMock.createNiceMock(Principal.class);
+    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
+
+    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+    String basedir = System.getProperty("basedir");
+    if (basedir == null) {
+      basedir = new File(".").getCanonicalPath();
+    }
+
+    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
+    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
+    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
+
+    MasterService ms = EasyMock.createNiceMock(MasterService.class);
+    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
+
+    AliasService as = EasyMock.createNiceMock(AliasService.class);
+    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
+
+    EasyMock.replay(principal, config, ms, as);
+
+    KeystoreService ks = new DefaultKeystoreService();
+    ((DefaultKeystoreService)ks).setMasterService(ms);
+
+    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
+
+    JWTokenAuthority ta = new DefaultTokenAuthorityService();
+    ((DefaultTokenAuthorityService)ta).setAliasService(as);
+    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
+
+    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
+
+    JWT token = ta.issueToken(principal, "RS512");
+    assertEquals("KNOXSSO", token.getIssuer());
+    assertEquals("john.doe@example.com", token.getSubject());
+    assertTrue(token.getHeader().contains("RS512"));
+
+    assertTrue(ta.verifyToken(token));
+  }
+
+  @Test
+  public void testTokenCreationBadSignatureAlgorithm() throws Exception {
+
+    Principal principal = EasyMock.createNiceMock(Principal.class);
+    EasyMock.expect(principal.getName()).andReturn("john.doe@example.com");
+
+    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+    String basedir = System.getProperty("basedir");
+    if (basedir == null) {
+      basedir = new File(".").getCanonicalPath();
+    }
+
+    EasyMock.expect(config.getGatewaySecurityDir()).andReturn(basedir + "/target/test-classes");
+    EasyMock.expect(config.getSigningKeystoreName()).andReturn("server-keystore.jks");
+    EasyMock.expect(config.getSigningKeyAlias()).andReturn("server").anyTimes();
+
+    MasterService ms = EasyMock.createNiceMock(MasterService.class);
+    EasyMock.expect(ms.getMasterSecret()).andReturn("horton".toCharArray());
+
+    AliasService as = EasyMock.createNiceMock(AliasService.class);
+    EasyMock.expect(as.getGatewayIdentityPassphrase()).andReturn("horton".toCharArray());
+
+    EasyMock.replay(principal, config, ms, as);
+
+    KeystoreService ks = new DefaultKeystoreService();
+    ((DefaultKeystoreService)ks).setMasterService(ms);
+
+    ((DefaultKeystoreService)ks).init(config, new HashMap<String, String>());
+
+    JWTokenAuthority ta = new DefaultTokenAuthorityService();
+    ((DefaultTokenAuthorityService)ta).setAliasService(as);
+    ((DefaultTokenAuthorityService)ta).setKeystoreService(ks);
+
+    ((DefaultTokenAuthorityService)ta).init(config, new HashMap<String, String>());
+
+    try {
+      ta.issueToken(principal, "none");
+      fail("Failure expected on a bad signature algorithm");
+    } catch (TokenServiceException ex) {
+        // expected
+    }
+  }
+
+}
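
Taken together, these tests reduce to the following issue/verify pattern (config, ks, and as are the mocked GatewayConfig, DefaultKeystoreService, and AliasService wired exactly as above; a sketch, not additional test code):

    JWTokenAuthority ta = new DefaultTokenAuthorityService();
    ((DefaultTokenAuthorityService) ta).setAliasService(as);
    ((DefaultTokenAuthorityService) ta).setKeystoreService(ks);
    ((DefaultTokenAuthorityService) ta).init(config, new HashMap<String, String>());

    JWT token = ta.issueToken(principal, "RS256");   // or (principal, audience, "RS256")
    assertEquals("KNOXSSO", token.getIssuer());
    assertEquals("john.doe@example.com", token.getSubject());
    assertTrue(ta.verifyToken(token));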

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
index 95d6f9d..e70d096 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
@@ -25,17 +25,13 @@ import org.apache.commons.io.monitor.FileAlterationMonitor;
 import org.apache.commons.io.monitor.FileAlterationObserver;
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
-import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
-import org.apache.knox.gateway.topology.*;
 import org.apache.hadoop.test.TestUtils;
 import org.apache.knox.gateway.topology.Param;
 import org.apache.knox.gateway.topology.Provider;
 import org.apache.knox.gateway.topology.Topology;
 import org.apache.knox.gateway.topology.TopologyEvent;
 import org.apache.knox.gateway.topology.TopologyListener;
-import org.apache.knox.gateway.services.security.AliasService;
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
@@ -192,7 +188,7 @@ public class DefaultTopologyServiceTest {
    * Test the lifecycle relationship between simple descriptors and topology files.
    *
    * N.B. This test depends on the DummyServiceDiscovery extension being configured:
-   *        org.apache.hadoop.gateway.topology.discovery.test.extension.DummyServiceDiscovery
+   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
    */
   @Test
   public void testSimpleDescriptorsTopologyGeneration() throws Exception {
@@ -313,7 +309,7 @@ public class DefaultTopologyServiceTest {
    * Test the lifecycle relationship between provider configuration files, simple descriptors, and topology files.
    *
    * N.B. This test depends on the DummyServiceDiscovery extension being configured:
-   *        org.apache.hadoop.gateway.topology.discovery.test.extension.DummyServiceDiscovery
+   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
    */
   @Test
   public void testTopologiesUpdateFromProviderConfigChange() throws Exception {
@@ -447,14 +443,16 @@ public class DefaultTopologyServiceTest {
       // "Deploy" the referenced provider configs first
       boolean isDeployed =
         ts.deployProviderConfiguration(provConfOne,
-                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/provider-config-one.xml").toURI())));
+                FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
+                    "org/apache/knox/gateway/topology/file/provider-config-one.xml").toURI())));
       assertTrue(isDeployed);
       File provConfOneFile = new File(sharedProvidersDir, provConfOne);
       assertTrue(provConfOneFile.exists());
 
       isDeployed =
         ts.deployProviderConfiguration(provConfTwo,
-                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/ambari-cluster-policy.xml").toURI())));
+                FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
+                    "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml").toURI())));
       assertTrue(isDeployed);
       File provConfTwoFile = new File(sharedProvidersDir, provConfTwo);
       assertTrue(provConfTwoFile.exists());
@@ -469,7 +467,8 @@ public class DefaultTopologyServiceTest {
       // "Deploy" the simple descriptor, which depends on provConfOne
       isDeployed =
         ts.deployDescriptor(simpleDescName,
-            FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json").toURI())));
+            FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
+                "org/apache/knox/gateway/topology/file/simple-descriptor-six.json").toURI())));
       assertTrue(isDeployed);
       File simpleDesc = new File(descriptorsDir, simpleDescName);
       assertTrue(simpleDesc.exists());
@@ -490,7 +489,8 @@ public class DefaultTopologyServiceTest {
       // Overwrite the simple descriptor with content that changes the provider config reference to provConfTwo
       isDeployed =
         ts.deployDescriptor(simpleDescName,
-              FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json").toURI())));
+              FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
+                  "org/apache/knox/gateway/topology/file/simple-descriptor-five.json").toURI())));
       assertTrue(isDeployed);
       assertTrue(simpleDesc.exists());
       ts.getProviderConfigurations();
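
In outline, the deployment flow this test exercises is the following (ts is the DefaultTopologyService under test; providerConfigXml and descriptorJson abbreviate the content strings read from the renamed test resources -- a sketch, not new test code):

    // Provider configurations are written to the shared-providers directory.
    boolean isDeployed = ts.deployProviderConfiguration(provConfOne, providerConfigXml);

    // Descriptors that reference them are written to the descriptors directory,
    // which triggers generation of the corresponding topology file.
    isDeployed = ts.deployDescriptor(simpleDescName, descriptorJson);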

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
index a0c977a..f40fad7 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
@@ -302,7 +302,7 @@ public class SimpleDescriptorHandlerTest {
      * a service.
      *
      * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
-     *             org.apache.hadoop.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
+     *             org.apache.knox.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
      */
     @Test
     public void testInvalidServiceURLFromDiscovery() throws Exception {

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml
deleted file mode 100644
index 95465a4..0000000
--- a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<gateway>
-    <provider>
-        <role>authentication</role>
-        <name>ShiroProvider</name>
-        <enabled>false</enabled>
-        <param>
-            <!--
-            session timeout in minutes,  this is really idle timeout,
-            defaults to 30mins, if the property value is not defined,,
-            current client authentication would expire if client idles contiuosly for more than this value
-            -->
-            <name>sessionTimeout</name>
-            <value>30</value>
-        </param>
-        <param>
-            <name>main.ldapRealm</name>
-            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>
-        </param>
-        <param>
-            <name>main.ldapContextFactory</name>
-            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>
-        </param>
-        <param>
-            <name>main.ldapRealm.contextFactory</name>
-            <value>$ldapContextFactory</value>
-        </param>
-        <param>
-            <name>main.ldapRealm.userDnTemplate</name>
-            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
-        </param>
-        <param>
-            <name>main.ldapRealm.contextFactory.url</name>
-            <value>ldap://localhost:33389</value>
-        </param>
-        <param>
-            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>
-            <value>simple</value>
-        </param>
-        <param>
-            <name>urls./**</name>
-            <value>authcBasic</value>
-        </param>
-    </provider>
-
-    <provider>
-        <role>identity-assertion</role>
-        <name>Default</name>
-        <enabled>true</enabled>
-    </provider>
-
-    <!--
-    Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.
-    For example, a hadoop service running in AWS may return a response that includes URLs containing the
-    some AWS internal host name.  If the client needs to make a subsequent request to the host identified
-    in those URLs they need to be mapped to external host names that the client Knox can use to connect.
-
-    If the external hostname and internal host names are same turn of this provider by setting the value of
-    enabled parameter as false.
-
-    The name parameter specifies the external host names in a comma separated list.
-    The value parameter specifies corresponding internal host names in a comma separated list.
-
-    Note that when you are using Sandbox, the external hostname needs to be localhost, as seen in out
-    of box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the
-    Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.
-    -->
-    <provider>
-        <role>hostmap</role>
-        <name>static</name>
-        <enabled>true</enabled>
-        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>
-    </provider>
-
-</gateway>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json
deleted file mode 100644
index 52cec35..0000000
--- a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-  "discovery-type":"DUMMY",
-  "discovery-address":"http://c6401.ambari.apache.org:8080",
-  "provider-config-ref":"../shared-providers/ambari-cluster-policy.xml",
-  "cluster":"dummy",
-  "services":[
-    {"name":"NAMENODE"},
-    {"name":"JOBTRACKER"},
-    {"name":"WEBHDFS"},
-    {"name":"OOZIE"},
-    {"name":"HIVE"},
-    {"name":"RESOURCEMANAGER"}
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json
deleted file mode 100644
index e78f193..0000000
--- a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "discovery-type":"DUMMY",
-  "discovery-address":"http://c6401.ambari.apache.org:8080",
-  "provider-config-ref":"../shared-providers/provider-config-one.xml",
-  "cluster":"dummy",
-  "services":[
-    {"name":"NAMENODE"},
-    {"name":"JOBTRACKER"},
-    {"name":"WEBHDFS"},
-    {"name":"WEBHCAT"},
-    {"name":"OOZIE"},
-    {"name":"WEBHBASE"},
-    {"name":"HIVE"},
-    {"name":"RESOURCEMANAGER"},
-    {"name":"AMBARI", "urls":["http://c6401.ambari.apache.org:8080"]},
-    {"name":"AMBARIUI", "urls":["http://c6401.ambari.apache.org:8080"]}
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml
index 8223bea..32ae6e1 100644
--- a/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml
+++ b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml
@@ -14,11 +14,11 @@
         </param>
         <param>
             <name>main.ldapRealm</name>
-            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>
+            <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>
         </param>
         <param>
             <name>main.ldapContextFactory</name>
-            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>
+            <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>
         </param>
         <param>
             <name>main.ldapRealm.contextFactory</name>

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/provider-config-one.xml
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/provider-config-one.xml b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/provider-config-one.xml
new file mode 100644
index 0000000..049d5cb
--- /dev/null
+++ b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/provider-config-one.xml
@@ -0,0 +1,74 @@
+<gateway>
+    <provider>
+        <role>authentication</role>
+        <name>ShiroProvider</name>
+        <enabled>false</enabled>
+        <param>
+            <!--
+            Session timeout in minutes. This is really an idle timeout;
+            it defaults to 30 minutes if the property value is not defined.
+            The current client authentication expires if the client idles
+            continuously for longer than this value.
+            -->
+            <name>sessionTimeout</name>
+            <value>30</value>
+        </param>
+        <param>
+            <name>main.ldapRealm</name>
+            <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>
+        </param>
+        <param>
+            <name>main.ldapContextFactory</name>
+            <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory</name>
+            <value>$ldapContextFactory</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.userDnTemplate</name>
+            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory.url</name>
+            <value>ldap://localhost:33389</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>
+            <value>simple</value>
+        </param>
+        <param>
+            <name>urls./**</name>
+            <value>authcBasic</value>
+        </param>
+    </provider>
+
+    <provider>
+        <role>identity-assertion</role>
+        <name>Default</name>
+        <enabled>true</enabled>
+    </provider>
+
+    <!--
+    Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.
+    For example, a Hadoop service running in AWS may return a response that includes URLs containing an
+    AWS-internal host name.  If the client needs to make a subsequent request to the host identified in
+    those URLs, those host names must be mapped to external host names that the client can use to connect through Knox.
+
+    If the external and internal host names are the same, turn off this provider by setting the value of
+    the enabled parameter to false.
+
+    The name parameter specifies the external host names in a comma-separated list.
+    The value parameter specifies the corresponding internal host names in a comma-separated list.
+
+    Note that when you are using Sandbox, the external host name needs to be localhost, as seen in the
+    out-of-the-box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the
+    Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.
+    -->
+    <provider>
+        <role>hostmap</role>
+        <name>static</name>
+        <enabled>true</enabled>
+        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>
+    </provider>
+
+</gateway>
\ No newline at end of file
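
A note on the hostmap provider configured above: it is essentially a static lookup between external host names (the param name) and internal host names (the param value). The sketch below illustrates that mapping behavior with plain Java collections; the class and method names are made up for illustration and are not Knox's actual hostmap implementation.

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the static hostmap behavior, mirroring
// <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>
public class StaticHostMapSketch {

    private final Map<String, String> internalToExternal = new HashMap<>();

    public StaticHostMapSketch() {
        // The param name is the external host; the param value lists internal hosts.
        register("localhost", "sandbox", "sandbox.hortonworks.com");
    }

    private void register(String external, String... internals) {
        for (String internal : internals) {
            internalToExternal.put(internal, external);
        }
    }

    // Rewrite an internal host name found in a cluster response to the external one.
    public String toExternal(String internalHost) {
        return internalToExternal.getOrDefault(internalHost, internalHost);
    }

    public static void main(String[] args) {
        StaticHostMapSketch hostmap = new StaticHostMapSketch();
        System.out.println(hostmap.toExternal("sandbox.hortonworks.com")); // localhost
        System.out.println(hostmap.toExternal("unmapped.example.com"));    // unchanged
    }
}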

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-five.json
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-five.json b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-five.json
new file mode 100644
index 0000000..52cec35
--- /dev/null
+++ b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-five.json
@@ -0,0 +1,14 @@
+{
+  "discovery-type":"DUMMY",
+  "discovery-address":"http://c6401.ambari.apache.org:8080",
+  "provider-config-ref":"../shared-providers/ambari-cluster-policy.xml",
+  "cluster":"dummy",
+  "services":[
+    {"name":"NAMENODE"},
+    {"name":"JOBTRACKER"},
+    {"name":"WEBHDFS"},
+    {"name":"OOZIE"},
+    {"name":"HIVE"},
+    {"name":"RESOURCEMANAGER"}
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-six.json
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-six.json b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-six.json
new file mode 100644
index 0000000..e78f193
--- /dev/null
+++ b/gateway-server/src/test/resources/org/apache/knox/gateway/topology/file/simple-descriptor-six.json
@@ -0,0 +1,18 @@
+{
+  "discovery-type":"DUMMY",
+  "discovery-address":"http://c6401.ambari.apache.org:8080",
+  "provider-config-ref":"../shared-providers/provider-config-one.xml",
+  "cluster":"dummy",
+  "services":[
+    {"name":"NAMENODE"},
+    {"name":"JOBTRACKER"},
+    {"name":"WEBHDFS"},
+    {"name":"WEBHCAT"},
+    {"name":"OOZIE"},
+    {"name":"WEBHBASE"},
+    {"name":"HIVE"},
+    {"name":"RESOURCEMANAGER"},
+    {"name":"AMBARI", "urls":["http://c6401.ambari.apache.org:8080"]},
+    {"name":"AMBARIUI", "urls":["http://c6401.ambari.apache.org:8080"]}
+  ]
+}
\ No newline at end of file
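
The simple descriptor above pairs discovery metadata (discovery-type, discovery-address, provider-config-ref, cluster) with a list of services, where a service entry may carry explicit urls or leave them to be discovered. Below is a hedged sketch of reading those fields with json-smart (net.minidev.json), the JSON library this codebase already uses for discovery; it is only an illustration, not Knox's actual simple-descriptor parsing.

import net.minidev.json.JSONArray;
import net.minidev.json.JSONObject;
import net.minidev.json.JSONValue;

public class DescriptorSketch {
    public static void main(String[] args) {
        String json = "{\"discovery-type\":\"DUMMY\"," +
                      "\"cluster\":\"dummy\"," +
                      "\"services\":[{\"name\":\"WEBHDFS\"}," +
                      "{\"name\":\"AMBARI\",\"urls\":[\"http://c6401.ambari.apache.org:8080\"]}]}";

        JSONObject descriptor = (JSONObject) JSONValue.parse(json);
        System.out.println("cluster = " + descriptor.get("cluster"));

        JSONArray services = (JSONArray) descriptor.get("services");
        for (Object s : services) {
            JSONObject service = (JSONObject) s;
            Object urls = service.get("urls"); // null means the URLs must be discovered
            System.out.println(service.get("name") + (urls != null ? " -> " + urls : " (to be discovered)"));
        }
    }
}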

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java
----------------------------------------------------------------------
diff --git a/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java b/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java
deleted file mode 100644
index c251213..0000000
--- a/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.admin;
-
-import org.eclipse.persistence.jaxb.JAXBContextProperties;
-
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.MessageBodyWriter;
-import javax.ws.rs.ext.Provider;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Type;
-import java.util.HashMap;
-import java.util.Map;
-
-@Provider
-@Produces({MediaType.APPLICATION_JSON})
-public class HrefListingMarshaller implements MessageBodyWriter<TopologiesResource.HrefListing> {
-
-    @Override
-    public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
-        return (TopologiesResource.HrefListing.class == type);
-    }
-
-    @Override
-    public long getSize(TopologiesResource.HrefListing instance,
-                        Class<?> type,
-                        Type genericType,
-                        Annotation[] annotations,
-                        MediaType mediaType) {
-        return -1;
-    }
-
-    @Override
-    public void writeTo(TopologiesResource.HrefListing instance,
-                        Class<?> type,
-                        Type genericType,
-                        Annotation[] annotations,
-                        MediaType mediaType,
-                        MultivaluedMap<String, Object> httpHeaders,
-                        OutputStream entityStream) throws IOException, WebApplicationException {
-        try {
-            Map<String, Object> properties = new HashMap<>(1);
-            properties.put( JAXBContextProperties.MEDIA_TYPE, mediaType.toString());
-            JAXBContext context = JAXBContext.newInstance(new Class[]{TopologiesResource.HrefListing.class}, properties);
-            Marshaller m = context.createMarshaller();
-            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
-            m.marshal(instance, entityStream);
-        } catch (JAXBException e) {
-            throw new IOException(e);
-        }
-    }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/HrefListingMarshaller.java
----------------------------------------------------------------------
diff --git a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/HrefListingMarshaller.java b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/HrefListingMarshaller.java
new file mode 100644
index 0000000..3313601
--- /dev/null
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/HrefListingMarshaller.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.admin;
+
+import org.eclipse.persistence.jaxb.JAXBContextProperties;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.HashMap;
+import java.util.Map;
+
+@Provider
+@Produces({MediaType.APPLICATION_JSON})
+public class HrefListingMarshaller implements MessageBodyWriter<TopologiesResource.HrefListing> {
+
+    @Override
+    public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
+        return (TopologiesResource.HrefListing.class == type);
+    }
+
+    @Override
+    public long getSize(TopologiesResource.HrefListing instance,
+                        Class<?> type,
+                        Type genericType,
+                        Annotation[] annotations,
+                        MediaType mediaType) {
+        return -1;
+    }
+
+    @Override
+    public void writeTo(TopologiesResource.HrefListing instance,
+                        Class<?> type,
+                        Type genericType,
+                        Annotation[] annotations,
+                        MediaType mediaType,
+                        MultivaluedMap<String, Object> httpHeaders,
+                        OutputStream entityStream) throws IOException, WebApplicationException {
+        try {
+            Map<String, Object> properties = new HashMap<>(1);
+            properties.put( JAXBContextProperties.MEDIA_TYPE, mediaType.toString());
+            JAXBContext context = JAXBContext.newInstance(new Class[]{TopologiesResource.HrefListing.class}, properties);
+            Marshaller m = context.createMarshaller();
+            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
+            m.marshal(instance, entityStream);
+        } catch (JAXBException e) {
+            throw new IOException(e);
+        }
+    }
+
+}
\ No newline at end of file
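
The marshaller above relies on EclipseLink MOXy's ability to emit JSON from a JAXB context when JAXBContextProperties.MEDIA_TYPE is set. A minimal standalone sketch of that same trick follows; the Href POJO is hypothetical, and the sketch assumes MOXy is configured as the JAXB provider (e.g. via a jaxb.properties file naming org.eclipse.persistence.jaxb.JAXBContextFactory).

import org.eclipse.persistence.jaxb.JAXBContextProperties;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.HashMap;
import java.util.Map;

public class MoxyJsonSketch {

    @XmlRootElement
    public static class Href { // hypothetical POJO, not Knox's HrefListing
        public String href = "https://gateway/admin/api/v1/topologies/sandbox";
    }

    public static void main(String[] args) throws Exception {
        // Same pattern as writeTo() above: ask MOXy for JSON instead of XML.
        Map<String, Object> properties = new HashMap<>(1);
        properties.put(JAXBContextProperties.MEDIA_TYPE, "application/json");
        JAXBContext context = JAXBContext.newInstance(new Class[]{Href.class}, properties);
        Marshaller m = context.createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        m.marshal(new Href(), System.out); // prints the POJO as formatted JSON
    }
}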

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
----------------------------------------------------------------------
diff --git a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
index 948447b..a0035fc 100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
@@ -461,7 +461,7 @@ public class TopologiesResource {
      return buildHref(t.getName(), req);
   }
 
-  private SimpleTopology getSimpleTopology(org.apache.hadoop.gateway.topology.Topology t, GatewayConfig config) {
+  private SimpleTopology getSimpleTopology(org.apache.knox.gateway.topology.Topology t, GatewayConfig config) {
     String uri = buildURI(t, config, request);
     String href = buildHref(t, request);
     return new SimpleTopology(t, uri, href);

http://git-wip-us.apache.org/repos/asf/knox/blob/46109ad8/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
----------------------------------------------------------------------
diff --git a/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml b/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
index ab4ab2b..c6135ae 100644
--- a/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
+++ b/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
@@ -87,6 +87,6 @@
 
         <!-- No need to rewrite Slider View -->
     </routes>
-    <dispatch classname="org.apache.hadoop.gateway.dispatch.PassAllHeadersNoEncodingDispatch"/>
+    <dispatch classname="org.apache.knox.gateway.dispatch.PassAllHeadersNoEncodingDispatch"/>
 </service>
 


[05/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
Merge branch 'master' into KNOX-998-Package_Restructuring

# Conflicts:
#	gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java
#	gateway-discovery-ambari/src/main/resources/ambari-service-discovery-component-config-mapping.properties
#	gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java
#	gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java
#	gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java
#	gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java
#	gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java
#	gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java
#	gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
#	gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
#	gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/8affbc02
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/8affbc02
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/8affbc02

Branch: refs/heads/master
Commit: 8affbc0226fb2e587bdadaac9270b071d52b8062
Parents: 557d569 92b1505
Author: Sandeep More <mo...@apache.org>
Authored: Mon Oct 16 10:06:25 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Mon Oct 16 10:06:25 2017 -0400

----------------------------------------------------------------------
 .../ambari/AmbariDynamicServiceURLCreator.java  | 151 ++++
 .../ambari/ConditionalValueHandler.java         |  24 +
 .../discovery/ambari/PropertyEqualsHandler.java |  76 ++
 .../ambari/ServiceURLPropertyConfig.java        | 324 +++++++
 .../discovery/ambari/SimpleValueHandler.java    |  32 +
 .../discovery/ambari/AmbariCluster.java         |   7 +-
 .../discovery/ambari/AmbariComponent.java       |  27 +-
 .../ambari/AmbariServiceDiscovery.java          |  58 +-
 .../ambari/AmbariServiceDiscoveryMessages.java  |  64 +-
 .../ambari/AmbariServiceURLCreator.java         | 184 ----
 ...iscovery-component-config-mapping.properties |  36 +
 .../ambari-service-discovery-url-mappings.xml   | 398 +++++++++
 .../AmbariDynamicServiceURLCreatorTest.java     | 876 +++++++++++++++++++
 .../ambari/AmbariServiceDiscoveryTest.java      |   4 +-
 .../jwt/filter/AbstractJWTFilter.java           |   2 +-
 .../federation/AbstractJWTFilterTest.java       |  31 +
 gateway-provider-security-picketlink/pom.xml    |  76 --
 .../gateway/picketlink/PicketlinkMessages.java  |  40 -
 .../picketlink/deploy/PicketlinkConf.java       | 194 ----
 ...PicketlinkFederationProviderContributor.java | 132 ---
 .../filter/CaptureOriginalURLFilter.java        |  89 --
 .../filter/PicketlinkIdentityAdapter.java       | 102 ---
 .../knox/gateway/picketlink/PicketlinkTest.java |  29 -
 gateway-release/pom.xml                         |   4 -
 .../gateway/websockets/ProxyInboundClient.java  | 107 +++
 .../impl/DefaultServiceRegistryService.java     |  50 +-
 .../security/impl/DefaultAliasService.java      |  12 +-
 .../simple/SimpleDescriptorHandler.java         |  69 +-
 .../simple/SimpleDescriptorMessages.java        |   8 +-
 .../websockets/GatewayWebsocketHandler.java     |  41 +-
 .../websockets/ProxyWebSocketAdapter.java       |  19 +-
 .../websockets/ProxyInboundClientTest.java      | 374 ++++++++
 .../simple/SimpleDescriptorHandlerTest.java     | 181 +++-
 .../services/ambariui/2.2.0/service.xml         |   5 +
 .../resources/services/atlas/0.8.0/rewrite.xml  |   6 +-
 .../gateway/service/knoxsso/WebSSOResource.java |   2 +-
 .../service/knoxsso/WebSSOResourceTest.java     |  58 ++
 .../service/knoxtoken/TokenResource.java        |  37 +-
 .../knoxtoken/TokenServiceResourceTest.java     | 203 +++++
 gateway-shell-release/pom.xml                   |   4 +
 .../knox/gateway/util/urltemplate/Parser.java   |  10 +-
 .../gateway/util/urltemplate/ParserTest.java    |  17 +
 pom.xml                                         |  44 +-
 43 files changed, 3217 insertions(+), 990 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
index fa9d710,0000000..d65bff7
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
@@@ -1,114 -1,0 +1,115 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +class AmbariCluster implements ServiceDiscovery.Cluster {
 +
 +    private String name = null;
 +
-     private AmbariServiceURLCreator urlCreator = new AmbariServiceURLCreator();
++    private AmbariDynamicServiceURLCreator urlCreator;
 +
 +    private Map<String, Map<String, ServiceConfiguration>> serviceConfigurations = new HashMap<>();
 +
 +    private Map<String, AmbariComponent> components = null;
 +
 +
 +    AmbariCluster(String name) {
 +        this.name = name;
-         components = new HashMap<String, AmbariComponent>();
++        components = new HashMap<>();
++        urlCreator = new AmbariDynamicServiceURLCreator(this);
 +    }
 +
 +    void addServiceConfiguration(String serviceName, String configurationType, ServiceConfiguration serviceConfig) {
 +        if (!serviceConfigurations.keySet().contains(serviceName)) {
 +            serviceConfigurations.put(serviceName, new HashMap<String, ServiceConfiguration>());
 +        }
 +        serviceConfigurations.get(serviceName).put(configurationType, serviceConfig);
 +    }
 +
 +
 +    void addComponent(AmbariComponent component) {
 +        components.put(component.getName(), component);
 +    }
 +
 +
 +    ServiceConfiguration getServiceConfiguration(String serviceName, String configurationType) {
 +        ServiceConfiguration sc = null;
 +        Map<String, ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +        if (configs != null) {
 +            sc = configs.get(configurationType);
 +        }
 +        return sc;
 +    }
 +
 +
 +    Map<String, AmbariComponent> getComponents() {
 +        return components;
 +    }
 +
 +
 +    AmbariComponent getComponent(String name) {
 +        return components.get(name);
 +    }
 +
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +
 +    @Override
 +    public List<String> getServiceURLs(String serviceName) {
 +        List<String> urls = new ArrayList<>();
-         urls.addAll(urlCreator.create(this, serviceName));
++        urls.addAll(urlCreator.create(serviceName));
 +        return urls;
 +    }
 +
 +
 +    static class ServiceConfiguration {
 +
 +        private String type;
 +        private String version;
 +        private Map<String, String> props;
 +
 +        ServiceConfiguration(String type, String version, Map<String, String> properties) {
 +            this.type = type;
 +            this.version = version;
 +            this.props = properties;
 +        }
 +
 +        public String getVersion() {
 +            return version;
 +        }
 +
 +        public String getType() {
 +            return type;
 +        }
 +
 +        public Map<String, String> getProperties() {
 +            return props;
 +        }
 +    }
 +
 +}
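
A short usage sketch of the AmbariCluster model above. Since the class is package-private, such code would have to live in the same package; the configuration values are made up, and the sketch assumes the discovery module and its bundled mapping resources are on the classpath (the constructor now instantiates an AmbariDynamicServiceURLCreator).

package org.apache.knox.gateway.topology.discovery.ambari;

import java.util.HashMap;
import java.util.Map;

public class AmbariClusterSketch {
    public static void main(String[] args) {
        AmbariCluster cluster = new AmbariCluster("testCluster");

        // Register a service configuration, keyed by service name and config type.
        Map<String, String> props = new HashMap<>();
        props.put("dfs.namenode.http-address", "c6401.ambari.apache.org:50070");
        cluster.addServiceConfiguration("HDFS", "hdfs-site",
            new AmbariCluster.ServiceConfiguration("hdfs-site", "1", props));

        // Look the property up the same way the URL creator would.
        AmbariCluster.ServiceConfiguration sc = cluster.getServiceConfiguration("HDFS", "hdfs-site");
        System.out.println(sc.getProperties().get("dfs.namenode.http-address"));
    }
}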

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariComponent.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariComponent.java
index 4750e7e,0000000..c8e7c6d
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariComponent.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariComponent.java
@@@ -1,76 -1,0 +1,85 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
++import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +
 +class AmbariComponent {
 +
 +    private String clusterName = null;
 +    private String serviceName = null;
 +    private String name        = null;
 +    private String version     = null;
 +
-     private List<String> hostNames = null;
++    private List<String> hostNames = new ArrayList<>();
 +
 +    private Map<String, String> properties = null;
 +
 +    AmbariComponent(String              name,
 +                    String              version,
 +                    String              cluster,
 +                    String              service,
 +                    List<String>        hostNames,
 +                    Map<String, String> properties) {
 +        this.name = name;
 +        this.serviceName = service;
 +        this.clusterName = cluster;
 +        this.version = version;
-         this.hostNames = hostNames;
 +        this.properties = properties;
++
++        if (hostNames != null) {
++            // Add the hostnames individually to prevent adding any null values
++            for (String hostName : hostNames) {
++                if (hostName != null) {
++                    this.hostNames.add(hostName);
++                }
++            }
++        }
 +    }
 +
-     public String getVersion() {
++    String getVersion() {
 +        return version;
 +    }
 +
-     public String getName() {
++    String getName() {
 +        return name;
 +    }
 +
-     public String getServiceName() {
++    String getServiceName() {
 +        return serviceName;
 +    }
 +
-     public String getClusterName() {
++    String getClusterName() {
 +        return clusterName;
 +    }
 +
-     public List<String> getHostNames() {
++    List<String> getHostNames() {
 +        return hostNames;
 +    }
 +
-     public Map<String, String> getConfigProperties() {
++    Map<String, String> getConfigProperties() {
 +        return properties;
 +    }
 +
-     public String getConfigProperty(String propertyName) {
++    String getConfigProperty(String propertyName) {
 +        return properties.get(propertyName);
 +    }
 +
 +}
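
The constructor above now copies host names one at a time so that null entries are dropped and getHostNames() can never return null. A tiny standalone illustration of that defensive copy, with hypothetical values:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class HostNameFilterSketch {
    public static void main(String[] args) {
        List<String> raw = Arrays.asList("c6401.ambari.apache.org", null, "c6402.ambari.apache.org");
        List<String> hostNames = new ArrayList<>();
        for (String hostName : raw) { // same loop as the AmbariComponent constructor
            if (hostName != null) {
                hostNames.add(hostName);
            }
        }
        System.out.println(hostNames); // [c6401.ambari.apache.org, c6402.ambari.apache.org]
    }
}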

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
index da03564,0000000..70af903
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
@@@ -1,291 -1,0 +1,305 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
++import java.io.IOException;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++
 +import net.minidev.json.JSONArray;
 +import net.minidev.json.JSONObject;
 +import net.minidev.json.JSONValue;
 +import org.apache.knox.gateway.config.ConfigurationException;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.AliasServiceException;
 +import org.apache.knox.gateway.topology.discovery.GatewayService;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
 +import org.apache.http.HttpEntity;
 +import org.apache.http.HttpStatus;
 +import org.apache.http.client.methods.CloseableHttpResponse;
 +import org.apache.http.client.methods.HttpGet;
 +import org.apache.http.impl.client.CloseableHttpClient;
 +import org.apache.http.message.BasicHeader;
 +import org.apache.http.util.EntityUtils;
 +
- import java.io.IOException;
- import java.util.*;
- 
 +
 +class AmbariServiceDiscovery implements ServiceDiscovery {
 +
 +    static final String TYPE = "AMBARI";
 +
 +    static final String AMBARI_CLUSTERS_URI = "/api/v1/clusters";
 +
 +    static final String AMBARI_HOSTROLES_URI =
 +                                       AMBARI_CLUSTERS_URI + "/%s/services?fields=components/host_components/HostRoles";
 +
 +    static final String AMBARI_SERVICECONFIGS_URI =
 +            AMBARI_CLUSTERS_URI + "/%s/configurations/service_config_versions?is_current=true";
 +
++    private static final String COMPONENT_CONFIG_MAPPING_FILE =
++                                                        "ambari-service-discovery-component-config-mapping.properties";
++
++    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
++
 +    // Map of component names to service configuration types
 +    private static Map<String, String> componentServiceConfigs = new HashMap<>();
 +    static {
-         componentServiceConfigs.put("NAMENODE", "hdfs-site");
-         componentServiceConfigs.put("RESOURCEMANAGER", "yarn-site");
-         componentServiceConfigs.put("OOZIE_SERVER", "oozie-site");
-         componentServiceConfigs.put("HIVE_SERVER", "hive-site");
-         componentServiceConfigs.put("WEBHCAT_SERVER", "webhcat-site");
-         componentServiceConfigs.put("HBASE_MASTER", "hbase-site");
-     } // TODO: Are there other service components, for which the endpoints can be discovered via Ambari?
++        try {
++            Properties configMapping = new Properties();
++            configMapping.load(AmbariServiceDiscovery.class.getClassLoader().getResourceAsStream(COMPONENT_CONFIG_MAPPING_FILE));
++            for (String componentName : configMapping.stringPropertyNames()) {
++                componentServiceConfigs.put(componentName, configMapping.getProperty(componentName));
++            }
++        } catch (Exception e) {
++            log.failedToLoadServiceDiscoveryConfiguration(COMPONENT_CONFIG_MAPPING_FILE, e);
++        }
++    }
 +
 +    private static final String DEFAULT_USER_ALIAS = "ambari.discovery.user";
 +    private static final String DEFAULT_PWD_ALIAS  = "ambari.discovery.password";
 +
-     private static AmbariServiceURLCreator urlCreator = new AmbariServiceURLCreator();
- 
-     private AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
- 
 +    @GatewayService
 +    private AliasService aliasService;
 +
 +    private CloseableHttpClient httpClient = null;
 +
-     private Map<String, Map<String, String>> serviceConfiguration = new HashMap<>();
- 
 +
 +    AmbariServiceDiscovery() {
 +        httpClient = org.apache.http.impl.client.HttpClients.createDefault();
 +    }
 +
 +
 +    @Override
 +    public String getType() {
 +        return TYPE;
 +    }
 +
 +
 +    @Override
 +    public Map<String, Cluster> discover(ServiceDiscoveryConfig config) {
 +        Map<String, Cluster> clusters = new HashMap<String, Cluster>();
 +
 +        String discoveryAddress = config.getAddress();
 +
 +        // Invoke Ambari REST API to discover the available clusters
 +        String clustersDiscoveryURL = String.format("%s" + AMBARI_CLUSTERS_URI, discoveryAddress);
 +
 +        JSONObject json = invokeREST(clustersDiscoveryURL, config.getUser(), config.getPasswordAlias());
 +
 +        // Parse the cluster names from the response, and perform the cluster discovery
 +        JSONArray clusterItems = (JSONArray) json.get("items");
 +        for (Object clusterItem : clusterItems) {
 +            String clusterName = (String) ((JSONObject)((JSONObject) clusterItem).get("Clusters")).get("cluster_name");
 +            try {
 +                Cluster c = discover(config, clusterName);
 +                clusters.put(clusterName, c);
 +            } catch (Exception e) {
 +                log.clusterDiscoveryError(clusterName, e);
 +            }
 +        }
 +
 +        return clusters;
 +    }
 +
 +
 +    @Override
 +    public Cluster discover(ServiceDiscoveryConfig config, String clusterName) {
 +        AmbariCluster cluster = new AmbariCluster(clusterName);
 +
 +        Map<String, String> serviceComponents = new HashMap<>();
 +
 +        String discoveryAddress = config.getAddress();
 +        String discoveryUser = config.getUser();
 +        String discoveryPwdAlias = config.getPasswordAlias();
 +
 +        Map<String, List<String>> componentHostNames = new HashMap<>();
 +        String hostRolesURL = String.format("%s" + AMBARI_HOSTROLES_URI, discoveryAddress, clusterName);
 +        JSONObject hostRolesJSON = invokeREST(hostRolesURL, discoveryUser, discoveryPwdAlias);
 +        if (hostRolesJSON != null) {
 +            // Process the host roles JSON
 +            JSONArray items = (JSONArray) hostRolesJSON.get("items");
 +            for (Object obj : items) {
 +                JSONArray components = (JSONArray) ((JSONObject) obj).get("components");
 +                for (Object component : components) {
 +                    JSONArray hostComponents = (JSONArray) ((JSONObject) component).get("host_components");
 +                    for (Object hostComponent : hostComponents) {
 +                        JSONObject hostRoles = (JSONObject) ((JSONObject) hostComponent).get("HostRoles");
 +                        String serviceName = (String) hostRoles.get("service_name");
 +                        String componentName = (String) hostRoles.get("component_name");
 +
 +                        serviceComponents.put(componentName, serviceName);
 +
- //                    String hostName = (String) hostRoles.get("host_name");
-                         String hostName = (String) hostRoles.get("public_host_name"); // Assuming public host name is most applicable
-                         log.discoveredServiceHost(serviceName, hostName);
-                         if (!componentHostNames.containsKey(componentName)) {
-                             componentHostNames.put(componentName, new ArrayList<String>());
++                        // Assuming public host name is more applicable than host_name
++                        String hostName = (String) hostRoles.get("public_host_name");
++                        if (hostName == null) {
++                            // Some (even slightly) older versions of Ambari/HDP do not return public_host_name,
++                            // so fall back to host_name in those cases.
++                            hostName = (String) hostRoles.get("host_name");
++                        }
++
++                        if (hostName != null) {
++                            log.discoveredServiceHost(serviceName, hostName);
++                            if (!componentHostNames.containsKey(componentName)) {
++                                componentHostNames.put(componentName, new ArrayList<String>());
++                            }
++                            componentHostNames.get(componentName).add(hostName);
 +                        }
-                         componentHostNames.get(componentName).add(hostName);
 +                    }
 +                }
 +            }
 +        }
 +
 +        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigurations =
 +                                                 new HashMap<String, Map<String, AmbariCluster.ServiceConfiguration>>();
 +        String serviceConfigsURL = String.format("%s" + AMBARI_SERVICECONFIGS_URI, discoveryAddress, clusterName);
 +        JSONObject serviceConfigsJSON = invokeREST(serviceConfigsURL, discoveryUser, discoveryPwdAlias);
 +        if (serviceConfigsJSON != null) {
 +            // Process the service configurations
 +            JSONArray serviceConfigs = (JSONArray) serviceConfigsJSON.get("items");
 +            for (Object serviceConfig : serviceConfigs) {
 +                String serviceName = (String) ((JSONObject) serviceConfig).get("service_name");
 +                JSONArray configurations = (JSONArray) ((JSONObject) serviceConfig).get("configurations");
 +                for (Object configuration : configurations) {
 +                    String configType = (String) ((JSONObject) configuration).get("type");
 +                    String configVersion = String.valueOf(((JSONObject) configuration).get("version"));
 +
 +                    Map<String, String> configProps = new HashMap<String, String>();
 +                    JSONObject configProperties = (JSONObject) ((JSONObject) configuration).get("properties");
 +                    for (String propertyName : configProperties.keySet()) {
 +                        configProps.put(propertyName, String.valueOf(((JSONObject) configProperties).get(propertyName)));
 +                    }
 +                    if (!serviceConfigurations.containsKey(serviceName)) {
 +                        serviceConfigurations.put(serviceName, new HashMap<String, AmbariCluster.ServiceConfiguration>());
 +                    }
 +                    serviceConfigurations.get(serviceName).put(configType, new AmbariCluster.ServiceConfiguration(configType, configVersion, configProps));
 +                    cluster.addServiceConfiguration(serviceName, configType, new AmbariCluster.ServiceConfiguration(configType, configVersion, configProps));
 +                }
 +            }
 +        }
 +
 +        // Construct the AmbariCluster model
 +        for (String componentName : serviceComponents.keySet()) {
 +            String serviceName = serviceComponents.get(componentName);
 +            List<String> hostNames = componentHostNames.get(componentName);
 +
 +            Map<String, AmbariCluster.ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +            String configType = componentServiceConfigs.get(componentName);
 +            if (configType != null) {
 +                AmbariCluster.ServiceConfiguration svcConfig = configs.get(configType);
 +                AmbariComponent c = new AmbariComponent(componentName,
 +                                                        svcConfig.getVersion(),
 +                                                        clusterName,
 +                                                        serviceName,
 +                                                        hostNames,
 +                                                        svcConfig.getProperties());
 +                cluster.addComponent(c);
 +            }
 +        }
 +
 +        return cluster;
 +    }
 +
 +
 +    protected JSONObject invokeREST(String url, String username, String passwordAlias) {
 +        JSONObject result = null;
 +
 +        CloseableHttpResponse response = null;
 +        try {
 +            HttpGet request = new HttpGet(url);
 +
 +            // If no configured username, then use default username alias
 +            String password = null;
 +            if (username == null) {
 +                if (aliasService != null) {
 +                    try {
 +                        char[] defaultUser = aliasService.getPasswordFromAliasForGateway(DEFAULT_USER_ALIAS);
 +                        if (defaultUser != null) {
 +                            username = new String(defaultUser);
 +                        }
 +                    } catch (AliasServiceException e) {
 +                        log.aliasServiceUserError(DEFAULT_USER_ALIAS, e.getLocalizedMessage());
 +                    }
 +                }
 +
 +                // If username is still null
 +                if (username == null) {
 +                    log.aliasServiceUserNotFound();
 +                    throw new ConfigurationException("No username is configured for Ambari service discovery.");
 +                }
 +            }
 +
 +            if (aliasService != null) {
 +                // If no password alias is configured, then try the default alias
 +                if (passwordAlias == null) {
 +                    passwordAlias = DEFAULT_PWD_ALIAS;
 +                }
 +                try {
 +                    char[] pwd = aliasService.getPasswordFromAliasForGateway(passwordAlias);
 +                    if (pwd != null) {
 +                        password = new String(pwd);
 +                    }
 +
 +                } catch (AliasServiceException e) {
 +                    log.aliasServicePasswordError(passwordAlias, e.getLocalizedMessage());
 +                }
 +            }
 +
 +            // If the password could not be determined
 +            if (password == null) {
 +                log.aliasServicePasswordNotFound();
 +                throw new ConfigurationException("No password is configured for Ambari service discovery.");
 +            }
 +
 +            // Add an auth header if credentials are available
 +            String encodedCreds =
 +                    org.apache.commons.codec.binary.Base64.encodeBase64String((username + ":" + password).getBytes());
 +            request.addHeader(new BasicHeader("Authorization", "Basic " + encodedCreds));
 +
 +            response = httpClient.execute(request);
 +
 +            if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
 +                HttpEntity entity = response.getEntity();
 +                if (entity != null) {
 +                    result = (JSONObject) JSONValue.parse((EntityUtils.toString(entity)));
 +                    log.debugJSON(result.toJSONString());
 +                } else {
 +                    log.noJSON(url);
 +                }
 +            } else {
 +                log.unexpectedRestResponseStatusCode(url, response.getStatusLine().getStatusCode());
 +            }
 +
 +        } catch (IOException e) {
 +            log.restInvocationError(url, e);
 +        } finally {
 +            if(response != null) {
 +                try {
 +                    response.close();
 +                } catch (IOException e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +        return result;
 +    }
 +
 +
 +}
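
The static initializer above replaces the hard-coded component-to-configuration map with entries loaded from a properties file on the classpath (ambari-service-discovery-component-config-mapping.properties). A minimal standalone sketch of the same pattern; the resource name and entries here are illustrative.

import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class ComponentConfigMappingSketch {
    public static void main(String[] args) throws Exception {
        // e.g. a classpath resource containing lines like: NAMENODE=hdfs-site
        String resource = "component-config-mapping.properties"; // illustrative name
        Map<String, String> componentServiceConfigs = new HashMap<>();

        try (InputStream in = ComponentConfigMappingSketch.class
                                  .getClassLoader().getResourceAsStream(resource)) {
            Properties configMapping = new Properties();
            configMapping.load(in); // throws a NullPointerException if the resource is missing
            for (String componentName : configMapping.stringPropertyNames()) {
                componentServiceConfigs.put(componentName, configMapping.getProperty(componentName));
            }
        }
        System.out.println(componentServiceConfigs);
    }
}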

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
index 2a153bb,0000000..d91edef
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
@@@ -1,81 -1,0 +1,121 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
- @Messages(logger="org.apache.gateway.topology.discovery.ambari")
++@Messages(logger="org.apache.hadoop.gateway.topology.discovery.ambari")
 +public interface AmbariServiceDiscoveryMessages {
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "Encountered an error during cluster {0} discovery: {1}")
++            text = "Failed to load service discovery configuration: {1}")
++    void failedToLoadServiceDiscoveryConfiguration(@StackTrace(level = MessageLevel.ERROR) Exception e);
++
++    @Message(level = MessageLevel.ERROR,
++             text = "Failed to load service discovery configuration {0}: {1}")
++    void failedToLoadServiceDiscoveryConfiguration(final String configuration,
++                               @StackTrace(level = MessageLevel.ERROR) Exception e);
++
++    @Message(level = MessageLevel.ERROR,
++             text = "Encountered an error during cluster {0} discovery: {1}")
 +    void clusterDiscoveryError(final String clusterName,
 +                               @StackTrace(level = MessageLevel.ERROR) Exception e);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
-             text = "REST invocation {0} failed: {1}")
++             text = "REST invocation {0} failed: {1}")
 +    void restInvocationError(final String url,
 +                             @StackTrace(level = MessageLevel.ERROR) Exception e);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "Encountered an error attempting to determine the user for alias {0} : {1}")
++             text = "Encountered an error attempting to determine the user for alias {0} : {1}")
 +    void aliasServiceUserError(final String alias, final String error);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "Encountered an error attempting to determine the password for alias {0} : {1}")
++             text = "Encountered an error attempting to determine the password for alias {0} : {1}")
 +    void aliasServicePasswordError(final String alias, final String error);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "No user configured for Ambari service discovery.")
++             text = "No user configured for Ambari service discovery.")
 +    void aliasServiceUserNotFound();
 +
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "No password configured for Ambari service discovery.")
++             text = "No password configured for Ambari service discovery.")
 +    void aliasServicePasswordNotFound();
 +
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "Unexpected REST invocation response code for {0} : {1}")
++             text = "Unexpected REST invocation response code for {0} : {1}")
 +    void unexpectedRestResponseStatusCode(final String url, int responseStatusCode);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "REST invocation {0} yielded a response without any JSON.")
++             text = "REST invocation {0} yielded a response without any JSON.")
 +    void noJSON(final String url);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
-             text = "REST invocation result: {0}")
++             text = "REST invocation result: {0}")
 +    void debugJSON(final String json);
 +
++    @Message(level = MessageLevel.DEBUG,
++            text = "Loaded component configuration mappings: {0}")
++    void loadedComponentConfigMappings(final String mappings);
 +
-     @Message(level = MessageLevel.INFO,
-             text = "Discovered: Service: {0}, Host: {1}")
++    @Message(level = MessageLevel.ERROR,
++             text = "Failed to load component configuration property mappings {0}: {1}")
++    void failedToLoadComponentConfigMappings(final String mappings,
++                                             @StackTrace(level = MessageLevel.ERROR) Exception e);
++
++    @Message(level = MessageLevel.DEBUG,
++             text = "Discovered: Service: {0}, Host: {1}")
 +    void discoveredServiceHost(final String serviceName, final String hostName);
 +
 +
++    @Message(level = MessageLevel.DEBUG,
++             text = "Querying the cluster for the {0} configuration ({1}) property: {2}")
++    void lookingUpServiceConfigProperty(final String serviceName, final String configType, final String propertyName);
++
++
++    @Message(level = MessageLevel.DEBUG,
++             text = "Querying the cluster for the {0} component configuration property: {1}")
++    void lookingUpComponentConfigProperty(final String componentName, final String propertyName);
++
++
++    @Message(level = MessageLevel.DEBUG,
++             text = "Querying the cluster for the {0} component's hosts")
++    void lookingUpComponentHosts(final String componentName);
++
++
++    @Message(level = MessageLevel.DEBUG,
++            text = "Handling a derived service URL mapping property for the {0} service: type = {1}, name = {2}")
++    void handlingDerivedProperty(final String serviceName, final String propertyType, final String propertyName);
++
++
++    @Message(level = MessageLevel.DEBUG,
++            text = "Determined the service URL mapping property {0} value: {1}")
++    void determinedPropertyValue(final String propertyName, final String propertyValue);
 +
 +
 +}
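
The interface above is Knox's annotation-driven i18n logging: MessagesFactory generates an implementation at runtime, and the {n} placeholders in each @Message are bound to the method arguments in order. A hedged usage sketch follows; the call site mirrors how AmbariServiceDiscovery obtains its logger, and output goes to whatever logger is configured for the name given in @Messages.

package org.apache.knox.gateway.topology.discovery.ambari;

import org.apache.knox.gateway.i18n.messages.MessagesFactory;

public class MessagesSketch {
    public static void main(String[] args) {
        // MessagesFactory generates an implementation of the annotated interface.
        AmbariServiceDiscoveryMessages log =
            MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
        // {0} and {1} are filled from the arguments; these messages log at DEBUG level.
        log.lookingUpComponentHosts("NAMENODE");
        log.discoveredServiceHost("HDFS", "c6401.ambari.apache.org");
    }
}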

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java
index 302eda7,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
index ec8aed2,0000000..21627ad
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
+++ b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
@@@ -1,856 -1,0 +1,858 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import net.minidev.json.JSONObject;
 +import net.minidev.json.JSONValue;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
- import static org.junit.Assert.*;
++import static org.junit.Assert.assertNotNull;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertTrue;
 +
 +
 +/**
 + * Test the Ambari ServiceDiscovery implementation.
 + *
 + * N.B. These tests do NOT verify Ambari API responses. They DO validate the Ambari ServiceDiscovery implementation's
 + *      treatment of the responses as they were observed at the time the tests were developed.
 + */
 +public class AmbariServiceDiscoveryTest {
 +
 +    @Test
 +    public void testSingleClusterDiscovery() throws Exception {
 +        final String discoveryAddress = "http://ambarihost:8080";
 +        final String clusterName = "testCluster";
 +        ServiceDiscovery sd = new TestAmbariServiceDiscovery(clusterName);
 +
 +        ServiceDiscoveryConfig sdc = EasyMock.createNiceMock(ServiceDiscoveryConfig.class);
 +        EasyMock.expect(sdc.getAddress()).andReturn(discoveryAddress).anyTimes();
 +        EasyMock.expect(sdc.getUser()).andReturn(null).anyTimes();
 +        EasyMock.replay(sdc);
 +
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, clusterName);
 +        assertNotNull(cluster);
 +        assertEquals(clusterName, cluster.getName());
 +        assertTrue(AmbariCluster.class.isAssignableFrom(cluster.getClass()));
 +        assertEquals(6, ((AmbariCluster) cluster).getComponents().size());
 +
 +//        printServiceURLs(cluster);
 +    }
 +
 +
 +    @Test
 +    public void testBulkClusterDiscovery() throws Exception {
 +        final String discoveryAddress = "http://ambarihost:8080";
 +        final String clusterName = "anotherCluster";
 +        ServiceDiscovery sd = new TestAmbariServiceDiscovery(clusterName);
 +
 +        ServiceDiscoveryConfig sdc = EasyMock.createNiceMock(ServiceDiscoveryConfig.class);
 +        EasyMock.expect(sdc.getAddress()).andReturn(discoveryAddress).anyTimes();
 +        EasyMock.expect(sdc.getUser()).andReturn(null).anyTimes();
 +        EasyMock.replay(sdc);
 +
 +        Map<String, ServiceDiscovery.Cluster> clusters = sd.discover(sdc);
 +        assertNotNull(clusters);
 +        assertEquals(1, clusters.size());
 +        ServiceDiscovery.Cluster cluster = clusters.get(clusterName);
 +        assertNotNull(cluster);
 +        assertEquals(clusterName, cluster.getName());
 +        assertTrue(AmbariCluster.class.isAssignableFrom(cluster.getClass()));
 +        assertEquals(6, ((AmbariCluster) cluster).getComponents().size());
 +
 +//        printServiceURLs(cluster, "NAMENODE", "WEBHCAT", "OOZIE", "RESOURCEMANAGER");
 +    }
 +
 +
 +    private static void printServiceURLs(ServiceDiscovery.Cluster cluster) {
 +        final String[] services = new String[]{"NAMENODE",
 +                                               "JOBTRACKER",
 +                                               "WEBHDFS",
 +                                               "WEBHCAT",
 +                                               "OOZIE",
 +                                               "WEBHBASE",
 +                                               "HIVE",
 +                                               "RESOURCEMANAGER"};
 +        printServiceURLs(cluster, services);
 +    }
 +
 +
 +    private static void printServiceURLs(ServiceDiscovery.Cluster cluster, String...services) {
 +        for (String name : services) {
 +            StringBuilder sb = new StringBuilder();
 +            List<String> urls = cluster.getServiceURLs(name);
 +            if (urls != null && !urls.isEmpty()) {
 +                for (String url : urls) {
 +                    sb.append(url);
 +                    sb.append(" ");
 +                }
 +            }
 +            System.out.println(String.format("%18s: %s", name, sb.toString()));
 +        }
 +    }
 +
 +
 +    /**
 +     * ServiceDiscovery implementation derived from AmbariServiceDiscovery, overriding the invokeREST method
 +     * to eliminate the need to perform actual HTTP interactions with a real Ambari endpoint.
 +     */
 +    private static final class TestAmbariServiceDiscovery extends AmbariServiceDiscovery {
 +
 +        static final String CLUSTER_PLACEHOLDER = "CLUSTER_NAME";
 +
 +        private Map<String, JSONObject> cannedResponses = new HashMap<>();
 +
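 +        // Canned JSON responses keyed by Ambari REST API path, with the requested
 +        // cluster name substituted into each response template.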
 +        TestAmbariServiceDiscovery(String clusterName) {
 +            cannedResponses.put(AMBARI_CLUSTERS_URI,
 +                                (JSONObject) JSONValue.parse(CLUSTERS_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
 +                                                                                               clusterName)));
 +
 +            cannedResponses.put(String.format(AMBARI_HOSTROLES_URI, clusterName),
 +                                (JSONObject) JSONValue.parse(HOSTROLES_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
 +                                                                                                clusterName)));
 +
 +            cannedResponses.put(String.format(AMBARI_SERVICECONFIGS_URI, clusterName),
 +                                (JSONObject) JSONValue.parse(SERVICECONFIGS_JSON_TEMPLATE.replaceAll(CLUSTER_PLACEHOLDER,
 +                                                                                                     clusterName)));
 +        }
 +
 +        @Override
 +        protected JSONObject invokeREST(String url, String username, String passwordAlias) {
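 +            // Key the lookup on the API path alone (everything from "/api"), making the
 +            // scheme, host and port of the requested URL irrelevant to the test.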
 +            return cannedResponses.get(url.substring(url.indexOf("/api")));
 +        }
 +    }
 +
 +
 +    ////////////////////////////////////////////////////////////////////////
 +    //  JSON response templates, based on actual response content excerpts
 +    ////////////////////////////////////////////////////////////////////////
 +
 +    private static final String CLUSTERS_JSON_TEMPLATE =
 +    "{\n" +
 +    "  \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters\",\n" +
 +    "  \"items\" : [\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"Clusters\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"version\" : \"HDP-2.6\"\n" +
 +    "      }\n" +
 +    "    }\n" +
 +    "  ]" +
 +    "}";
 +
 +
 +    private static final String HOSTROLES_JSON_TEMPLATE =
 +    "{\n" +
 +    "  \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services?fields=components/host_components/HostRoles\",\n" +
 +    "  \"items\" : [\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/AMBARI_METRICS\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"AMBARI_METRICS\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/AMBARI_METRICS/components/METRICS_COLLECTOR\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"METRICS_COLLECTOR\",\n" +
 +    "            \"service_name\" : \"AMBARI_METRICS\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/METRICS_COLLECTOR\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"METRICS_COLLECTOR\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"AMBARI_METRICS\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HBASE/components/HBASE_MASTER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HBASE_MASTER\",\n" +
 +    "            \"service_name\" : \"HBASE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6401.ambari.apache.org/host_components/HBASE_MASTER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HBASE_MASTER\",\n" +
 +    "                \"host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HBASE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HDFS\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"HDFS\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HDFS/components/NAMENODE\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"NAMENODE\",\n" +
 +    "            \"service_name\" : \"HDFS\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6401.ambari.apache.org/host_components/NAMENODE\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"NAMENODE\",\n" +
 +    "                \"host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HDFS\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HDFS/components/SECONDARY_NAMENODE\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"SECONDARY_NAMENODE\",\n" +
 +    "            \"service_name\" : \"HDFS\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/SECONDARY_NAMENODE\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"SECONDARY_NAMENODE\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HDFS\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"HIVE\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/HCAT\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HCAT\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/HCAT\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HCAT\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/HIVE_METASTORE\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HIVE_METASTORE\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/HIVE_METASTORE\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HIVE_METASTORE\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/HIVE_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"HIVE_SERVER\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/HIVE_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"HIVE_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/HIVE/components/WEBHCAT_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"WEBHCAT_SERVER\",\n" +
 +    "            \"service_name\" : \"HIVE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/WEBHCAT_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"WEBHCAT_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"HIVE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\",\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/OOZIE\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"OOZIE\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/OOZIE/components/OOZIE_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"OOZIE_SERVER\",\n" +
 +    "            \"service_name\" : \"OOZIE\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/OOZIE_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"OOZIE_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"OOZIE\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"YARN\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN/components/APP_TIMELINE_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"APP_TIMELINE_SERVER\",\n" +
 +    "            \"service_name\" : \"YARN\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/APP_TIMELINE_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"APP_TIMELINE_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"YARN\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN/components/NODEMANAGER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"NODEMANAGER\",\n" +
 +    "            \"service_name\" : \"YARN\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/NODEMANAGER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"NODEMANAGER\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"YARN\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/YARN/components/RESOURCEMANAGER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"RESOURCEMANAGER\",\n" +
 +    "            \"service_name\" : \"YARN\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/RESOURCEMANAGER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"RESOURCEMANAGER\",\n" +
 +    "                \"ha_state\" : \"ACTIVE\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"YARN\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/ZOOKEEPER\",\n" +
 +    "      \"ServiceInfo\" : {\n" +
 +    "        \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "        \"service_name\" : \"ZOOKEEPER\"\n" +
 +    "      },\n" +
 +    "      \"components\" : [\n" +
 +    "        {\n" +
 +    "          \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/services/ZOOKEEPER/components/ZOOKEEPER_SERVER\",\n" +
 +    "          \"ServiceComponentInfo\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "            \"service_name\" : \"ZOOKEEPER\"\n" +
 +    "          },\n" +
 +    "          \"host_components\" : [\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6401.ambari.apache.org/host_components/ZOOKEEPER_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "                \"host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6401.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"ZOOKEEPER\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            },\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6402.ambari.apache.org/host_components/ZOOKEEPER_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "                \"host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6402.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"ZOOKEEPER\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            },\n" +
 +    "            {\n" +
 +    "              \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/hosts/c6403.ambari.apache.org/host_components/ZOOKEEPER_SERVER\",\n" +
 +    "              \"HostRoles\" : {\n" +
 +    "                \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "                \"component_name\" : \"ZOOKEEPER_SERVER\",\n" +
 +    "                \"host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"public_host_name\" : \"c6403.ambari.apache.org\",\n" +
 +    "                \"service_name\" : \"ZOOKEEPER\",\n" +
 +    "                \"stack_id\" : \"HDP-2.6\"\n" +
 +    "              }\n" +
 +    "            }\n" +
 +    "          ]\n" +
 +    "        }\n" +
 +    "      ]\n" +
 +    "    }\n" +
 +    "  ]\n" +
 +    "}\n";
 +
 +
 +    private static final String SERVICECONFIGS_JSON_TEMPLATE =
 +    "{\n" +
 +    "  \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?is_current=true\",\n" +
 +    "  \"items\" : [\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=HBASE&service_config_version=1\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hbase-site\",\n" +
 +    "          \"tag\" : \"version1503410563715\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hbase.master.info.bindAddress\" : \"0.0.0.0\",\n" +
 +    "            \"hbase.master.info.port\" : \"16010\",\n" +
 +    "            \"hbase.master.port\" : \"16000\",\n" +
 +    "            \"hbase.regionserver.info.port\" : \"16030\",\n" +
 +    "            \"hbase.regionserver.port\" : \"16020\",\n" +
 +    "            \"hbase.zookeeper.property.clientPort\" : \"2181\",\n" +
 +    "            \"hbase.zookeeper.quorum\" : \"c6403.ambari.apache.org,c6402.ambari.apache.org,c6401.ambari.apache.org\",\n" +
 +    "            \"hbase.zookeeper.useMulti\" : \"true\",\n" +
 +    "            \"zookeeper.znode.parent\" : \"/hbase-unsecure\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 1,\n" +
 +    "      \"service_config_version_note\" : \"Initial configurations for HBase\",\n" +
 +    "      \"service_name\" : \"HBASE\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=HDFS&service_config_version=2\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hdfs-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"dfs.cluster.administrators\" : \" hdfs\",\n" +
 +    "            \"dfs.datanode.address\" : \"0.0.0.0:50010\",\n" +
 +    "            \"dfs.datanode.http.address\" : \"0.0.0.0:50075\",\n" +
 +    "            \"dfs.datanode.https.address\" : \"0.0.0.0:50475\",\n" +
 +    "            \"dfs.datanode.ipc.address\" : \"0.0.0.0:8010\",\n" +
 +    "            \"dfs.http.policy\" : \"HTTP_ONLY\",\n" +
 +    "            \"dfs.https.port\" : \"50470\",\n" +
 +    "            \"dfs.journalnode.http-address\" : \"0.0.0.0:8480\",\n" +
 +    "            \"dfs.journalnode.https-address\" : \"0.0.0.0:8481\",\n" +
 +    "            \"dfs.namenode.http-address\" : \"c6401.ambari.apache.org:50070\",\n" +
 +    "            \"dfs.namenode.https-address\" : \"c6401.ambari.apache.org:50470\",\n" +
 +    "            \"dfs.namenode.rpc-address\" : \"c6401.ambari.apache.org:8020\",\n" +
 +    "            \"dfs.namenode.secondary.http-address\" : \"c6402.ambari.apache.org:50090\",\n" +
 +    "            \"dfs.webhdfs.enabled\" : \"true\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : {\n" +
 +    "            \"final\" : {\n" +
 +    "              \"dfs.webhdfs.enabled\" : \"true\",\n" +
 +    "              \"dfs.namenode.http-address\" : \"true\",\n" +
 +    "              \"dfs.support.append\" : \"true\",\n" +
 +    "              \"dfs.namenode.name.dir\" : \"true\",\n" +
 +    "              \"dfs.datanode.failed.volumes.tolerated\" : \"true\",\n" +
 +    "              \"dfs.datanode.data.dir\" : \"true\"\n" +
 +    "            }\n" +
 +    "          }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"core-site\",\n" +
 +    "          \"tag\" : \"version1502131215159\",\n" +
 +    "          \"version\" : 2,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hadoop.http.authentication.simple.anonymous.allowed\" : \"true\",\n" +
 +    "            \"net.topology.script.file.name\" : \"/etc/hadoop/conf/topology_script.py\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : {\n" +
 +    "            \"final\" : {\n" +
 +    "              \"fs.defaultFS\" : \"true\"\n" +
 +    "            }\n" +
 +    "          }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 2,\n" +
 +    "      \"service_config_version_note\" : \"knox trusted proxy support\",\n" +
 +    "      \"service_name\" : \"HDFS\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=HIVE&service_config_version=3\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hive-env\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive_security_authorization\" : \"None\",\n" +
 +    "            \"webhcat_user\" : \"hcat\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hiveserver2-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive.metastore.metrics.enabled\" : \"true\",\n" +
 +    "            \"hive.security.authorization.enabled\" : \"false\",\n" +
 +    "            \"hive.service.metrics.hadoop2.component\" : \"hiveserver2\",\n" +
 +    "            \"hive.service.metrics.reporter\" : \"HADOOP2\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hive-interactive-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive.server2.enable.doAs\" : \"false\",\n" +
 +    "            \"hive.server2.tez.default.queues\" : \"default\",\n" +
 +    "            \"hive.server2.tez.initialize.default.sessions\" : \"true\",\n" +
 +    "            \"hive.server2.tez.sessions.custom.queue.allowed\" : \"ignore\",\n" +
 +    "            \"hive.server2.tez.sessions.per.default.queue\" : \"1\",\n" +
 +    "            \"hive.server2.tez.sessions.restricted.configs\" : \"hive.execution.mode,hive.execution.engine\",\n" +
 +    "            \"hive.server2.thrift.http.port\" : \"10501\",\n" +
 +    "            \"hive.server2.thrift.port\" : \"10500\",\n" +
 +    "            \"hive.server2.webui.port\" : \"10502\",\n" +
 +    "            \"hive.server2.webui.use.ssl\" : \"false\",\n" +
 +    "            \"hive.server2.zookeeper.namespace\" : \"hiveserver2-hive2\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"tez-interactive-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"tez.am.am-rm.heartbeat.interval-ms.max\" : \"10000\",\n" +
 +    "            \"tez.am.client.heartbeat.poll.interval.millis\" : \"6000\",\n" +
 +    "            \"tez.am.client.heartbeat.timeout.secs\" : \"90\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"hive-site\",\n" +
 +    "          \"tag\" : \"version1502130841736\",\n" +
 +    "          \"version\" : 2,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hive.metastore.sasl.enabled\" : \"false\",\n" +
 +    "            \"hive.metastore.server.max.threads\" : \"100000\",\n" +
 +    "            \"hive.metastore.uris\" : \"thrift://c6402.ambari.apache.org:9083\",\n" +
 +    "            \"hive.server2.allow.user.substitution\" : \"true\",\n" +
 +    "            \"hive.server2.authentication\" : \"NONE\",\n" +
 +    "            \"hive.server2.authentication.spnego.keytab\" : \"HTTP/_HOST@EXAMPLE.COM\",\n" +
 +    "            \"hive.server2.authentication.spnego.principal\" : \"/etc/security/keytabs/spnego.service.keytab\",\n" +
 +    "            \"hive.server2.enable.doAs\" : \"true\",\n" +
 +    "            \"hive.server2.support.dynamic.service.discovery\" : \"true\",\n" +
 +    "            \"hive.server2.thrift.http.path\" : \"cliservice\",\n" +
 +    "            \"hive.server2.thrift.http.port\" : \"10001\",\n" +
 +    "            \"hive.server2.thrift.max.worker.threads\" : \"500\",\n" +
 +    "            \"hive.server2.thrift.port\" : \"10000\",\n" +
 +    "            \"hive.server2.thrift.sasl.qop\" : \"auth\",\n" +
 +    "            \"hive.server2.transport.mode\" : \"http\",\n" +
 +    "            \"hive.server2.use.SSL\" : \"false\",\n" +
 +    "            \"hive.server2.zookeeper.namespace\" : \"hiveserver2\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : {\n" +
 +    "            \"hidden\" : {\n" +
 +    "              \"javax.jdo.option.ConnectionPassword\" : \"HIVE_CLIENT,WEBHCAT_SERVER,HCAT,CONFIG_DOWNLOAD\"\n" +
 +    "            }\n" +
 +    "          }\n" +
 +    "        },\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"webhcat-site\",\n" +
 +    "          \"tag\" : \"version1502131111746\",\n" +
 +    "          \"version\" : 2,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"templeton.port\" : \"50111\",\n" +
 +    "            \"templeton.zookeeper.hosts\" : \"c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181\",\n" +
 +    "            \"webhcat.proxyuser.knox.groups\" : \"users\",\n" +
 +    "            \"webhcat.proxyuser.knox.hosts\" : \"*\",\n" +
 +    "            \"webhcat.proxyuser.root.groups\" : \"*\",\n" +
 +    "            \"webhcat.proxyuser.root.hosts\" : \"c6401.ambari.apache.org\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"createtime\" : 1502131110745,\n" +
 +    "      \"group_id\" : -1,\n" +
 +    "      \"group_name\" : \"Default\",\n" +
 +    "      \"hosts\" : [ ],\n" +
 +    "      \"is_cluster_compatible\" : true,\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 3,\n" +
 +    "      \"service_config_version_note\" : \"knox trusted proxy support\",\n" +
 +    "      \"service_name\" : \"HIVE\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=OOZIE&service_config_version=3\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"oozie-site\",\n" +
 +    "          \"tag\" : \"version1502131137103\",\n" +
 +    "          \"version\" : 3,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"oozie.base.url\" : \"http://c6402.ambari.apache.org:11000/oozie\",\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 3,\n" +
 +    "      \"service_name\" : \"OOZIE\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=TEZ&service_config_version=1\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"tez-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"tez.use.cluster.hadoop-libs\" : \"false\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"createtime\" : 1502122253525,\n" +
 +    "      \"group_id\" : -1,\n" +
 +    "      \"group_name\" : \"Default\",\n" +
 +    "      \"hosts\" : [ ],\n" +
 +    "      \"is_cluster_compatible\" : true,\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 1,\n" +
 +    "      \"service_config_version_note\" : \"Initial configurations for Tez\",\n" +
 +    "      \"service_name\" : \"TEZ\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    },\n" +
 +    "    {\n" +
 +    "      \"href\" : \"http://c6401.ambari.apache.org:8080/api/v1/clusters/"+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"/configurations/service_config_versions?service_name=YARN&service_config_version=1\",\n" +
 +    "      \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "      \"configurations\" : [\n" +
 +    "        {\n" +
 +    "          \"Config\" : {\n" +
 +    "            \"cluster_name\" : \""+TestAmbariServiceDiscovery.CLUSTER_PLACEHOLDER+"\",\n" +
 +    "            \"stack_id\" : \"HDP-2.6\"\n" +
 +    "          },\n" +
 +    "          \"type\" : \"yarn-site\",\n" +
 +    "          \"tag\" : \"version1\",\n" +
 +    "          \"version\" : 1,\n" +
 +    "          \"properties\" : {\n" +
 +    "            \"hadoop.registry.rm.enabled\" : \"true\",\n" +
 +    "            \"hadoop.registry.zk.quorum\" : \"c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181\",\n" +
 +    "            \"yarn.acl.enable\" : \"false\",\n" +
 +    "            \"yarn.http.policy\" : \"HTTP_ONLY\",\n" +
 +    "            \"yarn.nodemanager.address\" : \"0.0.0.0:45454\",\n" +
 +    "            \"yarn.nodemanager.bind-host\" : \"0.0.0.0\",\n" +
 +    "            \"yarn.resourcemanager.address\" : \"c6402.ambari.apache.org:8050\",\n" +
 +    "            \"yarn.resourcemanager.admin.address\" : \"c6402.ambari.apache.org:8141\",\n" +
 +    "            \"yarn.resourcemanager.ha.enabled\" : \"false\",\n" +
 +    "            \"yarn.resourcemanager.hostname\" : \"c6402.ambari.apache.org\",\n" +
 +    "            \"yarn.resourcemanager.resource-tracker.address\" : \"c6402.ambari.apache.org:8025\",\n" +
 +    "            \"yarn.resourcemanager.scheduler.address\" : \"c6402.ambari.apache.org:8030\",\n" +
 +    "            \"yarn.resourcemanager.webapp.address\" : \"c6402.ambari.apache.org:8088\",\n" +
 +    "            \"yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled\" : \"false\",\n" +
 +    "            \"yarn.resourcemanager.webapp.https.address\" : \"c6402.ambari.apache.org:8090\",\n" +
 +    "            \"yarn.resourcemanager.zk-address\" : \"c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181\"\n" +
 +    "          },\n" +
 +    "          \"properties_attributes\" : { }\n" +
 +    "        }\n" +
 +    "      ],\n" +
 +    "      \"is_current\" : true,\n" +
 +    "      \"service_config_version\" : 1,\n" +
 +    "      \"service_name\" : \"YARN\",\n" +
 +    "      \"stack_id\" : \"HDP-2.6\",\n" +
 +    "      \"user\" : \"admin\"\n" +
 +    "    }\n" +
 +    "  ]\n" +
 +    "}";
 +
 +}


[17/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
----------------------------------------------------------------------
diff --cc gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
index 1861490,0000000..948447b
mode 100644,000000..100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
@@@ -1,312 -1,0 +1,657 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin;
 +
++import com.fasterxml.jackson.annotation.JsonProperty;
++import org.apache.commons.io.FileUtils;
++import org.apache.commons.io.FilenameUtils;
++import org.apache.knox.gateway.i18n.GatewaySpiMessages;
++import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.admin.beans.BeanConverter;
 +import org.apache.knox.gateway.service.admin.beans.Topology;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +
 +import javax.servlet.http.HttpServletRequest;
 +import javax.ws.rs.Consumes;
 +import javax.ws.rs.DELETE;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.PUT;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.PathParam;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import javax.xml.bind.annotation.XmlAccessType;
 +import javax.xml.bind.annotation.XmlAccessorType;
 +import javax.xml.bind.annotation.XmlElement;
 +import javax.xml.bind.annotation.XmlElementWrapper;
++import java.io.File;
++import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
++import java.util.Collection;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.List;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
++import static javax.ws.rs.core.MediaType.TEXT_PLAIN;
++
 +import static javax.ws.rs.core.Response.ok;
++import static javax.ws.rs.core.Response.created;
++import static javax.ws.rs.core.Response.notModified;
++import static javax.ws.rs.core.Response.status;
++
 +
 +@Path("/api/v1")
 +public class TopologiesResource {
++
++  private static final String XML_EXT  = ".xml";
++  private static final String JSON_EXT = ".json";
++
++  private static final String TOPOLOGIES_API_PATH    = "topologies";
++  private static final String SINGLE_TOPOLOGY_API_PATH = TOPOLOGIES_API_PATH + "/{id}";
++  private static final String PROVIDERCONFIG_API_PATH = "providerconfig";
++  private static final String SINGLE_PROVIDERCONFIG_API_PATH = PROVIDERCONFIG_API_PATH + "/{name}";
++  private static final String DESCRIPTORS_API_PATH    = "descriptors";
++  private static final String SINGLE_DESCRIPTOR_API_PATH = DESCRIPTORS_API_PATH + "/{name}";
++
++  private static final GatewaySpiMessages log = MessagesFactory.get(GatewaySpiMessages.class);
++
 +  @Context
 +  private HttpServletRequest request;
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
-   @Path("topologies/{id}")
++  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Topology getTopology(@PathParam("id") String id) {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    GatewayConfig config = (GatewayConfig) request.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
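 +    // Resolve the topology's external URI before converting it to the API bean;
 +    // if a valid URI cannot be built, the field is simply left unset.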
 +    for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +      if(t.getName().equals(id)) {
 +        try {
 +          t.setUri(new URI( buildURI(t, config, request) ));
 +        } catch (URISyntaxException se) {
 +          t.setUri(null);
 +        }
 +        return BeanConverter.getTopology(t);
 +      }
 +    }
 +    return null;
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
-   @Path("topologies")
++  @Path(TOPOLOGIES_API_PATH)
 +  public SimpleTopologyWrapper getTopologies() {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    ArrayList<SimpleTopology> st = new ArrayList<>();
 +    GatewayConfig conf = (GatewayConfig) request.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +      st.add(getSimpleTopology(t, conf));
 +    }
 +
 +    Collections.sort(st, new TopologyComparator());
 +    SimpleTopologyWrapper stw = new SimpleTopologyWrapper();
 +
 +    for(SimpleTopology t : st){
 +      stw.topologies.add(t);
 +    }
 +
 +    return stw;
 +
 +  }
 +
 +  @PUT
 +  @Consumes({APPLICATION_JSON, APPLICATION_XML})
-   @Path("topologies/{id}")
++  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Topology uploadTopology(@PathParam("id") String id, Topology t) {
 +
 +    GatewayServices gs = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    t.setName(id);
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    ts.deployTopology(BeanConverter.getTopology(t));
 +
 +    return getTopology(id);
 +  }
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
-   @Path("topologies/{id}")
++  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Response deleteTopology(@PathParam("id") String id) {
 +    boolean deleted = false;
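 +    // The admin topology is deliberately protected from deletion via this API.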
 +    if(!"admin".equals(id)) {
 +      GatewayServices services = (GatewayServices) request.getServletContext()
 +          .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +      for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +        if(t.getName().equals(id)) {
 +          ts.deleteTopology(t);
 +          deleted = true;
 +        }
 +      }
 +    }
 +    return ok().entity("{ \"deleted\" : " + deleted + " }").build();
 +  }
 +
++  @GET
++  @Produces({APPLICATION_JSON})
++  @Path(PROVIDERCONFIG_API_PATH)
++  public HrefListing getProviderConfigurations() {
++    HrefListing listing = new HrefListing();
++    listing.setHref(buildHref(request));
++
++    GatewayServices services =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    List<HrefListItem> configs = new ArrayList<>();
++    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
++    // Get all the provider configuration file names
++    for (File providerConfig : ts.getProviderConfigurations()){
++      String id = FilenameUtils.getBaseName(providerConfig.getName());
++      configs.add(new HrefListItem(buildHref(id, request), providerConfig.getName()));
++    }
++
++    listing.setItems(configs);
++    return listing;
++  }
++
++  @GET
++  @Produces({APPLICATION_XML})
++  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
++  public Response getProviderConfiguration(@PathParam("name") String name) {
++    Response response;
++
++    GatewayServices services =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
++
++    File providerConfigFile = null;
++
++    for (File pc : ts.getProviderConfigurations()){
++      // If the file base name matches the specified name
++      if (FilenameUtils.getBaseName(pc.getName()).equals(name)) {
++        providerConfigFile = pc;
++        break;
++      }
++    }
++
++    if (providerConfigFile != null) {
++      byte[] content = null;
++      try {
++        content = FileUtils.readFileToByteArray(providerConfigFile);
++        response = ok().entity(content).build();
++      } catch (IOException e) {
++        log.failedToReadConfigurationFile(providerConfigFile.getAbsolutePath(), e);
++        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
++      }
++
++    } else {
++      response = Response.status(Response.Status.NOT_FOUND).build();
++    }
++    return response;
++  }
++
++  @DELETE
++  @Produces(APPLICATION_JSON)
++  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
++  public Response deleteProviderConfiguration(@PathParam("name") String name) {
++    Response response;
++    GatewayServices services =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
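++    // Respond with 304 (Not Modified) when there is no matching provider configuration to delete.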
++    if (ts.deleteProviderConfiguration(name)) {
++      response = ok().entity("{ \"deleted\" : \"provider config " + name + "\" }").build();
++    } else {
++      response = notModified().build();
++    }
++    return response;
++  }
++
++
++  @DELETE
++  @Produces(APPLICATION_JSON)
++  @Path(SINGLE_DESCRIPTOR_API_PATH)
++  public Response deleteSimpleDescriptor(@PathParam("name") String name) {
++    Response response = null;
++    if(!"admin".equals(name)) {
++      GatewayServices services =
++              (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
++      if (ts.deleteDescriptor(name)) {
++        response = ok().entity("{ \"deleted\" : \"descriptor " + name + "\" }").build();
++      }
++    }
++
++    if (response == null) {
++      response = notModified().build();
++    }
++
++    return response;
++  }
++
++
++  @PUT
++  @Consumes({APPLICATION_XML})
++  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
++  public Response uploadProviderConfiguration(@PathParam("name") String name, String content) {
++    Response response = null;
++
++    GatewayServices gs =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
++
++    boolean isUpdate = configFileExists(ts.getProviderConfigurations(), name);
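++    // Deploying over an existing configuration is an update (204 No Content on success);
++    // otherwise this is a create (201 Created).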
++
++    String filename = name.endsWith(XML_EXT) ? name : name + XML_EXT;
++    if (ts.deployProviderConfiguration(filename, content)) {
++      try {
++        if (isUpdate) {
++          response = Response.noContent().build();
++        } else{
++          response = created(new URI(buildHref(request))).build();
++        }
++      } catch (URISyntaxException e) {
++        log.invalidResourceURI(e.getInput(), e.getReason(), e);
++        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy provider configuration " + name + "\" }").build();
++      }
++    }
++
++    if (response == null) {
++      // Deployment failed; signal that nothing changed, as the delete operations do.
++      response = notModified().build();
++    }
++
++    return response;
++  }
++
++
++  private boolean configFileExists(Collection<File> existing, String candidateName) {
++    boolean result = false;
++    for (File exists : existing) {
++      if (FilenameUtils.getBaseName(exists.getName()).equals(candidateName)) {
++        result = true;
++        break;
++      }
++    }
++    return result;
++  }
++
++
++  @PUT
++  @Consumes({APPLICATION_JSON})
++  @Path(SINGLE_DESCRIPTOR_API_PATH)
++  public Response uploadSimpleDescriptor(@PathParam("name") String name, String content) {
++    Response response = null;
++
++    GatewayServices gs =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
++
++    boolean isUpdate = configFileExists(ts.getDescriptors(), name);
++
++    String filename = name.endsWith(JSON_EXT) ? name : name + JSON_EXT;
++    if (ts.deployDescriptor(filename, content)) {
++      try {
++        if (isUpdate) {
++          response = Response.noContent().build();
++        } else {
++          response = created(new URI(buildHref(request))).build();
++        }
++      } catch (URISyntaxException e) {
++        log.invalidResourceURI(e.getInput(), e.getReason(), e);
++        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy descriptor " + name + "\" }").build();
++      }
++    }
++
++    return response;
++  }
++
++
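++  // Lists the known simple descriptors as href/name pairs (see the HrefListing
++  // beans below).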
++  @GET
++  @Produces({APPLICATION_JSON})
++  @Path(DESCRIPTORS_API_PATH)
++  public HrefListing getSimpleDescriptors() {
++    HrefListing listing = new HrefListing();
++    listing.setHref(buildHref(request));
++
++    GatewayServices services =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    List<HrefListItem> descriptors = new ArrayList<>();
++    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
++    for (File descriptor : ts.getDescriptors()){
++      String id = FilenameUtils.getBaseName(descriptor.getName());
++      descriptors.add(new HrefListItem(buildHref(id, request), descriptor.getName()));
++    }
++
++    listing.setItems(descriptors);
++    return listing;
++  }
++
++
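++  // Serves the named descriptor's raw bytes: ".yml" files as text/plain,
++  // everything else as application/json; an unknown name yields 404.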
++  @GET
++  @Produces({APPLICATION_JSON, TEXT_PLAIN})
++  @Path(SINGLE_DESCRIPTOR_API_PATH)
++  public Response getSimpleDescriptor(@PathParam("name") String name) {
++    Response response;
++
++    GatewayServices services =
++            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++
++    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
++
++    File descriptorFile = null;
++
++    for (File sd : ts.getDescriptors()){
++      // If the file name matches the specified id
++      if (FilenameUtils.getBaseName(sd.getName()).equals(name)) {
++        descriptorFile = sd;
++        break;
++      }
++    }
++
++    if (descriptorFile != null) {
++      String mediaType = APPLICATION_JSON;
++
++      byte[] content = null;
++      try {
++        if ("yml".equals(FilenameUtils.getExtension(descriptorFile.getName()))) {
++          mediaType = TEXT_PLAIN;
++        }
++        content = FileUtils.readFileToByteArray(descriptorFile);
++        response = ok().type(mediaType).entity(content).build();
++      } catch (IOException e) {
++        log.failedToReadConfigurationFile(descriptorFile.getAbsolutePath(), e);
++        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
++      }
++    } else {
++      response = Response.status(Response.Status.NOT_FOUND).build();
++    }
++
++    return response;
++  }
++
 +
 +  private static class TopologyComparator implements Comparator<SimpleTopology> {
 +    @Override
 +    public int compare(SimpleTopology t1, SimpleTopology t2) {
 +      return t1.getName().compareTo(t2.getName());
 +    }
 +  }
 +
-    String buildURI(org.apache.knox.gateway.topology.Topology topology, GatewayConfig config, HttpServletRequest req){
++
++  String buildURI(org.apache.knox.gateway.topology.Topology topology, GatewayConfig config, HttpServletRequest req){
 +    String uri = buildXForwardBaseURL(req);
 +
- //    Strip extra context
++    // Strip extra context
 +    uri = uri.replace(req.getContextPath(), "");
 +
- //    Add the gateway path
++    // Add the gateway path
 +    String gatewayPath;
 +    if(config.getGatewayPath() != null){
 +      gatewayPath = config.getGatewayPath();
 +    }else{
 +      gatewayPath = "gateway";
 +    }
 +    uri += "/" + gatewayPath;
 +
 +    uri += "/" + topology.getName();
 +    return uri;
 +  }
 +
-    String buildHref(org.apache.knox.gateway.topology.Topology t, HttpServletRequest req) {
++  String buildHref(HttpServletRequest req) {
++    return buildHref((String)null, req);
++  }
++
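++  // Example with illustrative values: an X-Forwarded base of
++  // "https://host:8443", a trimmed pathInfo of "/topologies" and id "sandbox"
++  // yield "https://host:8443/topologies/sandbox".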
++  String buildHref(String id, HttpServletRequest req) {
 +    String href = buildXForwardBaseURL(req);
- //    Make sure that the pathInfo doesn't have any '/' chars at the end.
++    // Make sure that the pathInfo doesn't have any '/' chars at the end.
 +    String pathInfo = req.getPathInfo();
-     if(pathInfo.endsWith("/")) {
-       while(pathInfo.endsWith("/")) {
-         pathInfo = pathInfo.substring(0, pathInfo.length() - 1);
-       }
++    while(pathInfo.endsWith("/")) {
++      pathInfo = pathInfo.substring(0, pathInfo.length() - 1);
++    }
++
++    href += pathInfo;
++
++    if (id != null) {
++      href += "/" + id;
 +    }
 +
-     href += pathInfo + "/" + t.getName();
 +    return href;
 +  }
 +
-   private SimpleTopology getSimpleTopology(
-       org.apache.knox.gateway.topology.Topology t, GatewayConfig config) {
++  String buildHref(org.apache.knox.gateway.topology.Topology t, HttpServletRequest req) {
++    return buildHref(t.getName(), req);
++  }
++
++  private SimpleTopology getSimpleTopology(org.apache.knox.gateway.topology.Topology t, GatewayConfig config) {
 +    String uri = buildURI(t, config, request);
 +    String href = buildHref(t, request);
 +    return new SimpleTopology(t, uri, href);
 +  }
 +
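++  // Reconstructs the externally visible base URL from X-Forwarded-* headers,
++  // preferring Proto, then Host (+Port when the host string doesn't already
++  // contain it), then Server+Port, then Port with the local server name, and
++  // finally the raw request values. Hypothetical headers Proto=https,
++  // Host=knox.example.com, Port=8443, Context=/gateway produce
++  // "https://knox.example.com:8443/gateway".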
 +  private String buildXForwardBaseURL(HttpServletRequest req){
 +    final String X_Forwarded = "X-Forwarded-";
 +    final String X_Forwarded_Context = X_Forwarded + "Context";
 +    final String X_Forwarded_Proto = X_Forwarded + "Proto";
 +    final String X_Forwarded_Host = X_Forwarded + "Host";
 +    final String X_Forwarded_Port = X_Forwarded + "Port";
 +    final String X_Forwarded_Server = X_Forwarded + "Server";
 +
 +    String baseURL = "";
 +
- //    Get Protocol
++    // Get Protocol
 +    if(req.getHeader(X_Forwarded_Proto) != null){
 +      baseURL += req.getHeader(X_Forwarded_Proto) + "://";
 +    } else {
 +      baseURL += req.getProtocol() + "://";
 +    }
 +
- //    Handle Server/Host and Port Here
++    // Handle Server/Host and Port Here
 +    if (req.getHeader(X_Forwarded_Host) != null && req.getHeader(X_Forwarded_Port) != null){
- //        Double check to see if host has port
++      // Double check to see if host has port
 +      if(req.getHeader(X_Forwarded_Host).contains(req.getHeader(X_Forwarded_Port))){
 +        baseURL += req.getHeader(X_Forwarded_Host);
 +      } else {
- //        If there's no port, add the host and port together;
++        // The Host header doesn't include the port, so append it.
 +        baseURL += req.getHeader(X_Forwarded_Host) + ":" + req.getHeader(X_Forwarded_Port);
 +      }
 +    } else if(req.getHeader(X_Forwarded_Server) != null && req.getHeader(X_Forwarded_Port) != null){
- //      Tack on the server and port if they're available. Try host if server not available
++      // No usable Host header; fall back to the Server header plus the port.
 +      baseURL += req.getHeader(X_Forwarded_Server) + ":" + req.getHeader(X_Forwarded_Port);
 +    } else if(req.getHeader(X_Forwarded_Port) != null) {
- //      if we at least have a port, we can use it.
++      // If we at least have a port, we can use it with the local server name.
 +      baseURL += req.getServerName() + ":" + req.getHeader(X_Forwarded_Port);
 +    } else {
- //      Resort to request members
++      // Resort to request members
 +      baseURL += req.getServerName() + ":" + req.getLocalPort();
 +    }
 +
- //    Handle Server context
++    // Handle Server context
 +    if( req.getHeader(X_Forwarded_Context) != null ) {
 +      baseURL += req.getHeader( X_Forwarded_Context );
 +    } else {
 +      baseURL += req.getContextPath();
 +    }
 +
 +    return baseURL;
 +  }
 +
++
++  static class HrefListing {
++    @JsonProperty
++    String href;
++
++    @JsonProperty
++    List<HrefListItem> items;
++
++    HrefListing() {}
++
++    public void setHref(String href) {
++      this.href = href;
++    }
++
++    public String getHref() {
++      return href;
++    }
++
++    public void setItems(List<HrefListItem> items) {
++      this.items = items;
++    }
++
++    public List<HrefListItem> getItems() {
++      return items;
++    }
++  }
++
++  static class HrefListItem {
++    @JsonProperty
++    String href;
++
++    @JsonProperty
++    String name;
++
++    HrefListItem() {}
++
++    HrefListItem(String href, String name) {
++      this.href = href;
++      this.name = name;
++    }
++
++    public void setHref(String href) {
++      this.href = href;
++    }
++
++    public String getHref() {
++      return href;
++    }
++
++    public void setName(String name) {
++      this.name = name;
++    }
++
++    public String getName() {
++      return name;
++    }
++  }
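++  // With the Jackson annotations above, these beans render roughly as:
++  // { "href" : "...", "items" : [ { "href" : "...", "name" : "..." } ] }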
++
++
 +  @XmlAccessorType(XmlAccessType.NONE)
 +  public static class SimpleTopology {
 +
 +    @XmlElement
 +    private String name;
 +    @XmlElement
 +    private String timestamp;
 +    @XmlElement
++    private String defaultServicePath;
++    @XmlElement
 +    private String uri;
 +    @XmlElement
 +    private String href;
 +
 +    public SimpleTopology() {}
 +
 +    public SimpleTopology(org.apache.knox.gateway.topology.Topology t, String uri, String href) {
 +      this.name = t.getName();
 +      this.timestamp = Long.toString(t.getTimestamp());
++      this.defaultServicePath = t.getDefaultServicePath();
 +      this.uri = uri;
 +      this.href = href;
 +    }
 +
 +    public String getName() {
 +      return name;
 +    }
 +
 +    public void setName(String n) {
 +      name = n;
 +    }
 +
 +    public String getTimestamp() {
 +      return timestamp;
 +    }
 +
++    public void setDefaultService(String defaultServicePath) {
++      this.defaultServicePath = defaultServicePath;
++    }
++
++    public String getDefaultService() {
++      return defaultServicePath;
++    }
++
 +    public void setTimestamp(String timestamp) {
 +      this.timestamp = timestamp;
 +    }
 +
 +    public String getUri() {
 +      return uri;
 +    }
 +
 +    public void setUri(String uri) {
 +      this.uri = uri;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +  }
 +
 +  @XmlAccessorType(XmlAccessType.FIELD)
 +  public static class SimpleTopologyWrapper{
 +
 +    @XmlElement(name="topology")
 +    @XmlElementWrapper(name="topologies")
 +    private List<SimpleTopology> topologies = new ArrayList<SimpleTopology>();
 +
 +    public List<SimpleTopology> getTopologies(){
 +      return topologies;
 +    }
 +
 +    public void setTopologies(List<SimpleTopology> ts){
 +      this.topologies = ts;
 +    }
 +
 +  }
 +}
 +

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
----------------------------------------------------------------------
diff --cc gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
index 358b5b5,0000000..e8d6915
mode 100644,000000..100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
@@@ -1,168 -1,0 +1,170 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin.beans;
 +
 +import org.apache.knox.gateway.topology.Version;
 +
 +import java.util.Collection;
 +
 +public class BeanConverter {
 +
 +  public static Topology getTopology(
 +      org.apache.knox.gateway.topology.Topology topology) {
 +    Topology topologyResource = new Topology();
 +    topologyResource.setName(topology.getName());
 +    topologyResource.setTimestamp(topology.getTimestamp());
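++    // The admin API bean exposes the topology's defaultServicePath as "path".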
++    topologyResource.setPath(topology.getDefaultServicePath());
 +    topologyResource.setUri(topology.getUri());
 +    for ( org.apache.knox.gateway.topology.Provider provider : topology.getProviders() ) {
 +      topologyResource.getProviders().add( getProvider(provider) );
 +    }
 +    for ( org.apache.knox.gateway.topology.Service service : topology.getServices() ) {
 +      topologyResource.getServices().add( getService(service) );
 +    }
 +    for ( org.apache.knox.gateway.topology.Application application : topology.getApplications() ) {
 +      topologyResource.getApplications().add( getApplication(application) );
 +    }
 +    return topologyResource;
 +  }
 +
 +  public static org.apache.knox.gateway.topology.Topology getTopology(Topology topology) {
 +    org.apache.knox.gateway.topology.Topology deploymentTopology = new org.apache.knox.gateway.topology.Topology();
 +    deploymentTopology.setName(topology.getName());
 +    deploymentTopology.setTimestamp(topology.getTimestamp());
++    deploymentTopology.setDefaultServicePath(topology.getPath());
 +    deploymentTopology.setUri(topology.getUri());
 +    for ( Provider provider : topology.getProviders() ) {
 +      deploymentTopology.addProvider( getProvider(provider) );
 +    }
 +    for ( Service service : topology.getServices() ) {
 +      deploymentTopology.addService( getService(service) );
 +    }
 +    for ( Application application : topology.getApplications() ) {
 +      deploymentTopology.addApplication( getApplication(application) );
 +    }
 +    return deploymentTopology;
 +  }
 +
 +  private static Provider getProvider(
 +      org.apache.knox.gateway.topology.Provider provider) {
 +    Provider providerResource = new Provider();
 +    providerResource.setName(provider.getName());
 +    providerResource.setEnabled(provider.isEnabled());
 +    providerResource.setRole(provider.getRole());
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = provider.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        providerResource.getParams().add(getParam(param));
 +      }
 +    }
 +    return providerResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Provider getProvider(Provider provider) {
 +    org.apache.knox.gateway.topology.Provider deploymentProvider = new org.apache.knox.gateway.topology.Provider();
 +    deploymentProvider.setName(provider.getName());
 +    deploymentProvider.setEnabled(provider.isEnabled());
 +    deploymentProvider.setRole(provider.getRole());
 +    for ( Param param : provider.getParams() ) {
 +      deploymentProvider.addParam( getParam(param) );
 +    }
 +    return deploymentProvider;
 +  }
 +
 +  private static Service getService(
 +      org.apache.knox.gateway.topology.Service service) {
 +    Service serviceResource = new Service();
 +    serviceResource.setRole(service.getRole());
 +    serviceResource.setName(service.getName());
 +    Version version = service.getVersion();
 +    if (version != null) {
 +      serviceResource.setVersion(version.toString());
 +    }
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = service.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        serviceResource.getParams().add(getParam(param));
 +      }
 +    }
 +    for ( String url : service.getUrls() ) {
 +      serviceResource.getUrls().add( url );
 +    }
 +    return serviceResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Service getService(Service service) {
 +    org.apache.knox.gateway.topology.Service deploymentService = new org.apache.knox.gateway.topology.Service();
 +    deploymentService.setRole(service.getRole());
 +    deploymentService.setName(service.getName());
 +    if (service.getVersion() != null) {
 +      deploymentService.setVersion(new Version(service.getVersion()));
 +    }
 +    for ( Param param : service.getParams() ) {
 +      deploymentService.addParam( getParam(param) );
 +    }
 +    for ( String url : service.getUrls() ) {
 +      deploymentService.addUrl( url );
 +    }
 +    return deploymentService;
 +  }
 +
 +  private static Application getApplication(
 +      org.apache.knox.gateway.topology.Application application) {
 +    Application applicationResource = new Application();
 +    applicationResource.setRole(application.getRole());
 +    applicationResource.setName(application.getName());
 +    Version version = application.getVersion();
 +    if (version != null) {
 +      applicationResource.setVersion(version.toString());
 +    }
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = application.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        applicationResource.getParams().add(getParam(param));
 +      }
 +    }
 +    for ( String url : application.getUrls() ) {
 +      applicationResource.getUrls().add( url );
 +    }
 +    return applicationResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Application getApplication(Application application) {
 +    org.apache.knox.gateway.topology.Application applicationResource = new org.apache.knox.gateway.topology.Application();
 +    applicationResource.setRole(application.getRole());
 +    applicationResource.setName(application.getName());
 +    if (application.getVersion() != null) {
 +      applicationResource.setVersion(new Version(application.getVersion()));
 +    }
 +    for ( Param param : application.getParams() ) {
 +      applicationResource.addParam( getParam(param) );
 +    }
 +    for ( String url : application.getUrls() ) {
 +      applicationResource.getUrls().add( url );
 +    }
 +    return applicationResource;
 +  }
 +
 +  private static Param getParam(org.apache.knox.gateway.topology.Param param) {
 +    return new Param(param.getName(), param.getValue());
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Param getParam(Param param) {
 +    return new org.apache.knox.gateway.topology.Param(param.getName(), param.getValue());
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
----------------------------------------------------------------------
diff --cc gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
index 9c58ad3,0000000..2d2eab8
mode 100644,000000..100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
@@@ -1,108 -1,0 +1,119 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin.beans;
 +
 +import javax.xml.bind.annotation.XmlElement;
 +import javax.xml.bind.annotation.XmlElementWrapper;
 +import javax.xml.bind.annotation.XmlRootElement;
 +import java.net.URI;
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +@XmlRootElement(name="topology")
 +public class Topology {
 +
 +  @XmlElement
 +  private URI uri;
 +
 +  @XmlElement
 +  private String name;
 +
 +  @XmlElement
++  private String path;
++
++  @XmlElement
 +  private long timestamp;
 +
 +  @XmlElement(name="provider")
 +  @XmlElementWrapper(name="gateway")
 +  public List<Provider> providers;
 +
 +  @XmlElement(name="service")
 +  public List<Service> services;
 +
 +  @XmlElement(name="application")
 +  private List<Application> applications;
 +
 +  public Topology() {
 +  }
 +
 +  public URI getUri() {
 +    return uri;
 +  }
 +
 +  public void setUri( URI uri ) {
 +    this.uri = uri;
 +  }
 +
 +  public String getName() {
 +    return name;
 +  }
 +
 +  public void setName( String name ) {
 +    this.name = name;
 +  }
 +
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +
++  public void setPath( String defaultServicePath ) {
++    this.path = defaultServicePath;
++  }
++
++  public String getPath() {
++    return path;
++  }
++
 +  public void setTimestamp( long timestamp ) {
 +    this.timestamp = timestamp;
 +  }
 +
 +  public List<Service> getServices() {
 +    if (services == null) {
 +      services = new ArrayList<>();
 +    }
 +    return services;
 +  }
 +
 +  public List<Application> getApplications() {
 +    if (applications == null) {
 +      applications = new ArrayList<>();
 +    }
 +    return applications;
 +  }
 +
 +  public List<Provider> getProviders() {
 +    if (providers == null) {
 +      providers = new ArrayList<>();
 +    }
 +    return providers;
 +  }
 +
 +  public void setProviders(List<Provider> providers) {
 +    this.providers = providers;
 +  }
 +
 +  public void setServices(List<Service> services) {
 +    this.services = services;
 +  }
 +
 +  public void setApplications(List<Application> applications) {
 +    this.applications = applications;
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
index 243bac3,0000000..27a1905
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
@@@ -1,85 -1,0 +1,91 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.i18n;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.knox.gateway")
 +public interface GatewaySpiMessages {
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to load the internal principal mapping table: {0}" )
 +  void failedToLoadPrincipalMappingTable( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to execute filter: {0}" )
 +  void failedToExecuteFilter( @StackTrace( level = MessageLevel.DEBUG ) Throwable t );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt passphrase: {0}" )
 +  void failedToEncryptPassphrase( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate secret key from password: {0}" )
 +  void failedToGenerateKeyFromPassword( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to create keystore [filename={0}, type={1}]: {2}" )
 +  void failedToCreateKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load keystore [filename={0}, type={1}]: {2}" )
 +  void failedToLoadKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add credential: {1}" )
 +  void failedToAddCredential( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to remove credential: {1}")
 +  void failedToRemoveCredential(@StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get credential: {1}" )
 +  void failedToGetCredential(@StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to persist master secret: {0}" )
 +  void failedToPersistMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt master secret: {0}" )
 +  void failedToEncryptMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize master service from persistent master {0}: {1}" )
 +  void failedToInitializeFromPersistentMaster( String masterFileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add self signed certificate for Gateway {0}: {1}" )
 +  void failedToAddSeflSignedCertForGateway( String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key {0}: {1}" )
 +  void failedToGetKey(String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Loading from persistent master: {0}" )
 +  void loadingFromPersistentMaster( String tag );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "ALIAS: {0}" )
 +  void printClusterAlias( String alias );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "MASTER SERVICE == NULL: {0}" )
 +  void printMasterServiceIsNull( boolean masterServiceIsNull );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Gateway has failed to start. Unable to prompt user for master secret setup. Please consider using knoxcli.sh create-master" )
 +  void unableToPromptForMasterUseKnoxCLI();
 +
-  @Message( level = MessageLevel.ERROR, text = "Error in generating certificate: {0}" )
-  void failedToGenerateCertificate( @StackTrace( level = MessageLevel.ERROR ) Exception e );
++  @Message( level = MessageLevel.ERROR, text = "Error in generating certificate: {0}" )
++  void failedToGenerateCertificate( @StackTrace( level = MessageLevel.ERROR ) Exception e );
++
++  @Message(level = MessageLevel.ERROR, text = "Failed to read configuration: {0}")
++  void failedToReadConfigurationFile(final String filePath, @StackTrace(level = MessageLevel.DEBUG) Exception e );
++
++  @Message(level = MessageLevel.ERROR, text = "Invalid resource URI {0} : {1}")
++  void invalidResourceURI(final String uri, final String reason, @StackTrace(level = MessageLevel.DEBUG) Exception e );
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-spi/src/main/java/org/apache/knox/gateway/services/topology/TopologyService.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/services/topology/TopologyService.java
index 820da73,0000000..3be3a4a
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/services/topology/TopologyService.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/services/topology/TopologyService.java
@@@ -1,50 -1,0 +1,63 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.topology;
 +
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +
++import java.io.File;
 +import java.util.Collection;
 +import java.util.List;
 +import java.util.Map;
 +
 +
 +public interface TopologyService extends Service {
 +
-   public void reloadTopologies();
++  void reloadTopologies();
 +
-   public void deployTopology(Topology t);
++  void deployTopology(Topology t);
 +
-   public void redeployTopologies(String topologyName);
++  void redeployTopologies(String topologyName);
 +
-   public void addTopologyChangeListener(TopologyListener listener);
++  void addTopologyChangeListener(TopologyListener listener);
 +
-   public void startMonitor() throws Exception;
++  void startMonitor() throws Exception;
 +
-   public void stopMonitor() throws Exception;
++  void stopMonitor() throws Exception;
 +
-   public Collection<Topology> getTopologies();
++  Collection<Topology> getTopologies();
 +
-   public void deleteTopology(Topology t);
++  boolean deployProviderConfiguration(String name, String content);
 +
-   public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config);
++  Collection<File> getProviderConfigurations();
 +
-   }
++  boolean deployDescriptor(String name, String content);
++
++  Collection<File> getDescriptors();
++
++  void deleteTopology(Topology t);
++
++  boolean deleteDescriptor(String name);
++
++  boolean deleteProviderConfiguration(String name);
++
++  Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config);
++
++}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
index 6eac50b,0000000..815c218
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
@@@ -1,142 -1,0 +1,151 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology;
 +
 +import org.apache.commons.collections.map.HashedMap;
 +import org.apache.commons.collections.map.MultiKeyMap;
 +
 +import java.net.URI;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class Topology {
 +
 +  private URI uri;
 +  private String name;
++  private String defaultServicePath = null;
 +  private long timestamp;
 +  public List<Provider> providerList = new ArrayList<Provider>();
 +  private Map<String,Map<String,Provider>> providerMap = new HashMap<>();
 +  public List<Service> services = new ArrayList<Service>();
 +  private MultiKeyMap serviceMap;
 +  private List<Application> applications = new ArrayList<Application>();
 +  private Map<String,Application> applicationMap = new HashMap<>();
 +
 +  public Topology() {
 +    serviceMap = MultiKeyMap.decorate(new HashedMap());
 +  }
 +
 +  public URI getUri() {
 +    return uri;
 +  }
 +
 +  public void setUri( URI uri ) {
 +    this.uri = uri;
 +  }
 +
 +  public String getName() {
 +    return name;
 +  }
 +
 +  public void setName( String name ) {
 +    this.name = name;
 +  }
 +
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +
 +  public void setTimestamp( long timestamp ) {
 +    this.timestamp = timestamp;
 +  }
 +
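++  // Optional default service path: stays null unless set explicitly (the XML
++  // binding maps it from a topology's <path> element).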
++  public String getDefaultServicePath() {
++    return defaultServicePath;
++  }
++
++  public void setDefaultServicePath(String servicePath) {
++    defaultServicePath = servicePath;
++  }
++
 +  public Collection<Service> getServices() {
 +    return services;
 +  }
 +
 +  public Service getService( String role, String name, Version version) {
 +    return (Service)serviceMap.get(role, name, version);
 +  }
 +
 +  public void addService( Service service ) {
 +    services.add( service );
 +    serviceMap.put(service.getRole(), service.getName(), service.getVersion(), service);
 +  }
 +
 +  public Collection<Application> getApplications() {
 +    return applications;
 +  }
 +
 +  private static String fixApplicationUrl( String url ) {
 +    if( url == null ) {
 +      url = "/";
 +    }
 +    if( !url.startsWith( "/" ) ) {
 +      url = "/" + url;
 +    }
 +    return url;
 +  }
 +
 +  public Application getApplication(String url) {
 +    return applicationMap.get( fixApplicationUrl( url ) );
 +  }
 +
 +  public void addApplication( Application application ) {
 +    applications.add( application );
 +    List<String> urls = application.getUrls();
 +    if( urls == null || urls.isEmpty() ) {
 +      applicationMap.put( fixApplicationUrl( application.getName() ), application );
 +    } else {
 +      for( String url : application.getUrls() ) {
 +        applicationMap.put( fixApplicationUrl( url ), application );
 +      }
 +    }
 +  }
 +
 +  public Collection<Provider> getProviders() {
 +    return providerList;
 +  }
 +
 +  public Provider getProvider( String role, String name ) {
 +    Provider provider = null;
 +    Map<String,Provider> nameMap = providerMap.get( role );
 +    if( nameMap != null) { 
 +      if( name != null ) {
 +        provider = nameMap.get( name );
 +      }
 +      else {
 +        provider = (Provider) nameMap.values().toArray()[0];
 +      }
 +    }
 +    return provider;
 +  }
 +
 +  public void addProvider( Provider provider ) {
 +    providerList.add( provider );
 +    String role = provider.getRole();
 +    Map<String,Provider> nameMap = providerMap.get( role );
 +    if( nameMap == null ) {
 +      nameMap = new HashMap<>();
 +      providerMap.put( role, nameMap );
 +    }
 +    nameMap.put( provider.getName(), provider );
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-spi/src/main/resources/org/apache/knox/gateway/topology/topology_binding-xml.xml
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/resources/org/apache/knox/gateway/topology/topology_binding-xml.xml
index 9e9c26f,0000000..956387e
mode 100644,000000..100644
--- a/gateway-spi/src/main/resources/org/apache/knox/gateway/topology/topology_binding-xml.xml
+++ b/gateway-spi/src/main/resources/org/apache/knox/gateway/topology/topology_binding-xml.xml
@@@ -1,63 -1,0 +1,64 @@@
 +<?xml version="1.0"?>
 +<!--
 + Licensed to the Apache Software Foundation (ASF) under one
 +or more contributor license agreements.  See the NOTICE file
 + distributed with this work for additional information
 + regarding copyright ownership.  The ASF licenses this file
 + to you under the Apache License, Version 2.0 (the
 + "License"); you may not use this file except in compliance
 + with the License.  You may obtain a copy of the License at
 +
 +     http://www.apache.org/licenses/LICENSE-2.0
 +
 + Unless required by applicable law or agreed to in writing, software
 + distributed under the License is distributed on an "AS IS" BASIS,
 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + See the License for the specific language governing permissions and
 + limitations under the License.
 +-->
 +<xml-bindings
 +    xmlns="http://www.eclipse.org/eclipselink/xsds/persistence/oxm"
 +    package-name="org.apache.knox.gateway.topology"
 +    xml-mapping-metadata-complete="true">
 +    <xml-schema
 +        element-form-default="QUALIFIED"/>
 +    <java-types>
 +        <java-type name="Topology" xml-accessor-type="NONE">
-             <xml-type prop-order="name providers services applications"/>
++            <xml-type prop-order="name defaultServicePath providers services applications"/>
 +            <xml-root-element/>
 +            <java-attributes>
 +                <xml-element java-attribute="name" name="name"/>
++                <xml-element java-attribute="defaultServicePath" name="path"/>
 +                <xml-elements java-attribute="providers">
 +                    <xml-element name="provider"/>
 +                    <xml-element-wrapper name="gateway"/>
 +                </xml-elements>
 +                <xml-element java-attribute="services" name="service"/>
 +                <xml-element java-attribute="applications" name="application"/>
 +            </java-attributes>
 +        </java-type>
 +        <java-type name="Provider" xml-accessor-type="NONE">
 +            <java-attributes>
 +                <xml-element java-attribute="name" name="name"/>
 +                <xml-element java-attribute="enabled" name="enabled"/>
 +                <xml-element java-attribute="role" name="role"/>
 +                <xml-element java-attribute="paramsList" name="param"/>
 +            </java-attributes>
 +        </java-type>
 +        <java-type name="Service" xml-accessor-type="NONE">
 +            <java-attributes>
 +                <xml-element java-attribute="name" name="name"/>
 +                <xml-element java-attribute="role" name="role"/>
 +                <xml-element java-attribute="urls" name="url"/>
 +                <xml-element java-attribute="paramsList" name="param"/>
 +            </java-attributes>
 +        </java-type>
 +        <java-type name="Application" xml-accessor-type="NONE"/>
 +        <java-type name="Param" xml-accessor-type="NONE">
 +            <java-attributes>
 +                <xml-element java-attribute="name"/>
 +                <xml-element java-attribute="value"/>
 +            </java-attributes>
 +        </java-type>
 +    </java-types>
- </xml-bindings>
++</xml-bindings>


[34/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
index 06da13d,0000000..4187214
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
@@@ -1,220 -1,0 +1,263 @@@
 +package org.apache.knox.gateway.config.impl;
 +
 +import org.apache.knox.test.TestUtils;
 +import org.hamcrest.CoreMatchers;
 +import org.junit.Test;
 +
 +import java.util.List;
++import java.util.concurrent.TimeUnit;
 +
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.hamcrest.MatcherAssert.assertThat;
 +import static org.hamcrest.Matchers.hasItems;
 +import static org.hamcrest.Matchers.nullValue;
++import static org.junit.Assert.assertNotEquals;
++import static org.junit.Assert.assertTrue;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertNotNull;
 +
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +public class GatewayConfigImplTest {
 +
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testHttpServerSettings() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +
 +    // Check the defaults.
 +    assertThat( config.getHttpServerRequestBuffer(), is( 16*1024 ) );
 +    assertThat( config.getHttpServerRequestHeaderBuffer(), is( 8*1024 ) );
 +    assertThat( config.getHttpServerResponseBuffer(), is( 32*1024 ) );
 +    assertThat( config.getHttpServerResponseHeaderBuffer(), is( 8*1024 ) );
 +
 +    assertThat( GatewayConfigImpl.HTTP_SERVER_REQUEST_BUFFER, is( "gateway.httpserver.requestBuffer" ) );
 +    assertThat( GatewayConfigImpl.HTTP_SERVER_REQUEST_HEADER_BUFFER, is( "gateway.httpserver.requestHeaderBuffer" ) );
 +    assertThat( GatewayConfigImpl.HTTP_SERVER_RESPONSE_BUFFER, is( "gateway.httpserver.responseBuffer" ) );
 +    assertThat( GatewayConfigImpl.HTTP_SERVER_RESPONSE_HEADER_BUFFER, is( "gateway.httpserver.responseHeaderBuffer" ) );
 +
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_REQUEST_BUFFER, 32*1024 );
 +    assertThat( config.getHttpServerRequestBuffer(), is( 32*1024 ) );
 +
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_REQUEST_HEADER_BUFFER, 4*1024 );
 +    assertThat( config.getHttpServerRequestHeaderBuffer(), is( 4*1024 ) );
 +
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_RESPONSE_BUFFER, 16*1024 );
 +    assertThat( config.getHttpServerResponseBuffer(), is( 16*1024 ) );
 +
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_RESPONSE_HEADER_BUFFER, 6*1024 );
 +    assertThat( config.getHttpServerResponseHeaderBuffer(), is( 6*1024 ) );
 +
 +    // Restore the defaults.
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_REQUEST_BUFFER, 16*1024 );
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_REQUEST_HEADER_BUFFER, 8*1024 );
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_RESPONSE_BUFFER, 32*1024 );
 +    config.setInt( GatewayConfigImpl.HTTP_SERVER_RESPONSE_HEADER_BUFFER, 8*1024 );
 +  }
 +
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testGetGatewayDeploymentsBackupVersionLimit() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    assertThat( config.getGatewayDeploymentsBackupVersionLimit(), is(5) );
 +
 +    config.setInt( config.DEPLOYMENTS_BACKUP_VERSION_LIMIT, 3 );
 +    assertThat( config.getGatewayDeploymentsBackupVersionLimit(), is(3) );
 +
 +    config.setInt( config.DEPLOYMENTS_BACKUP_VERSION_LIMIT, -3 );
 +    assertThat( config.getGatewayDeploymentsBackupVersionLimit(), is(-1) );
 +
 +    config.setInt( config.DEPLOYMENTS_BACKUP_VERSION_LIMIT, 0 );
 +    assertThat( config.getGatewayDeploymentsBackupVersionLimit(), is(0) );
 +  }
 +
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testGetGatewayDeploymentsBackupAgeLimit() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    assertThat( config.getGatewayDeploymentsBackupAgeLimit(), is(-1L) );
 +
 +    config.set( config.DEPLOYMENTS_BACKUP_AGE_LIMIT, "1" );
 +    assertThat( config.getGatewayDeploymentsBackupAgeLimit(), is(86400000L) );
 +
 +    config.set( config.DEPLOYMENTS_BACKUP_AGE_LIMIT, "2" );
 +    assertThat( config.getGatewayDeploymentsBackupAgeLimit(), is(86400000L*2L) );
 +
 +    config.set( config.DEPLOYMENTS_BACKUP_AGE_LIMIT, "0" );
 +    assertThat( config.getGatewayDeploymentsBackupAgeLimit(), is(0L) );
 +
 +    config.set( config.DEPLOYMENTS_BACKUP_AGE_LIMIT, "X" );
 +    assertThat( config.getGatewayDeploymentsBackupAgeLimit(), is(-1L) );
 +  }
 +
 +
 +  @Test
 +  public void testSSLCiphers() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    List<String> list;
 +
 +    list = config.getIncludedSSLCiphers();
 +    assertThat( list, is(nullValue()) );
 +
 +    config.set( "ssl.include.ciphers", "none" );
 +    assertThat( config.getIncludedSSLCiphers(), is(nullValue()) );
 +
 +    config.set( "ssl.include.ciphers", "" );
 +    assertThat( config.getIncludedSSLCiphers(), is(nullValue()) );
 +
 +    config.set( "ssl.include.ciphers", "ONE" );
 +    assertThat( config.getIncludedSSLCiphers(), is(hasItems("ONE")) );
 +
 +    config.set( "ssl.include.ciphers", " ONE " );
 +    assertThat( config.getIncludedSSLCiphers(), is(hasItems("ONE")) );
 +
 +    config.set( "ssl.include.ciphers", "ONE,TWO" );
 +    assertThat( config.getIncludedSSLCiphers(), is(hasItems("ONE","TWO")) );
 +
 +    config.set( "ssl.include.ciphers", "ONE,TWO,THREE" );
 +    assertThat( config.getIncludedSSLCiphers(), is(hasItems("ONE","TWO","THREE")) );
 +
 +    config.set( "ssl.include.ciphers", " ONE , TWO , THREE " );
 +    assertThat( config.getIncludedSSLCiphers(), is(hasItems("ONE","TWO","THREE")) );
 +
 +    list = config.getExcludedSSLCiphers();
 +    assertThat( list, is(nullValue()) );
 +
 +    config.set( "ssl.exclude.ciphers", "none" );
 +    assertThat( config.getExcludedSSLCiphers(), is(nullValue()) );
 +
 +    config.set( "ssl.exclude.ciphers", "" );
 +    assertThat( config.getExcludedSSLCiphers(), is(nullValue()) );
 +
 +    config.set( "ssl.exclude.ciphers", "ONE" );
 +    assertThat( config.getExcludedSSLCiphers(), is(hasItems("ONE")) );
 +
 +    config.set( "ssl.exclude.ciphers", " ONE " );
 +    assertThat( config.getExcludedSSLCiphers(), is(hasItems("ONE")) );
 +
 +    config.set( "ssl.exclude.ciphers", "ONE,TWO" );
 +    assertThat( config.getExcludedSSLCiphers(), is(hasItems("ONE","TWO")) );
 +
 +    config.set( "ssl.exclude.ciphers", "ONE,TWO,THREE" );
 +    assertThat( config.getExcludedSSLCiphers(), is(hasItems("ONE","TWO","THREE")) );
 +
 +    config.set( "ssl.exclude.ciphers", " ONE , TWO , THREE " );
 +    assertThat( config.getExcludedSSLCiphers(), is(hasItems("ONE","TWO","THREE")) );
 +  }
 +
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testGlobalRulesServices() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    List<String> list;
 +
 +    list = config.getGlobalRulesServices();
 +    assertThat( list, is(notNullValue()) );
 +
 +    assertThat( list, is( CoreMatchers.hasItems("NAMENODE","JOBTRACKER", "WEBHDFS", "WEBHCAT", "OOZIE", "WEBHBASE", "HIVE", "RESOURCEMANAGER")));
 +
 +
 +    config.set( GatewayConfigImpl.GLOBAL_RULES_SERVICES, "none" );
 +    assertThat( config.getGlobalRulesServices(), is( CoreMatchers.hasItems("NAMENODE","JOBTRACKER", "WEBHDFS", "WEBHCAT", "OOZIE", "WEBHBASE", "HIVE", "RESOURCEMANAGER")) );
 +
 +    config.set( GatewayConfigImpl.GLOBAL_RULES_SERVICES, "" );
 +    assertThat( config.getGlobalRulesServices(), is( CoreMatchers.hasItems("NAMENODE","JOBTRACKER", "WEBHDFS", "WEBHCAT", "OOZIE", "WEBHBASE", "HIVE", "RESOURCEMANAGER")) );
 +
 +    config.set( GatewayConfigImpl.GLOBAL_RULES_SERVICES, "ONE" );
 +    assertThat( config.getGlobalRulesServices(), is(hasItems("ONE")) );
 +
 +    config.set( GatewayConfigImpl.GLOBAL_RULES_SERVICES, "ONE,TWO,THREE" );
 +    assertThat( config.getGlobalRulesServices(), is(hasItems("ONE","TWO","THREE")) );
 +
 +    config.set( GatewayConfigImpl.GLOBAL_RULES_SERVICES, " ONE , TWO , THREE " );
 +    assertThat( config.getGlobalRulesServices(), is(hasItems("ONE","TWO","THREE")) );
 +  }
 +
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testMetricsSettings() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    //test defaults
 +    assertThat(config.isMetricsEnabled(), is(false));
 +    assertThat(config.isJmxMetricsReportingEnabled(), is(false));
 +    assertThat(config.isGraphiteMetricsReportingEnabled(), is(false));
 +    assertThat(config.getGraphiteHost(), is("localhost"));
 +    assertThat(config.getGraphitePort(), is(32772));
 +  }
 +  
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testGatewayIdleTimeout() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    long idleTimeout = 0L;
 +    
 +    idleTimeout = config.getGatewayIdleTimeout();
 +    assertThat( idleTimeout, is(300000L));
 +
 +    config.set( GatewayConfigImpl.GATEWAY_IDLE_TIMEOUT, "15000" );
 +    idleTimeout = config.getGatewayIdleTimeout();
 +    assertThat( idleTimeout, is(15000L));
 +  }
 +  
 +  @Test( timeout = TestUtils.SHORT_TIMEOUT )
 +  public void testGatewayServerHeaderEnabled() {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    boolean serverHeaderEnabled = true;
 +    
 +    serverHeaderEnabled = config.isGatewayServerHeaderEnabled();
 +    assertThat( serverHeaderEnabled, is(true));
 +
 +    config.set( GatewayConfigImpl.SERVER_HEADER_ENABLED, "false");
 +    serverHeaderEnabled = config.isGatewayServerHeaderEnabled();
 +    assertThat( serverHeaderEnabled, is(false));
 +  }
 +
++
++  @Test
++  public void testGetRemoteConfigurationRegistryNames() {
++    GatewayConfigImpl config = new GatewayConfigImpl();
++
++    List<String> registryNames = config.getRemoteRegistryConfigurationNames();
++    assertNotNull(registryNames);
++    assertTrue(registryNames.isEmpty());
++
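++    // Remote registry definitions are semicolon-delimited key=value strings;
++    // the keys exercised below (type, address, authType, principal,
++    // credentialAlias) come straight from these fixtures.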
++    config.set(GatewayConfigImpl.CONFIG_REGISTRY_PREFIX + ".test1",
++               "type=ZooKeeper;address=host1:2181;authType=digest;principal=itsme;credentialAlias=testAlias");
++    registryNames = config.getRemoteRegistryConfigurationNames();
++    assertNotNull(registryNames);
++    assertFalse(registryNames.isEmpty());
++    assertEquals(1, registryNames.size());
++
++    config.set(GatewayConfigImpl.CONFIG_REGISTRY_PREFIX + ".test2",
++               "type=ZooKeeper;address=host2:2181,host3:2181,host4:2181");
++    registryNames = config.getRemoteRegistryConfigurationNames();
++    assertNotNull(registryNames);
++    assertFalse(registryNames.isEmpty());
++    assertEquals(2, registryNames.size());
++  }
++
++
++  @Test
++  public void testHTTPDefaultTimeouts() {
++    final GatewayConfigImpl config = new GatewayConfigImpl();
++
++    assertNotEquals(-1, config.getHttpClientConnectionTimeout());
++    assertNotEquals(-1, config.getHttpClientSocketTimeout());
++
++    assertEquals(TimeUnit.SECONDS.toMillis(20), config.getHttpClientConnectionTimeout());
++    assertEquals(TimeUnit.SECONDS.toMillis(20), config.getHttpClientSocketTimeout());
++
++  }
++
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
index 408d396,0000000..60cf633
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
@@@ -1,610 -1,0 +1,618 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.topology;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.IOUtils;
 +import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.test.TestUtils;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.OutputStream;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import static org.easymock.EasyMock.anyObject;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.Matchers.hasItem;
 +import static org.hamcrest.core.IsNull.notNullValue;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotEquals;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +
 +public class DefaultTopologyServiceTest {
 +
 +  @Before
 +  public void setUp() throws Exception {
 +  }
 +
 +  @After
 +  public void tearDown() throws Exception {
 +  }
 +
 +  private File createDir() throws IOException {
 +    return TestUtils.createTempDir(this.getClass().getSimpleName() + "-");
 +  }
 +
 +  private File createFile(File parent, String name, String resource, long timestamp) throws IOException {
 +    File file = new File(parent, name);
 +    if (!file.exists()) {
 +      FileUtils.touch(file);
 +    }
 +    InputStream input = ClassLoader.getSystemResourceAsStream(resource);
 +    OutputStream output = FileUtils.openOutputStream(file);
 +    IOUtils.copy(input, output);
 +    //KNOX-685: output.flush();
 +    input.close();
 +    output.close();
 +    file.setLastModified(timestamp);
 +    assertTrue("Failed to create test file " + file.getAbsolutePath(), file.exists());
 +    assertTrue("Failed to populate test file " + file.getAbsolutePath(), file.length() > 0);
 +
 +    return file;
 +  }
 +
 +  @Test
 +  public void testGetTopologies() throws Exception {
 +
 +    File dir = createDir();
 +    File topologyDir = new File(dir, "topologies");
 +
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
 +    long time = topologyDir.lastModified();
 +    try {
 +      createFile(topologyDir, "one.xml", "org/apache/knox/gateway/topology/file/topology-one.xml", time);
 +
 +      TestTopologyListener topoListener = new TestTopologyListener();
 +      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
 +
 +      TopologyService provider = new DefaultTopologyService();
 +      Map<String, String> c = new HashMap<>();
 +
 +      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
 +      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
-       EasyMock.expect(config.getGatewayConfDir()).andReturn(topologyDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayProvidersConfigDir()).andReturn(sharedProvidersDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
 +      EasyMock.replay(config);
 +
 +      provider.init(config, c);
 +
 +      provider.addTopologyChangeListener(topoListener);
 +
 +      provider.reloadTopologies();
 +
 +      Collection<Topology> topologies = provider.getTopologies();
 +      assertThat(topologies, notNullValue());
 +      assertThat(topologies.size(), is(1));
 +      Topology topology = topologies.iterator().next();
 +      assertThat(topology.getName(), is("one"));
 +      assertThat(topology.getTimestamp(), is(time));
 +      assertThat(topoListener.events.size(), is(1));
 +      topoListener.events.clear();
 +
 +      // Add a file to the directory.
 +      File two = createFile(topologyDir, "two.xml",
 +          "org/apache/knox/gateway/topology/file/topology-two.xml", 1L);
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(2));
 +      Set<String> names = new HashSet<>(Arrays.asList("one", "two"));
 +      Iterator<Topology> iterator = topologies.iterator();
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      assertThat(names.size(), is(0));
 +      assertThat(topoListener.events.size(), is(1));
 +      List<TopologyEvent> events = topoListener.events.get(0);
 +      assertThat(events.size(), is(1));
 +      TopologyEvent event = events.get(0);
 +      assertThat(event.getType(), is(TopologyEvent.Type.CREATED));
 +      assertThat(event.getTopology(), notNullValue());
 +
 +      // Update a file in the directory.
 +      two = createFile(topologyDir, "two.xml",
 +          "org/apache/knox/gateway/topology/file/topology-three.xml", 2L);
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(2));
 +      names = new HashSet<>(Arrays.asList("one", "two"));
 +      iterator = topologies.iterator();
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      assertThat(names.size(), is(0));
 +
 +      // Remove a file from the directory.
 +      two.delete();
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(1));
 +      topology = topologies.iterator().next();
 +      assertThat(topology.getName(), is("one"));
 +      assertThat(topology.getTimestamp(), is(time));
 +
 +    } finally {
 +      FileUtils.deleteQuietly(dir);
 +    }
 +  }
 +
 +  /**
 +   * KNOX-1014
 +   *
 +   * Test the lifecycle relationship between simple descriptors and topology files.
 +   *
 +   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
 +   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
 +   */
 +  @Test
 +  public void testSimpleDescriptorsTopologyGeneration() throws Exception {
 +
 +    File dir = createDir();
 +    File topologyDir = new File(dir, "topologies");
 +    topologyDir.mkdirs();
 +
 +    File descriptorsDir = new File(dir, "descriptors");
 +    descriptorsDir.mkdirs();
 +
 +    File sharedProvidersDir = new File(dir, "shared-providers");
 +    sharedProvidersDir.mkdirs();
 +
 +    try {
 +      TestTopologyListener topoListener = new TestTopologyListener();
 +      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
 +
 +      TopologyService provider = new DefaultTopologyService();
 +      Map<String, String> c = new HashMap<>();
 +
 +      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
 +      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
 +      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
 +      EasyMock.replay(config);
 +
 +      provider.init(config, c);
 +      provider.addTopologyChangeListener(topoListener);
 +      provider.reloadTopologies();
 +
 +
 +      // Add a simple descriptor to the descriptors dir to verify topology generation and loading (KNOX-1006)
 +      AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
 +      EasyMock.expect(aliasService.getPasswordFromAliasForGateway(anyObject(String.class))).andReturn(null).anyTimes();
 +      EasyMock.replay(aliasService);
 +      DefaultTopologyService.DescriptorsMonitor dm =
 +              new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
 +
 +      // Listener to simulate the topologies directory monitor, to notice when a topology has been deleted
 +      provider.addTopologyChangeListener(new TestTopologyDeleteListener((DefaultTopologyService)provider));
 +
 +      // Write out the referenced provider config first
 +      File provCfgFile = createFile(sharedProvidersDir,
 +                                    "ambari-cluster-policy.xml",
 +                                    "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
 +                                    System.currentTimeMillis());
 +      try {
 +        // Create the simple descriptor in the descriptors dir
 +        File simpleDesc = createFile(descriptorsDir,
 +                                     "four.json",
 +                                     "org/apache/knox/gateway/topology/file/simple-topology-four.json",
 +                                     System.currentTimeMillis());
 +
 +        // Trigger the topology generation by noticing the simple descriptor
 +        dm.onFileChange(simpleDesc);
 +
 +        // Load the generated topology
 +        provider.reloadTopologies();
 +        Collection<Topology> topologies = provider.getTopologies();
 +        assertThat(topologies.size(), is(1));
 +        Iterator<Topology> iterator = topologies.iterator();
 +        Topology topology = iterator.next();
 +        assertThat("four", is(topology.getName()));
 +        int serviceCount = topology.getServices().size();
 +        assertEquals("Expected the same number of services as are declared in the simple dscriptor.", 10, serviceCount);
 +
 +        // Overwrite the simple descriptor with a different set of services, and check that the changes are
 +        // propagated to the associated topology
 +        simpleDesc = createFile(descriptorsDir,
 +                                "four.json",
 +                                "org/apache/knox/gateway/topology/file/simple-descriptor-five.json",
 +                                System.currentTimeMillis());
 +        dm.onFileChange(simpleDesc);
 +        provider.reloadTopologies();
 +        topologies = provider.getTopologies();
 +        topology = topologies.iterator().next();
 +        assertNotEquals(serviceCount, topology.getServices().size());
 +        assertEquals(6, topology.getServices().size());
 +
 +        // Delete the simple descriptor, and make sure that the associated topology file is deleted
 +        simpleDesc.delete();
 +        dm.onFileDelete(simpleDesc);
 +        provider.reloadTopologies();
 +        topologies = provider.getTopologies();
 +        assertTrue(topologies.isEmpty());
 +
 +        // Create a new simple descriptor, then delete its generated topology file and make
 +        // sure that the associated simple descriptor is deleted as well
 +        simpleDesc = createFile(descriptorsDir,
 +                                "deleteme.json",
 +                                "org/apache/knox/gateway/topology/file/simple-descriptor-five.json",
 +                                System.currentTimeMillis());
 +        dm.onFileChange(simpleDesc);
 +        provider.reloadTopologies();
 +        topologies = provider.getTopologies();
 +        assertFalse(topologies.isEmpty());
 +        topology = topologies.iterator().next();
 +        assertEquals("deleteme", topology.getName());
 +        File topologyFile = new File(topologyDir, topology.getName() + ".xml");
 +        assertTrue(topologyFile.exists());
 +        topologyFile.delete();
 +        provider.reloadTopologies();
 +        assertFalse("Simple descriptor should have been deleted because the associated topology was.",
 +                    simpleDesc.exists());
 +
 +      } finally {
 +        provCfgFile.delete();
 +      }
 +    } finally {
 +      FileUtils.deleteQuietly(dir);
 +    }
 +  }
 +
 +  /**
 +   * KNOX-1014
 +   *
 +   * Test the lifecycle relationship between provider configuration files, simple descriptors, and topology files.
 +   *
 +   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
 +   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
 +   */
 +  @Test
 +  public void testTopologiesUpdateFromProviderConfigChange() throws Exception {
 +    File dir = createDir();
 +    File topologyDir = new File(dir, "topologies");
 +    topologyDir.mkdirs();
 +
 +    File descriptorsDir = new File(dir, "descriptors");
 +    descriptorsDir.mkdirs();
 +
 +    File sharedProvidersDir = new File(dir, "shared-providers");
 +    sharedProvidersDir.mkdirs();
 +
 +    try {
 +      TestTopologyListener topoListener = new TestTopologyListener();
 +      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
 +
 +      TopologyService ts = new DefaultTopologyService();
 +      Map<String, String> c = new HashMap<>();
 +
 +      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
 +      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
 +      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
 +      EasyMock.replay(config);
 +
 +      ts.init(config, c);
 +      ts.addTopologyChangeListener(topoListener);
 +      ts.reloadTopologies();
 +
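 +      // Reach into DefaultTopologyService for its private DescriptorsMonitor so the test can
 +      // fire descriptor change notifications directly, without waiting on a real file monitor.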
 +      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
 +      dmField.setAccessible(true);
 +      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
 +
 +      // Write out the referenced provider configs first
 +      createFile(sharedProvidersDir,
 +                 "provider-config-one.xml",
 +                 "org/apache/knox/gateway/topology/file/provider-config-one.xml",
 +                 System.currentTimeMillis());
 +
 +      // Create the simple descriptor, which depends on provider-config-one.xml
 +      File simpleDesc = createFile(descriptorsDir,
 +                                   "six.json",
 +                                   "org/apache/knox/gateway/topology/file/simple-descriptor-six.json",
 +                                   System.currentTimeMillis());
 +
 +      // "Notice" the simple descriptor change, and generate a topology based on it
 +      dm.onFileChange(simpleDesc);
 +
 +      // Load the generated topology
 +      ts.reloadTopologies();
 +      Collection<Topology> topologies = ts.getTopologies();
 +      assertThat(topologies.size(), is(1));
 +      Iterator<Topology> iterator = topologies.iterator();
 +      Topology topology = iterator.next();
 +      assertFalse("The Shiro provider is disabled in provider-config-one.xml",
 +                  topology.getProvider("authentication", "ShiroProvider").isEnabled());
 +
 +      // Overwrite the referenced provider configuration with a different ShiroProvider config, and check that the
 +      // changes are propagated to the associated topology
 +      File providerConfig = createFile(sharedProvidersDir,
 +                                       "provider-config-one.xml",
 +                                       "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
 +                                       System.currentTimeMillis());
 +
 +      // "Notice" the simple descriptor change as a result of the referenced config change
 +      dm.onFileChange(simpleDesc);
 +
 +      // Load the generated topology
 +      ts.reloadTopologies();
 +      topologies = ts.getTopologies();
 +      assertFalse(topologies.isEmpty());
 +      topology = topologies.iterator().next();
 +      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
 +              topology.getProvider("authentication", "ShiroProvider").isEnabled());
 +
 +      // Delete the provider configuration, and make sure that the associated topology file is unaffected.
 +      // The topology file should not be affected because the simple descriptor handling will fail to resolve the
 +      // referenced provider configuration.
 +      providerConfig.delete();     // Delete the file
 +      dm.onFileChange(simpleDesc); // The provider config deletion will trigger a descriptor change notification
 +      ts.reloadTopologies();
 +      topologies = ts.getTopologies();
 +      assertFalse(topologies.isEmpty());
 +      topology = topologies.iterator().next();
 +      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
 +              topology.getProvider("authentication", "ShiroProvider").isEnabled());
 +
 +    } finally {
 +      FileUtils.deleteQuietly(dir);
 +    }
 +  }
 +
 +  /**
 +   * KNOX-1039
 +   */
 +  @Test
 +  public void testConfigurationCRUDAPI() throws Exception {
 +    File dir = createDir();
 +    File topologyDir = new File(dir, "topologies");
 +    topologyDir.mkdirs();
 +
 +    File descriptorsDir = new File(dir, "descriptors");
 +    descriptorsDir.mkdirs();
 +
 +    File sharedProvidersDir = new File(dir, "shared-providers");
 +    sharedProvidersDir.mkdirs();
 +
 +    try {
 +      TestTopologyListener topoListener = new TestTopologyListener();
 +      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
 +
 +      TopologyService ts = new DefaultTopologyService();
 +      Map<String, String> c = new HashMap<>();
 +
 +      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
 +      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
 +      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
 +      EasyMock.replay(config);
 +
 +      ts.init(config, c);
 +      ts.addTopologyChangeListener(topoListener);
 +      ts.reloadTopologies();
 +
 +      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
 +      dmField.setAccessible(true);
 +      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
 +
 +      final String simpleDescName  = "six.json";
 +      final String provConfOne     = "provider-config-one.xml";
 +      final String provConfTwo     = "ambari-cluster-policy.xml";
 +
 +      // "Deploy" the referenced provider configs first
 +      boolean isDeployed =
 +        ts.deployProviderConfiguration(provConfOne,
 +                FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
 +                    "org/apache/knox/gateway/topology/file/provider-config-one.xml").toURI())));
 +      assertTrue(isDeployed);
 +      File provConfOneFile = new File(sharedProvidersDir, provConfOne);
 +      assertTrue(provConfOneFile.exists());
 +
 +      isDeployed =
 +        ts.deployProviderConfiguration(provConfTwo,
 +                FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
 +                    "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml").toURI())));
 +      assertTrue(isDeployed);
 +      File provConfTwoFile = new File(sharedProvidersDir, provConfTwo);
 +      assertTrue(provConfTwoFile.exists());
 +
 +      // Validate the provider configurations known by the topology service
 +      Collection<File> providerConfigurations = ts.getProviderConfigurations();
 +      assertNotNull(providerConfigurations);
 +      assertEquals(2, providerConfigurations.size());
 +      assertTrue(providerConfigurations.contains(provConfOneFile));
 +      assertTrue(providerConfigurations.contains(provConfTwoFile));
 +
 +      // "Deploy" the simple descriptor, which depends on provConfOne
 +      isDeployed =
 +        ts.deployDescriptor(simpleDescName,
 +            FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
 +                "org/apache/knox/gateway/topology/file/simple-descriptor-six.json").toURI())));
 +      assertTrue(isDeployed);
 +      File simpleDesc = new File(descriptorsDir, simpleDescName);
 +      assertTrue(simpleDesc.exists());
 +
 +      // Validate the simple descriptors known by the topology service
 +      Collection<File> descriptors = ts.getDescriptors();
 +      assertNotNull(descriptors);
 +      assertEquals(1, descriptors.size());
 +      assertTrue(descriptors.contains(simpleDesc));
 +
 +      // "Notice" the simple descriptor, so the provider configuration dependency relationship is recorded
 +      dm.onFileChange(simpleDesc);
 +
 +      // Attempt to delete the referenced provConfOne
 +      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
 +                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
 +
 +      // Overwrite the simple descriptor with content that changes the provider config reference to provConfTwo
 +      isDeployed =
 +        ts.deployDescriptor(simpleDescName,
 +              FileUtils.readFileToString(new File(ClassLoader.getSystemResource(
 +                  "org/apache/knox/gateway/topology/file/simple-descriptor-five.json").toURI())));
 +      assertTrue(isDeployed);
 +      assertTrue(simpleDesc.exists());
 +
 +      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
 +      dm.onFileChange(simpleDesc);
 +
 +      // Attempt to delete the referenced provConfOne
 +      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
 +                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
 +
 +      // Re-validate the provider configurations known by the topology service
 +      providerConfigurations = ts.getProviderConfigurations();
 +      assertNotNull(providerConfigurations);
 +      assertEquals(1, providerConfigurations.size());
 +      assertFalse(providerConfigurations.contains(provConfOneFile));
 +      assertTrue(providerConfigurations.contains(provConfTwoFile));
 +
 +      // Attempt to delete the referenced provConfTwo
 +      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
 +                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
 +
 +      // Delete the referencing simple descriptor
 +      assertTrue(ts.deleteDescriptor(FilenameUtils.getBaseName(simpleDescName)));
 +      assertFalse(simpleDesc.exists());
 +
 +      // Re-validate the simple descriptors known by the topology service
 +      descriptors = ts.getDescriptors();
 +      assertNotNull(descriptors);
 +      assertTrue(descriptors.isEmpty());
 +
 +      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
 +      dm.onFileDelete(simpleDesc);
 +
 +      // Attempt to delete the referenced provConfTwo
 +      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
 +                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
 +
 +      // Re-validate the provider configurations known by the topology service
 +      providerConfigurations = ts.getProviderConfigurations();
 +      assertNotNull(providerConfigurations);
 +      assertTrue(providerConfigurations.isEmpty());
 +
 +    } finally {
 +      FileUtils.deleteQuietly(dir);
 +    }
 +  }
 +
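 +  // Force each observer registered with the monitor to check for changes and notify its
 +  // listeners immediately, rather than waiting for the monitor's polling interval.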
 +  private void kickMonitor(FileAlterationMonitor monitor) {
 +    for (FileAlterationObserver observer : monitor.getObservers()) {
 +      observer.checkAndNotify();
 +    }
 +  }
 +
 +
 +  @Test
 +  public void testProviderParamsOrderIsPreserved() {
 +
 +    Provider provider = new Provider();
 +    String names[] = {"ldapRealm=",
 +        "ldapContextFactory",
 +        "ldapRealm.contextFactory",
 +        "ldapGroupRealm",
 +        "ldapGroupRealm.contextFactory",
 +        "ldapGroupRealm.contextFactory.systemAuthenticationMechanism"
 +    };
 +
 +    Param param = null;
 +    for (String name : names) {
 +      param = new Param();
 +      param.setName(name);
 +      param.setValue(name);
 +      provider.addParam(param);
 +
 +    }
 +    Map<String, String> params = provider.getParams();
 +    Set<String> keySet = params.keySet();
 +    Iterator<String> iter = keySet.iterator();
 +    int i = 0;
 +    while (iter.hasNext()) {
 +      assertEquals(names[i++], iter.next());
 +    }
 +
 +  }
 +
 +  private class TestTopologyListener implements TopologyListener {
 +
 +    ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      this.events.add(events);
 +    }
 +
 +  }
 +
 +
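 +  // Bridges topology DELETED events back to a FileAlterationListener, mimicking the
 +  // notification the file monitor would produce when a generated topology file is removed.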
 +  private class TestTopologyDeleteListener implements TopologyListener {
 +
 +    FileAlterationListener delegate;
 +
 +    TestTopologyDeleteListener(FileAlterationListener delegate) {
 +      this.delegate = delegate;
 +    }
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      for (TopologyEvent event : events) {
 +        if (event.getType().equals(TopologyEvent.Type.DELETED)) {
 +          delegate.onFileDelete(new File(event.getTopology().getUri()));
 +        }
 +      }
 +    }
 +
 +  }
 +
 +}
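
For context, the simple descriptors these tests deploy (four.json, six.json, deleteme.json) are small
JSON files naming a provider configuration reference and a set of services. A minimal sketch, assuming
the field names used by the writeJSON helper in the SimpleDescriptorFactoryTest diff below (the real
test resources live under org/apache/knox/gateway/topology/file/ in the source tree):

    {
      "discovery-type":"AMBARI",
      "discovery-address":"http://c6401.ambari.apache.org:8080",
      "discovery-user":"joeblow",
      "provider-config-ref":"ambari-cluster-policy.xml",
      "cluster":"myCluster",
      "services":[
        {"name":"NODEMANAGER"},
        {"name":"KNOXSSO",
         "params":{"knoxsso.cookie.secure.only":"true", "knoxsso.token.ttl":"100000"}},
        {"name":"HIVE",
         "urls":["http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org"]}
      ]
    }

DescriptorsMonitor.onFileChange(...) responds to such a file by resolving the referenced provider
configuration from the shared-providers directory and (re)generating a topology XML with the same
base name in the topologies directory, which reloadTopologies() then picks up.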

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
index df31f3d,0000000..2622f13
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
@@@ -1,681 -1,0 +1,690 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.io.File;
 +import java.io.FileWriter;
 +import java.io.Writer;
 +import java.util.*;
 +
 +import org.junit.Test;
 +import static org.junit.Assert.*;
 +
 +
 +public class SimpleDescriptorFactoryTest {
 +
 +    private enum FileType {
 +        JSON,
-         YAML
++        YAML,
++        YML
 +    }
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptor() throws Exception {
 +        testParseSimpleDescriptor(FileType.JSON);
 +    }
 +
 +    @Test
 +    public void testParseYAMLSimpleDescriptor() throws Exception {
++        testParseSimpleDescriptor(FileType.YML);
 +        testParseSimpleDescriptor(FileType.YAML);
 +    }
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptorWithServiceParams() throws Exception {
 +        testParseSimpleDescriptorWithServiceParams(FileType.JSON);
 +    }
 +
 +    @Test
 +    public void testParseYAMLSimpleDescriptorWithServiceParams() throws Exception {
++        testParseSimpleDescriptorWithServiceParams(FileType.YML);
 +        testParseSimpleDescriptorWithServiceParams(FileType.YAML);
 +    }
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptorWithApplications() throws Exception {
 +        testParseSimpleDescriptorWithApplications(FileType.JSON);
 +    }
 +
 +    @Test
 +    public void testParseYAMLSimpleDescriptorWithApplications() throws Exception {
++        testParseSimpleDescriptorWithApplications(FileType.YML);
 +        testParseSimpleDescriptorWithApplications(FileType.YAML);
 +    }
 +
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptorWithServicesAndApplications() throws Exception {
 +        testParseSimpleDescriptorWithServicesAndApplications(FileType.JSON);
 +    }
 +
 +    @Test
 +    public void testParseYAMLSimpleDescriptorWithServicesAndApplications() throws Exception {
++        testParseSimpleDescriptorWithServicesAndApplications(FileType.YML);
 +        testParseSimpleDescriptorWithServicesAndApplications(FileType.YAML);
 +    }
 +
 +
 +    private void testParseSimpleDescriptor(FileType type) throws Exception {
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "joeblow";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
 +        String fileName = "test-topology." + getFileExtensionForType(type);
 +        File testFile = null;
 +        try {
 +            testFile = writeDescriptorFile(type,
 +                                           fileName,
 +                                           discoveryType,
 +                                           discoveryAddress,
 +                                           discoveryUser,
 +                                           providerConfig,
 +                                           clusterName,
 +                                           services);
 +            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
 +        } finally {
 +            if (testFile != null) {
 +                try {
 +                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +    private void testParseSimpleDescriptorWithServiceParams(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        services.put("KNOXSSO", null);
 +        services.put("KNOXTOKEN", null);
 +        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
 +
 +        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, String> knoxSSOParams = new HashMap<>();
 +        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
 +        knoxSSOParams.put("knoxsso.token.ttl", "100000");
 +        serviceParams.put("KNOXSSO", knoxSSOParams);
 +
 +        Map<String, String> knoxTokenParams = new HashMap<>();
 +        knoxTokenParams.put("knox.token.ttl", "36000000");
 +        knoxTokenParams.put("knox.token.audiences", "tokenbased");
 +        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
 +        serviceParams.put("KNOXTOKEN", knoxTokenParams);
 +
 +        Map<String, String> customRoleParams = new HashMap<>();
 +        customRoleParams.put("custom.param.1", "value1");
 +        customRoleParams.put("custom.param.2", "value2");
 +        serviceParams.put("CustomRole", customRoleParams);
 +
 +        String fileName = "test-topology." + getFileExtensionForType(type);
 +        File testFile = null;
 +        try {
 +            testFile = writeDescriptorFile(type,
 +                                           fileName,
 +                                           discoveryType,
 +                                           discoveryAddress,
 +                                           discoveryUser,
 +                                           providerConfig,
 +                                           clusterName,
 +                                           services,
 +                                           serviceParams);
 +            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
 +        } finally {
 +            if (testFile != null) {
 +                try {
 +                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +    private void testParseSimpleDescriptorWithApplications(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> apps = new HashMap<>();
 +        apps.put("app-one", null);
 +        apps.put("appTwo", null);
 +        apps.put("thirdApps", null);
 +        apps.put("appfour", Arrays.asList("http://host1:1234", "http://host2:5678", "http://host1:1357"));
 +        apps.put("AppFive", Collections.singletonList("http://host5:8080"));
 +
 +        final Map<String, Map<String, String>> appParams = new HashMap<>();
 +        Map<String, String> oneParams = new HashMap<>();
 +        oneParams.put("appone.cookie.secure.only", "true");
 +        oneParams.put("appone.token.ttl", "100000");
 +        appParams.put("app-one", oneParams);
 +        Map<String, String> fiveParams = new HashMap<>();
 +        fiveParams.put("myproperty", "true");
 +        fiveParams.put("anotherparam", "100000");
 +        appParams.put("AppFive", fiveParams);
 +
 +        String fileName = "test-topology." + getFileExtensionForType(type);
 +        File testFile = null;
 +        try {
 +            testFile = writeDescriptorFile(type,
 +                                           fileName,
 +                                           discoveryType,
 +                                           discoveryAddress,
 +                                           discoveryUser,
 +                                           providerConfig,
 +                                           clusterName,
 +                                           null,
 +                                           null,
 +                                           apps,
 +                                           appParams);
 +            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd,
 +                                     discoveryType,
 +                                     discoveryAddress,
 +                                     providerConfig,
 +                                     clusterName,
 +                                     null,
 +                                     null,
 +                                     apps,
 +                                     appParams);
 +        } finally {
 +            if (testFile != null) {
 +                try {
 +                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +    private void testParseSimpleDescriptorWithServicesAndApplications(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        services.put("KNOXSSO", null);
 +        services.put("KNOXTOKEN", null);
 +        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
 +
 +        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, String> knoxSSOParams = new HashMap<>();
 +        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
 +        knoxSSOParams.put("knoxsso.token.ttl", "100000");
 +        serviceParams.put("KNOXSSO", knoxSSOParams);
 +
 +        Map<String, String> knoxTokenParams = new HashMap<>();
 +        knoxTokenParams.put("knox.token.ttl", "36000000");
 +        knoxTokenParams.put("knox.token.audiences", "tokenbased");
 +        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
 +        serviceParams.put("KNOXTOKEN", knoxTokenParams);
 +
 +        Map<String, String> customRoleParams = new HashMap<>();
 +        customRoleParams.put("custom.param.1", "value1");
 +        customRoleParams.put("custom.param.2", "value2");
 +        serviceParams.put("CustomRole", customRoleParams);
 +
 +        final Map<String, List<String>> apps = new HashMap<>();
 +        apps.put("app-one", null);
 +        apps.put("appTwo", null);
 +        apps.put("thirdApps", null);
 +        apps.put("appfour", Arrays.asList("http://host1:1234", "http://host2:5678", "http://host1:1357"));
 +        apps.put("AppFive", Collections.singletonList("http://host5:8080"));
 +
 +        final Map<String, Map<String, String>> appParams = new HashMap<>();
 +        Map<String, String> oneParams = new HashMap<>();
 +        oneParams.put("appone.cookie.secure.only", "true");
 +        oneParams.put("appone.token.ttl", "100000");
 +        appParams.put("app-one", oneParams);
 +        Map<String, String> fiveParams = new HashMap<>();
 +        fiveParams.put("myproperty", "true");
 +        fiveParams.put("anotherparam", "100000");
 +        appParams.put("AppFive", fiveParams);
 +
 +        String fileName = "test-topology." + getFileExtensionForType(type);
 +        File testFile = null;
 +        try {
 +            testFile = writeDescriptorFile(type,
 +                                           fileName,
 +                                           discoveryType,
 +                                           discoveryAddress,
 +                                           discoveryUser,
 +                                           providerConfig,
 +                                           clusterName,
 +                                           services,
 +                                           serviceParams,
 +                                           apps,
 +                                           appParams);
 +            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd,
 +                                     discoveryType,
 +                                     discoveryAddress,
 +                                     providerConfig,
 +                                     clusterName,
 +                                     services,
 +                                     serviceParams,
 +                                     apps,
 +                                     appParams);
 +        } finally {
 +            if (testFile != null) {
 +                try {
 +                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +    private String getFileExtensionForType(FileType type) {
 +        String extension = null;
 +        switch (type) {
 +            case JSON:
 +                extension = "json";
 +                break;
-             case YAML:
++            case YML:
 +                extension = "yml";
 +                break;
++            case YAML:
++                extension = "yaml";
++                break;
 +        }
 +        return extension;
 +    }
 +
 +    private File writeDescriptorFile(FileType type,
 +                                     String                           path,
 +                                     String                           discoveryType,
 +                                     String                           discoveryAddress,
 +                                     String                           discoveryUser,
 +                                     String                           providerConfig,
 +                                     String                           clusterName,
 +                                     Map<String, List<String>>        services) throws Exception {
 +        return writeDescriptorFile(type,
 +                                   path,
 +                                   discoveryType,
 +                                   discoveryAddress,
 +                                   discoveryUser,
 +                                   providerConfig,
 +                                   clusterName,
 +                                   services,
 +                                   null);
 +    }
 +
 +    private File writeDescriptorFile(FileType type,
 +                                     String                           path,
 +                                     String                           discoveryType,
 +                                     String                           discoveryAddress,
 +                                     String                           discoveryUser,
 +                                     String                           providerConfig,
 +                                     String                           clusterName,
 +                                     Map<String, List<String>>        services,
 +                                     Map<String, Map<String, String>> serviceParams) throws Exception {
 +        return writeDescriptorFile(type,
 +                                   path,
 +                                   discoveryType,
 +                                   discoveryAddress,
 +                                   discoveryUser,
 +                                   providerConfig,
 +                                   clusterName,
 +                                   services,
 +                                   serviceParams,
 +                                   null,
 +                                   null);
 +    }
 +
 +
 +    private File writeDescriptorFile(FileType type,
 +                                     String                           path,
 +                                     String                           discoveryType,
 +                                     String                           discoveryAddress,
 +                                     String                           discoveryUser,
 +                                     String                           providerConfig,
 +                                     String                           clusterName,
 +                                     Map<String, List<String>>        services,
 +                                     Map<String, Map<String, String>> serviceParams,
 +                                     Map<String, List<String>>        apps,
 +                                     Map<String, Map<String, String>> appParams) throws Exception {
 +        File result = null;
 +        switch (type) {
 +            case JSON:
 +                result = writeJSON(path,
 +                                   discoveryType,
 +                                   discoveryAddress,
 +                                   discoveryUser,
 +                                   providerConfig,
 +                                   clusterName,
 +                                   services,
 +                                   serviceParams,
 +                                   apps,
 +                                   appParams);
 +                break;
 +            case YAML:
++            case YML:
 +                result = writeYAML(path,
 +                                   discoveryType,
 +                                   discoveryAddress,
 +                                   discoveryUser,
 +                                   providerConfig,
 +                                   clusterName,
 +                                   services,
 +                                   serviceParams,
 +                                   apps,
 +                                   appParams);
 +                break;
 +        }
 +        return result;
 +    }
 +
 +
 +    private File writeJSON(String path,
 +                           String discoveryType,
 +                           String discoveryAddress,
 +                           String discoveryUser,
 +                           String providerConfig,
 +                           String clusterName,
 +                           Map<String, List<String>> services,
 +                           Map<String, Map<String, String>> serviceParams,
 +                           Map<String, List<String>> apps,
 +                           Map<String, Map<String, String>> appParams) throws Exception {
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("{" + "\n");
 +        fw.write("\"discovery-type\":\"" + discoveryType + "\",\n");
 +        fw.write("\"discovery-address\":\"" + discoveryAddress + "\",\n");
 +        fw.write("\"discovery-user\":\"" + discoveryUser + "\",\n");
 +        fw.write("\"provider-config-ref\":\"" + providerConfig + "\",\n");
 +        fw.write("\"cluster\":\"" + clusterName + "\"");
 +
 +        if (services != null && !services.isEmpty()) {
 +            fw.write(",\n\"services\":[\n");
 +            writeServiceOrApplicationJSON(fw, services, serviceParams);
 +            fw.write("]\n");
 +        }
 +
 +        if (apps != null && !apps.isEmpty()) {
 +            fw.write(",\n\"applications\":[\n");
 +            writeServiceOrApplicationJSON(fw, apps, appParams);
 +            fw.write("]\n");
 +        }
 +
 +        fw.write("}\n");
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
 +    private void writeServiceOrApplicationJSON(Writer fw,
 +                                               Map<String, List<String>> elementURLs,
 +                                               Map<String, Map<String, String>> elementParams) throws Exception {
 +        if (elementURLs != null) {
 +            int i = 0;
 +            for (String name : elementURLs.keySet()) {
 +                fw.write("{\"name\":\"" + name + "\"");
 +
 +                // Service params
 +                if (elementParams != null && !elementParams.isEmpty()) {
 +                    Map<String, String> params = elementParams.get(name);
 +                    if (params != null && !params.isEmpty()) {
 +                        fw.write(",\n\"params\":{\n");
 +                        Iterator<String> paramNames = params.keySet().iterator();
 +                        while (paramNames.hasNext()) {
 +                            String paramName = paramNames.next();
 +                            String paramValue = params.get(paramName);
 +                            fw.write("\"" + paramName + "\":\"" + paramValue + "\"");
 +                            fw.write(paramNames.hasNext() ? ",\n" : "");
 +                        }
 +                        fw.write("\n}");
 +                    }
 +                }
 +
 +                // Service URLs
 +                List<String> urls = elementURLs.get(name);
 +                if (urls != null) {
 +                    fw.write(",\n\"urls\":[");
 +                    Iterator<String> urlIter = urls.iterator();
 +                    while (urlIter.hasNext()) {
 +                        fw.write("\"" + urlIter.next() + "\"");
 +                        if (urlIter.hasNext()) {
 +                            fw.write(", ");
 +                        }
 +                    }
 +                    fw.write("]\n");
 +                }
 +
 +                fw.write("}");
 +                if (i++ < elementURLs.size() - 1) {
 +                    fw.write(",");
 +                }
 +                fw.write("\n");
 +            }
 +        }
 +    }
 +
 +    private File writeYAML(String                           path,
 +                           String                           discoveryType,
 +                           String                           discoveryAddress,
 +                           String                           discoveryUser,
 +                           String                           providerConfig,
 +                           String                           clusterName,
 +                           Map<String, List<String>>        services,
 +                           Map<String, Map<String, String>> serviceParams,
 +                           Map<String, List<String>>        apps,
 +                           Map<String, Map<String, String>> appParams) throws Exception {
 +
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("---" + "\n");
 +        fw.write("discovery-type: " + discoveryType + "\n");
 +        fw.write("discovery-address: " + discoveryAddress + "\n");
 +        fw.write("discovery-user: " + discoveryUser + "\n");
 +        fw.write("provider-config-ref: " + providerConfig + "\n");
 +        fw.write("cluster: " + clusterName+ "\n");
 +
 +        if (services != null && !services.isEmpty()) {
 +            fw.write("services:\n");
 +            writeServiceOrApplicationYAML(fw, services, serviceParams);
 +        }
 +
 +        if (apps != null && !apps.isEmpty()) {
 +            fw.write("applications:\n");
 +            writeServiceOrApplicationYAML(fw, apps, appParams);
 +        }
 +
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
 +    private void writeServiceOrApplicationYAML(Writer                           fw,
 +                                               Map<String, List<String>>        elementURLs,
 +                                               Map<String, Map<String, String>> elementParams) throws Exception {
 +        for (String name : elementURLs.keySet()) {
 +            fw.write("    - name: " + name + "\n");
 +
 +            // Service params
 +            if (elementParams != null && !elementParams.isEmpty()) {
 +                if (elementParams.containsKey(name)) {
 +                    Map<String, String> params = elementParams.get(name);
 +                    fw.write("      params:\n");
 +                    for (String paramName : params.keySet()) {
 +                        fw.write("            " + paramName + ": " + params.get(paramName) + "\n");
 +                    }
 +                }
 +            }
 +
 +            // Service URLs
 +            List<String> urls = elementURLs.get(name);
 +            if (urls != null) {
 +                fw.write("      urls:\n");
 +                for (String url : urls) {
 +                    fw.write("          - " + url + "\n");
 +                }
 +            }
 +        }
 +    }
 +
 +
 +    private void validateSimpleDescriptor(SimpleDescriptor          sd,
 +                                          String                    discoveryType,
 +                                          String                    discoveryAddress,
 +                                          String                    providerConfig,
 +                                          String                    clusterName,
 +                                          Map<String, List<String>> expectedServices) {
 +        validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, expectedServices, null);
 +    }
 +
 +
 +    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
 +                                          String                           discoveryType,
 +                                          String                           discoveryAddress,
 +                                          String                           providerConfig,
 +                                          String                           clusterName,
 +                                          Map<String, List<String>>        expectedServices,
 +                                          Map<String, Map<String, String>> expectedServiceParameters) {
 +        validateSimpleDescriptor(sd,
 +                                 discoveryType,
 +                                 discoveryAddress,
 +                                 providerConfig,
 +                                 clusterName,
 +                                 expectedServices,
 +                                 expectedServiceParameters,
 +                                 null,
 +                                 null);
 +    }
 +
 +    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
 +                                          String                           discoveryType,
 +                                          String                           discoveryAddress,
 +                                          String                           providerConfig,
 +                                          String                           clusterName,
 +                                          Map<String, List<String>>        expectedServices,
 +                                          Map<String, Map<String, String>> expectedServiceParameters,
 +                                          Map<String, List<String>>        expectedApps,
 +                                          Map<String, Map<String, String>> expectedAppParameters) {
 +        assertNotNull(sd);
 +        assertEquals(discoveryType, sd.getDiscoveryType());
 +        assertEquals(discoveryAddress, sd.getDiscoveryAddress());
 +        assertEquals(providerConfig, sd.getProviderConfig());
 +        assertEquals(clusterName, sd.getClusterName());
 +
 +        List<SimpleDescriptor.Service> actualServices = sd.getServices();
 +
 +        if (expectedServices == null) {
 +            assertTrue(actualServices.isEmpty());
 +        } else {
 +            assertEquals(expectedServices.size(), actualServices.size());
 +
 +            for (SimpleDescriptor.Service actualService : actualServices) {
 +                assertTrue(expectedServices.containsKey(actualService.getName()));
 +                assertEquals(expectedServices.get(actualService.getName()), actualService.getURLs());
 +
 +                // Validate service parameters
 +                if (expectedServiceParameters != null) {
 +                    if (expectedServiceParameters.containsKey(actualService.getName())) {
 +                        Map<String, String> expectedParams = expectedServiceParameters.get(actualService.getName());
 +
 +                        Map<String, String> actualServiceParams = actualService.getParams();
 +                        assertNotNull(actualServiceParams);
 +
 +                        // Validate the size of the service parameter set
 +                        assertEquals(expectedParams.size(), actualServiceParams.size());
 +
 +                        // Validate the parameter contents
 +                        for (String paramName : actualServiceParams.keySet()) {
 +                            assertTrue(expectedParams.containsKey(paramName));
 +                            assertEquals(expectedParams.get(paramName), actualServiceParams.get(paramName));
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        List<SimpleDescriptor.Application> actualApps = sd.getApplications();
 +
 +        if (expectedApps == null) {
 +            assertTrue(actualApps.isEmpty());
 +        } else {
 +            assertEquals(expectedApps.size(), actualApps.size());
 +
 +            for (SimpleDescriptor.Application actualApp : actualApps) {
 +                assertTrue(expectedApps.containsKey(actualApp.getName()));
 +                assertEquals(expectedApps.get(actualApp.getName()), actualApp.getURLs());
 +
 +                // Validate application parameters
 +                if (expectedAppParameters != null) {
 +                    if (expectedAppParameters.containsKey(actualApp.getName())) {
 +                        Map<String, String> expectedParams = expectedAppParameters.get(actualApp.getName());
 +
 +                        Map<String, String> actualAppParams = actualApp.getParams();
 +                        assertNotNull(actualAppParams);
 +
 +                        // Validate the size of the application parameter set
 +                        assertEquals(expectedParams.size(), actualAppParams.size());
 +
 +                        // Validate the parameter contents
 +                        for (String paramName : actualAppParams.keySet()) {
 +                            assertTrue(expectedParams.containsKey(paramName));
 +                            assertEquals(expectedParams.get(paramName), actualAppParams.get(paramName));
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +}
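
For reference, a minimal sketch of how the helper above is driven; the
descriptor construction and all literal values are placeholders (the
factory/parse API is assumed here, not shown in this diff):

    // Expected-value maps are keyed by service name.
    Map<String, List<String>> expectedServices = new HashMap<>();
    expectedServices.put("NAMENODE", Collections.singletonList("hdfs://ns1"));

    Map<String, String> nnParams = new HashMap<>();
    nnParams.put("discovery-nameservice", "ns1"); // hypothetical parameter
    Map<String, Map<String, String>> expectedServiceParams = new HashMap<>();
    expectedServiceParams.put("NAMENODE", nnParams);

    // sd would come from parsing a descriptor file elsewhere in the test.
    validateSimpleDescriptor(sd, "AMBARI", "http://ambari.example.com:8080",
                             "sandbox-providers", "Sandbox",
                             expectedServices, expectedServiceParams,
                             null,   // no applications expected
                             null);  // no application parameters expected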


[14/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
Merge branch 'master' into KNOX-998-Package_Restructuring

# Conflicts:
#	gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
#	gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
#	gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/JWTFederationFilter.java
#	gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/SSOCookieFederationFilter.java
#	gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/SSOCookieProviderTest.java
#	gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
#	gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
#	gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWTToken.java


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/58780d37
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/58780d37
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/58780d37

Branch: refs/heads/master
Commit: 58780d37c96f1f49f3863ecd065a09fb3bfda26a
Parents: 7d0bff1 994ac32
Author: Sandeep More <mo...@apache.org>
Authored: Wed Oct 25 10:59:55 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Wed Oct 25 10:59:55 2017 -0400

----------------------------------------------------------------------
 .../discovery/ambari/AmbariCluster.java         |   2 +-
 .../provider/impl/BaseZookeeperURLManager.java  | 195 ++++++++++++
 .../provider/impl/HBaseZookeeperURLManager.java | 138 +++++++++
 .../provider/impl/KafkaZookeeperURLManager.java | 152 ++++++++++
 .../provider/impl/SOLRZookeeperURLManager.java  | 118 ++++++++
 .../ha/provider/impl/StringResponseHandler.java |  49 +++
 .../impl/HBaseZookeeperURLManagerTest.java      |  72 +++++
 .../impl/KafkaZookeeperURLManagerTest.java      |  71 +++++
 .../impl/SOLRZookeeperURLManagerTest.java       | 110 +++++++
 ...gateway.deploy.ProviderDeploymentContributor |   5 +-
 .../provider/federation/jwt/JWTMessages.java    |   3 +
 .../jwt/filter/AbstractJWTFilter.java           |  57 +++-
 .../jwt/filter/JWTFederationFilter.java         |   5 +-
 .../jwt/filter/SSOCookieFederationFilter.java   |   5 +-
 .../federation/AbstractJWTFilterTest.java       | 239 +++++++++++++--
 .../federation/SSOCookieProviderTest.java       |   5 +-
 gateway-provider-security-pac4j/pom.xml         |  31 +-
 .../pac4j/filter/Pac4jDispatcherFilter.java     |  15 +-
 .../pac4j/filter/Pac4jIdentityAdapter.java      |  36 ++-
 .../gateway/pac4j/session/KnoxSessionStore.java |  28 +-
 .../knox/gateway/pac4j/Pac4jProviderTest.java   |  10 +-
 .../impl/DefaultTokenAuthorityService.java      |  22 +-
 .../topology/impl/DefaultTopologyService.java   |  16 +
 .../topology/simple/SimpleDescriptor.java       |   4 +-
 .../simple/SimpleDescriptorHandler.java         |  43 ++-
 .../topology/simple/SimpleDescriptorImpl.java   |  12 +
 .../impl/DefaultTokenAuthorityServiceTest.java  |  94 ++++++
 .../simple/SimpleDescriptorFactoryTest.java     | 230 +++++++++++++-
 .../simple/SimpleDescriptorHandlerTest.java     |  79 ++++-
 .../gateway/service/knoxsso/WebSSOResource.java |  18 +-
 .../service/knoxsso/WebSSOResourceTest.java     | 299 +++++++++++++++++-
 .../service/knoxtoken/TokenResource.java        |  20 +-
 .../knoxtoken/TokenServiceResourceTest.java     | 302 ++++++++++++++++++-
 .../apache/knox/gateway/shell/job/Sqoop.java    |   2 +-
 .../services/security/token/impl/JWT.java       |   3 +
 .../services/security/token/impl/JWTToken.java  |  38 ++-
 .../security/token/impl/JWTTokenTest.java       |  45 ++-
 pom.xml                                         |   2 +-
 38 files changed, 2409 insertions(+), 166 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
index d65bff7,0000000..d71e079
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
@@@ -1,115 -1,0 +1,115 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +class AmbariCluster implements ServiceDiscovery.Cluster {
 +
 +    private String name = null;
 +
 +    private AmbariDynamicServiceURLCreator urlCreator;
 +
 +    private Map<String, Map<String, ServiceConfiguration>> serviceConfigurations = new HashMap<>();
 +
 +    private Map<String, AmbariComponent> components = null;
 +
 +
 +    AmbariCluster(String name) {
 +        this.name = name;
 +        components = new HashMap<>();
 +        urlCreator = new AmbariDynamicServiceURLCreator(this);
 +    }
 +
 +    void addServiceConfiguration(String serviceName, String configurationType, ServiceConfiguration serviceConfig) {
 +        if (!serviceConfigurations.containsKey(serviceName)) {
-             serviceConfigurations.put(serviceName, new HashMap<String, ServiceConfiguration>());
++            serviceConfigurations.put(serviceName, new HashMap<>());
 +        }
 +        serviceConfigurations.get(serviceName).put(configurationType, serviceConfig);
 +    }
 +
 +
 +    void addComponent(AmbariComponent component) {
 +        components.put(component.getName(), component);
 +    }
 +
 +
 +    ServiceConfiguration getServiceConfiguration(String serviceName, String configurationType) {
 +        ServiceConfiguration sc = null;
 +        Map<String, ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +        if (configs != null) {
 +            sc = configs.get(configurationType);
 +        }
 +        return sc;
 +    }
 +
 +
 +    Map<String, AmbariComponent> getComponents() {
 +        return components;
 +    }
 +
 +
 +    AmbariComponent getComponent(String name) {
 +        return components.get(name);
 +    }
 +
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +
 +    @Override
 +    public List<String> getServiceURLs(String serviceName) {
 +        List<String> urls = new ArrayList<>();
 +        urls.addAll(urlCreator.create(serviceName));
 +        return urls;
 +    }
 +
 +
 +    static class ServiceConfiguration {
 +
 +        private String type;
 +        private String version;
 +        private Map<String, String> props;
 +
 +        ServiceConfiguration(String type, String version, Map<String, String> properties) {
 +            this.type = type;
 +            this.version = version;
 +            this.props = properties;
 +        }
 +
 +        public String getVersion() {
 +            return version;
 +        }
 +
 +        public String getType() {
 +            return type;
 +        }
 +
 +        public Map<String, String> getProperties() {
 +            return props;
 +        }
 +    }
 +
 +}
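
A short usage sketch for the cluster model above; all literal values are
illustrative:

    AmbariCluster cluster = new AmbariCluster("Sandbox");

    Map<String, String> props = new HashMap<>();
    props.put("dfs.namenode.http-address", "host.example.com:50070"); // illustrative
    cluster.addServiceConfiguration("HDFS", "hdfs-site",
        new AmbariCluster.ServiceConfiguration("hdfs-site", "2.0", props));

    // Returns null when either the service or the configuration type is unknown.
    AmbariCluster.ServiceConfiguration sc = cluster.getServiceConfiguration("HDFS", "hdfs-site");

    // URL creation is delegated to AmbariDynamicServiceURLCreator.create(serviceName).
    List<String> urls = cluster.getServiceURLs("WEBHDFS");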

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
----------------------------------------------------------------------
diff --cc gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
index 76328d9,0000000..d6b9608
mode 100644,000000..100644
--- a/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
+++ b/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
@@@ -1,19 -1,0 +1,22 @@@
 +##########################################################################
 +# Licensed to the Apache Software Foundation (ASF) under one
 +# or more contributor license agreements.  See the NOTICE file
 +# distributed with this work for additional information
 +# regarding copyright ownership.  The ASF licenses this file
 +# to you under the Apache License, Version 2.0 (the
 +# "License"); you may not use this file except in compliance
 +# with the License.  You may obtain a copy of the License at
 +#
 +#     http://www.apache.org/licenses/LICENSE-2.0
 +#
 +# Unless required by applicable law or agreed to in writing, software
 +# distributed under the License is distributed on an "AS IS" BASIS,
 +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 +# See the License for the specific language governing permissions and
 +# limitations under the License.
 +##########################################################################
 +
- org.apache.knox.gateway.hostmap.impl.HostmapDeploymentContributor
++org.apache.knox.gateway.ha.provider.impl.HS2ZookeeperURLManager
++org.apache.knox.gateway.ha.provider.impl.SOLRZookeeperURLManager
++org.apache.knox.gateway.ha.provider.impl.KafkaZookeeperURLManager
++org.apache.knox.gateway.ha.provider.impl.HBaseZookeeperURLManager

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/JWTMessages.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/JWTMessages.java
index e1e0dcb,0000000..70efa8c
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/JWTMessages.java
+++ b/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/JWTMessages.java
@@@ -1,57 -1,0 +1,60 @@@
 +/**
 +
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation.jwt;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.knox.gateway.provider.federation.jwt")
 +public interface JWTMessages {
 +  @Message( level = MessageLevel.WARN, text = "Failed to validate the audience attribute." )
 +  void failedToValidateAudience();
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to verify the token signature." )
 +  void failedToVerifyTokenSignature();
 +
 +  @Message( level = MessageLevel.INFO, text = "Access token has expired; a new one must be acquired." )
 +  void tokenHasExpired();
 +
++  @Message( level = MessageLevel.INFO, text = "The NotBefore check failed." )
++  void notBeforeCheckFailed();
++
 +  @Message( level = MessageLevel.WARN, text = "Expected Bearer token is missing." )
 +  void missingBearerToken();
 +
 +  @Message( level = MessageLevel.INFO, text = "Unable to verify token: {0}" )
 +  void unableToVerifyToken(@StackTrace( level = MessageLevel.ERROR) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to verify token: {0}" )
 +  void unableToIssueToken(@StackTrace( level = MessageLevel.DEBUG) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Sending redirect to: {0}" )
 +  void sendRedirectToLoginURL(String loginURL);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Required configuration element for authentication provider is missing." )
 +  void missingAuthenticationProviderUrlConfiguration();
 +
 +  @Message( level = MessageLevel.DEBUG, text = "{0} Cookie has been found and is being processed." )
 +  void cookieHasBeenFound(String cookieName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Audience claim has been validated." )
 +  void jwtAudienceValidated();
 +}
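
The new notBeforeCheckFailed() entry is consumed like the existing messages:
callers obtain the generated logger through MessagesFactory, as
AbstractJWTFilter does below.

    // As in AbstractJWTFilter: get the generated implementation and log at
    // INFO when the NotBefore (nbf) check rejects a token.
    JWTMessages log = MessagesFactory.get(JWTMessages.class);
    log.notBeforeCheckFailed();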

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
index 077fa05,0000000..49357f0
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
+++ b/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
@@@ -1,278 -1,0 +1,315 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation.jwt.filter;
 +
 +import java.io.IOException;
 +import java.security.Principal;
 +import java.security.PrivilegedActionException;
 +import java.security.PrivilegedExceptionAction;
 +import java.security.interfaces.RSAPublicKey;
++import java.text.ParseException;
 +import java.util.ArrayList;
 +import java.util.Date;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Set;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.Filter;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditContext;
 +import org.apache.knox.gateway.audit.api.AuditService;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.provider.federation.jwt.JWTMessages;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
- import org.apache.knox.gateway.services.security.token.impl.JWTToken;
++import org.apache.commons.lang.StringUtils;
++import org.apache.knox.gateway.services.security.token.impl.JWT;
++
++import com.nimbusds.jose.JWSHeader;
 +
 +/**
 + *
 + */
 +public abstract class AbstractJWTFilter implements Filter {
 +  /**
 +   * If specified, this configuration property refers to a value which the issuer of a received
 +   * token must match. Otherwise, the default value "KNOXSSO" is used.
 +   */
 +  public static final String JWT_EXPECTED_ISSUER = "jwt.expected.issuer";
 +  public static final String JWT_DEFAULT_ISSUER = "KNOXSSO";
 +
++  /**
++   * If specified, this configuration property refers to the signature algorithm which a received
++   * token must match. Otherwise, the default value "RS256" is used.
++   */
++  public static final String JWT_EXPECTED_SIGALG = "jwt.expected.sigalg";
++  public static final String JWT_DEFAULT_SIGALG = "RS256";
++
 +  static JWTMessages log = MessagesFactory.get( JWTMessages.class );
 +  private static AuditService auditService = AuditServiceFactory.getAuditService();
 +  private static Auditor auditor = auditService.getAuditor(
 +      AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +      AuditConstants.KNOX_COMPONENT_NAME );
 +
 +  protected List<String> audiences;
 +  protected JWTokenAuthority authority;
 +  protected RSAPublicKey publicKey = null;
 +  private String expectedIssuer;
++  private String expectedSigAlg;
 +
 +  public abstract void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
 +      throws IOException, ServletException;
 +
 +  /**
 +   *
 +   */
 +  public AbstractJWTFilter() {
 +    super();
 +  }
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +    ServletContext context = filterConfig.getServletContext();
 +    if (context != null) {
 +      GatewayServices services = (GatewayServices) context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +      if (services != null) {
 +        authority = (JWTokenAuthority) services.getService(GatewayServices.TOKEN_SERVICE);
 +      }
 +    }
 +  }
 +
-   protected void configureExpectedIssuer(FilterConfig filterConfig) {
-     expectedIssuer = filterConfig.getInitParameter(JWT_EXPECTED_ISSUER);;
++  protected void configureExpectedParameters(FilterConfig filterConfig) {
++    expectedIssuer = filterConfig.getInitParameter(JWT_EXPECTED_ISSUER);
 +    if (expectedIssuer == null) {
 +      expectedIssuer = JWT_DEFAULT_ISSUER;
 +    }
++
++    expectedSigAlg = filterConfig.getInitParameter(JWT_EXPECTED_SIGALG);
++    if (expectedSigAlg == null) {
++      expectedSigAlg = JWT_DEFAULT_SIGALG;
++    }
 +  }
 +
 +  /**
 +   * @param expectedAudiences comma-separated list of audience values to accept
 +   * @return the trimmed audience values as a list, or null if none were configured
 +   */
 +  protected List<String> parseExpectedAudiences(String expectedAudiences) {
-     ArrayList<String> audList = null;
++    List<String> audList = null;
 +    // setup the list of valid audiences for token validation
-     if (expectedAudiences != null) {
++    if (!StringUtils.isEmpty(expectedAudiences)) {
 +      // parse into the list
 +      String[] audArray = expectedAudiences.split(",");
 +      audList = new ArrayList<String>();
 +      for (String a : audArray) {
 +        audList.add(a.trim());
 +      }
 +    }
 +    return audList;
 +  }
 +
-   protected boolean tokenIsStillValid(JWTToken jwtToken) {
++  protected boolean tokenIsStillValid(JWT jwtToken) {
 +    // if there is no expiration date then the lifecycle is tied entirely to
 +    // the cookie validity - otherwise ensure that the current time is before
 +    // the designated expiration time
 +    Date expires = jwtToken.getExpiresDate();
 +    return (expires == null || new Date().before(expires));
 +  }
 +
 +  /**
 +   * Validate whether any of the accepted audience claims is present in the
 +   * issued token claims list for audience. Override this method in subclasses
 +   * in order to customize the audience validation behavior.
 +   *
 +   * @param jwtToken
 +   *          the JWT token where the allowed audiences will be found
 +   * @return true if an expected audience is present, otherwise false
 +   */
-   protected boolean validateAudiences(JWTToken jwtToken) {
++  protected boolean validateAudiences(JWT jwtToken) {
 +    boolean valid = false;
 +
 +    String[] tokenAudienceList = jwtToken.getAudienceClaims();
 +    // if there were no expected audiences configured then just
 +    // consider any audience acceptable
 +    if (audiences == null) {
 +      valid = true;
 +    } else {
 +      // if any of the configured audiences is found then consider it
 +      // acceptable
 +      if (tokenAudienceList != null) {
 +        for (String aud : tokenAudienceList) {
 +          if (audiences.contains(aud)) {
 +            log.jwtAudienceValidated();
 +            valid = true;
 +            break;
 +          }
 +        }
 +      }
 +    }
 +    return valid;
 +  }
 +
 +  protected void continueWithEstablishedSecurityContext(Subject subject, final HttpServletRequest request, final HttpServletResponse response, final FilterChain chain) throws IOException, ServletException {
 +    Principal principal = (Principal) subject.getPrincipals(PrimaryPrincipal.class).toArray()[0];
 +    AuditContext context = auditService.getContext();
 +    if (context != null) {
 +      context.setUsername( principal.getName() );
 +      String sourceUri = (String)request.getAttribute( AbstractGatewayFilter.SOURCE_REQUEST_CONTEXT_URL_ATTRIBUTE_NAME );
 +      if (sourceUri != null) {
 +        auditor.audit( Action.AUTHENTICATION , sourceUri, ResourceType.URI, ActionOutcome.SUCCESS );
 +      }
 +    }
 +
 +    try {
 +      Subject.doAs(
 +        subject,
 +        new PrivilegedExceptionAction<Object>() {
 +          @Override
 +          public Object run() throws Exception {
 +            chain.doFilter(request, response);
 +            return null;
 +          }
 +        }
 +        );
 +    }
 +    catch (PrivilegedActionException e) {
 +      Throwable t = e.getCause();
 +      if (t instanceof IOException) {
 +        throw (IOException) t;
 +      }
 +      else if (t instanceof ServletException) {
 +        throw (ServletException) t;
 +      }
 +      else {
 +        throw new ServletException(t);
 +      }
 +    }
 +  }
 +
-   protected Subject createSubjectFromToken(JWTToken token) {
++  protected Subject createSubjectFromToken(JWT token) {
 +    final String principal = token.getSubject();
 +
 +    @SuppressWarnings("rawtypes")
 +    HashSet emptySet = new HashSet();
 +    Set<Principal> principals = new HashSet<>();
 +    Principal p = new PrimaryPrincipal(principal);
 +    principals.add(p);
 +
 +    // The newly constructed Sets check whether this Subject has been set read-only
 +    // before permitting subsequent modifications. The newly created Sets also prevent
 +    // illegal modifications by ensuring that callers have sufficient permissions.
 +    //
 +    // To modify the Principals Set, the caller must have AuthPermission("modifyPrincipals").
 +    // To modify the public credential Set, the caller must have AuthPermission("modifyPublicCredentials").
 +    // To modify the private credential Set, the caller must have AuthPermission("modifyPrivateCredentials").
 +    javax.security.auth.Subject subject = new javax.security.auth.Subject(true, principals, emptySet, emptySet);
 +    return subject;
 +  }
 +
 +  protected boolean validateToken(HttpServletRequest request, HttpServletResponse response,
-       FilterChain chain, JWTToken token)
++      FilterChain chain, JWT token)
 +      throws IOException, ServletException {
 +    boolean verified = false;
 +    try {
 +      if (publicKey == null) {
 +        verified = authority.verifyToken(token);
 +      }
 +      else {
 +        verified = authority.verifyToken(token, publicKey);
 +      }
 +    } catch (TokenServiceException e) {
 +      log.unableToVerifyToken(e);
 +    }
 +
++    // Check received signature algorithm
++    if (verified) {
++      try {
++        String receivedSigAlg = JWSHeader.parse(token.getHeader()).getAlgorithm().getName();
++        if (!receivedSigAlg.equals(expectedSigAlg)) {
++          verified = false;
++        }
++      } catch (ParseException e) {
++        log.unableToVerifyToken(e);
++        verified = false;
++      }
++    }
++
 +    if (verified) {
 +      // confirm that the issuer matches the intended target
 +      if (expectedIssuer.equals(token.getIssuer())) {
 +        // if there is no expiration date then the lifecycle is tied entirely to
 +        // the cookie validity - otherwise ensure that the current time is before
 +        // the designated expiration time
 +        if (tokenIsStillValid(token)) {
 +          boolean audValid = validateAudiences(token);
 +          if (audValid) {
-             return true;
++              Date nbf = token.getNotBeforeDate();
++              if (nbf == null || new Date().after(nbf)) {
++                return true;
++              } else {
++                log.notBeforeCheckFailed();
++                handleValidationError(request, response, HttpServletResponse.SC_BAD_REQUEST,
++                                      "Bad request: the NotBefore check failed");
++              }
 +          }
 +          else {
 +            log.failedToValidateAudience();
 +            handleValidationError(request, response, HttpServletResponse.SC_BAD_REQUEST,
 +                                  "Bad request: missing required token audience");
 +          }
 +        }
 +        else {
 +          log.tokenHasExpired();
 +          handleValidationError(request, response, HttpServletResponse.SC_BAD_REQUEST,
 +                                "Bad request: token has expired");
 +        }
 +      }
 +      else {
 +        handleValidationError(request, response, HttpServletResponse.SC_UNAUTHORIZED, null);
 +      }
 +    }
 +    else {
 +      log.failedToVerifyTokenSignature();
 +      handleValidationError(request, response, HttpServletResponse.SC_UNAUTHORIZED, null);
 +    }
 +
 +    return false;
 +  }
 +
 +  protected abstract void handleValidationError(HttpServletRequest request, HttpServletResponse response, int status,
 +                                                String error) throws IOException;
 +
 +}
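
Taken together, validateToken() now gates a signature-verified token on, in
order: the JWS algorithm, the issuer, expiration, audience, and the new
NotBefore claim. The two added checks, restated in isolation (same
nimbus-jose-jwt call as in the filter; ParseException is treated as a
verification failure):

    // Signature-algorithm check: the alg in the JWS header must equal the
    // configured jwt.expected.sigalg (default "RS256").
    String receivedSigAlg = JWSHeader.parse(token.getHeader()).getAlgorithm().getName();
    boolean sigAlgOk = receivedSigAlg.equals(expectedSigAlg);

    // NotBefore check: a missing nbf claim is accepted; a future nbf is
    // rejected with HTTP 400 via handleValidationError().
    Date nbf = token.getNotBeforeDate();
    boolean nbfOk = (nbf == null) || new Date().after(nbf);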

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/JWTFederationFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/JWTFederationFilter.java
index ec0e980,0000000..187d2b0
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/JWTFederationFilter.java
+++ b/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/JWTFederationFilter.java
@@@ -1,111 -1,0 +1,112 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation.jwt.filter;
 +
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.apache.knox.gateway.util.CertificateUtils;
++import org.apache.knox.gateway.services.security.token.impl.JWT;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import java.io.IOException;
 +import java.text.ParseException;
 +
 +public class JWTFederationFilter extends AbstractJWTFilter {
 +
 +  public static final String KNOX_TOKEN_AUDIENCES = "knox.token.audiences";
 +  public static final String TOKEN_VERIFICATION_PEM = "knox.token.verification.pem";
 +  private static final String KNOX_TOKEN_QUERY_PARAM_NAME = "knox.token.query.param.name";
 +  private static final String BEARER = "Bearer ";
 +  private String paramName = "knoxtoken";
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +      super.init(filterConfig);
 +
 +    // expected audiences or null
 +    String expectedAudiences = filterConfig.getInitParameter(KNOX_TOKEN_AUDIENCES);
 +    if (expectedAudiences != null) {
 +      audiences = parseExpectedAudiences(expectedAudiences);
 +    }
 +
 +    // query param name for finding the provided knoxtoken
 +    String queryParamName = filterConfig.getInitParameter(KNOX_TOKEN_QUERY_PARAM_NAME);
 +    if (queryParamName != null) {
 +      paramName = queryParamName;
 +    }
 +
 +    // token verification pem
 +    String verificationPEM = filterConfig.getInitParameter(TOKEN_VERIFICATION_PEM);
 +    // setup the public key of the token issuer for verification
 +    if (verificationPEM != null) {
 +      publicKey = CertificateUtils.parseRSAPublicKey(verificationPEM);
 +    }
 +
-     configureExpectedIssuer(filterConfig);
++    configureExpectedParameters(filterConfig);
 +  }
 +
 +  public void destroy() {
 +  }
 +
 +  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
 +      throws IOException, ServletException {
 +    String header = ((HttpServletRequest) request).getHeader("Authorization");
 +    String wireToken = null;
 +    if (header != null && header.startsWith(BEARER)) {
 +      // what follows the Bearer designator should be the JWT token being presented as an access token
 +      wireToken = header.substring(BEARER.length());
 +    }
 +    else {
 +      // check for query param
 +      wireToken = ((HttpServletRequest) request).getParameter(paramName);
 +    }
 +
 +    if (wireToken != null) {
 +      try {
-         JWTToken token = new JWTToken(wireToken);
++        JWT token = new JWTToken(wireToken);
 +        if (validateToken((HttpServletRequest)request, (HttpServletResponse)response, chain, token)) {
 +          Subject subject = createSubjectFromToken(token);
 +          continueWithEstablishedSecurityContext(subject, (HttpServletRequest)request, (HttpServletResponse)response, chain);
 +        }
 +      } catch (ParseException ex) {
 +        ((HttpServletResponse) response).sendError(HttpServletResponse.SC_UNAUTHORIZED);
 +      }
 +    }
 +    else {
 +      // no token provided in header
 +      ((HttpServletResponse) response).sendError(HttpServletResponse.SC_UNAUTHORIZED);
 +    }
 +  }
 +
 +  protected void handleValidationError(HttpServletRequest request, HttpServletResponse response, int status,
 +                                       String error) throws IOException {
 +    if (error != null) {
 +      response.sendError(status, error);
 +    }
 +    else {
 +      response.sendError(status);
 +    }
 +  }
 +}
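
Clients can present the token either as a Bearer header or through the
configured query parameter (knoxtoken by default). A minimal client-side
sketch; the URL and serializedJWT are placeholders:

    // Header variant.
    URL url = new URL("https://localhost:8443/gateway/sandbox/webhdfs/v1/?op=LISTSTATUS");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Authorization", "Bearer " + serializedJWT);

    // Query-parameter variant, honoring knox.token.query.param.name.
    URL alt = new URL("https://localhost:8443/gateway/sandbox/webhdfs/v1/"
                      + "?op=LISTSTATUS&knoxtoken=" + serializedJWT);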

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/SSOCookieFederationFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/SSOCookieFederationFilter.java
index 8a5f1ef,0000000..dbdb364
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/SSOCookieFederationFilter.java
+++ b/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/SSOCookieFederationFilter.java
@@@ -1,170 -1,0 +1,171 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation.jwt.filter;
 +
 +import java.io.IOException;
 +import java.text.ParseException;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.Cookie;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.provider.federation.jwt.JWTMessages;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.apache.knox.gateway.util.CertificateUtils;
++import org.apache.knox.gateway.services.security.token.impl.JWT;
 +
 +public class SSOCookieFederationFilter extends AbstractJWTFilter {
 +  public static final String SSO_COOKIE_NAME = "sso.cookie.name";
 +  public static final String SSO_EXPECTED_AUDIENCES = "sso.expected.audiences";
 +  public static final String SSO_AUTHENTICATION_PROVIDER_URL = "sso.authentication.provider.url";
 +  public static final String SSO_VERIFICATION_PEM = "sso.token.verification.pem";
 +  private static JWTMessages log = MessagesFactory.get( JWTMessages.class );
 +  private static final String ORIGINAL_URL_QUERY_PARAM = "originalUrl=";
 +  private static final String DEFAULT_SSO_COOKIE_NAME = "hadoop-jwt";
 +
 +  private String cookieName;
 +  private String authenticationProviderUrl;
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +    super.init(filterConfig);
 +
 +    // configured cookieName
 +    cookieName = filterConfig.getInitParameter(SSO_COOKIE_NAME);
 +    if (cookieName == null) {
 +      cookieName = DEFAULT_SSO_COOKIE_NAME;
 +    }
 +
 +    // expected audiences or null
 +    String expectedAudiences = filterConfig.getInitParameter(SSO_EXPECTED_AUDIENCES);
 +    if (expectedAudiences != null) {
 +      audiences = parseExpectedAudiences(expectedAudiences);
 +    }
 +
 +    // url to SSO authentication provider
 +    authenticationProviderUrl = filterConfig.getInitParameter(SSO_AUTHENTICATION_PROVIDER_URL);
 +    if (authenticationProviderUrl == null) {
 +      log.missingAuthenticationProviderUrlConfiguration();
 +      throw new ServletException("Required authentication provider URL is missing.");
 +    }
 +
 +    // token verification pem
 +    String verificationPEM = filterConfig.getInitParameter(SSO_VERIFICATION_PEM);
 +    // setup the public key of the token issuer for verification
 +    if (verificationPEM != null) {
 +      publicKey = CertificateUtils.parseRSAPublicKey(verificationPEM);
 +    }
 +
-     configureExpectedIssuer(filterConfig);
++    configureExpectedParameters(filterConfig);
 +  }
 +
 +  public void destroy() {
 +  }
 +
 +  @Override
 +  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
 +      throws IOException, ServletException {
 +    String wireToken = null;
 +    HttpServletRequest req = (HttpServletRequest) request;
 +
 +    String loginURL = constructLoginURL(req);
 +    wireToken = getJWTFromCookie(req);
 +    if (wireToken == null) {
 +      if (req.getMethod().equals("OPTIONS")) {
 +        // CORS preflight requests to determine allowed origins and related config
 +        // must be able to continue without being redirected
 +        Subject sub = new Subject();
 +        sub.getPrincipals().add(new PrimaryPrincipal("anonymous"));
 +        continueWithEstablishedSecurityContext(sub, req, (HttpServletResponse) response, chain);
 +      }
 +      log.sendRedirectToLoginURL(loginURL);
 +      ((HttpServletResponse) response).sendRedirect(loginURL);
 +    }
 +    else {
 +      try {
-         JWTToken token = new JWTToken(wireToken);
++        JWT token = new JWTToken(wireToken);
 +        if (validateToken((HttpServletRequest)request, (HttpServletResponse)response, chain, token)) {
 +          Subject subject = createSubjectFromToken(token);
 +          continueWithEstablishedSecurityContext(subject, (HttpServletRequest)request, (HttpServletResponse)response, chain);
 +        }
 +      } catch (ParseException ex) {
 +        ((HttpServletResponse) response).sendRedirect(loginURL);
 +      }
 +    }
 +  }
 +
 +  protected void handleValidationError(HttpServletRequest request, HttpServletResponse response, int status,
 +                                       String error) throws IOException {
 +    String loginURL = constructLoginURL(request);
 +    response.sendRedirect(loginURL);
 +  }
 +
 +  /**
 +   * Encapsulate the acquisition of the JWT token from HTTP cookies within the
 +   * request.
 +   *
 +   * @param req servlet request to get the JWT token from
 +   * @return serialized JWT token
 +   */
 +  protected String getJWTFromCookie(HttpServletRequest req) {
 +    String serializedJWT = null;
 +    Cookie[] cookies = req.getCookies();
 +    if (cookies != null) {
 +      for (Cookie cookie : cookies) {
 +        if (cookieName.equals(cookie.getName())) {
 +          log.cookieHasBeenFound(cookieName);
 +          serializedJWT = cookie.getValue();
 +          break;
 +        }
 +      }
 +    }
 +    return serializedJWT;
 +  }
 +
 +  /**
 +   * Create the URL to be used for authentication of the user in the absence of
 +   * a JWT token within the incoming request.
 +   *
 +   * @param request for getting the original request URL
 +   * @return url to use as login url for redirect
 +   */
 +  protected String constructLoginURL(HttpServletRequest request) {
 +    String delimiter = "?";
 +    if (authenticationProviderUrl.contains("?")) {
 +      delimiter = "&";
 +    }
 +    String loginURL = authenticationProviderUrl + delimiter
 +        + ORIGINAL_URL_QUERY_PARAM
 +        + request.getRequestURL().append(getOriginalQueryString(request));
 +    return loginURL;
 +  }
 +
 +  private String getOriginalQueryString(HttpServletRequest request) {
 +    String originalQueryString = request.getQueryString();
 +    return (originalQueryString == null) ? "" : "?" + originalQueryString;
 +  }
 +
 +}
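
constructLoginURL() above only needs to pick the right delimiter before
appending originalUrl. A condensed restatement with illustrative values:

    // '?' is used when the provider URL carries no query string; if it
    // already contains '?', the delimiter flips to '&'.
    String providerUrl = "https://host:8443/gateway/knoxsso/api/v1/websso";
    String delimiter = providerUrl.contains("?") ? "&" : "?";
    String loginURL = providerUrl + delimiter + "originalUrl="
        + "https://host:8443/gateway/sandbox/webhdfs/v1/";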

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
index 9888eab,0000000..f79a743
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
+++ b/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
@@@ -1,667 -1,0 +1,870 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation;
 +
 +import static org.junit.Assert.fail;
 +
 +import java.io.IOException;
 +import java.net.InetAddress;
 +import java.security.AccessController;
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.NoSuchAlgorithmException;
 +import java.security.Principal;
 +import java.security.PublicKey;
 +import java.security.cert.Certificate;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.text.MessageFormat;
 +import java.util.Enumeration;
 +import java.util.List;
 +import java.util.ArrayList;
 +import java.util.Properties;
 +import java.util.Date;
 +import java.util.Set;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.knox.gateway.provider.federation.jwt.filter.AbstractJWTFilter;
 +import org.apache.knox.gateway.provider.federation.jwt.filter.SSOCookieFederationFilter;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.apache.knox.gateway.services.security.impl.X509CertificateUtil;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.*;
 +import com.nimbusds.jwt.JWTClaimsSet;
 +import com.nimbusds.jwt.SignedJWT;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +public abstract class AbstractJWTFilterTest  {
 +  private static final String SERVICE_URL = "https://localhost:8888/resource";
 +  private static final String dnTemplate = "CN={0},OU=Test,O=Hadoop,L=Test,ST=Test,C=US";
 +
 +  protected AbstractJWTFilter handler = null;
 +  protected static RSAPublicKey publicKey = null;
 +  protected static RSAPrivateKey privateKey = null;
 +  protected static String pem = null;
 +
 +  protected abstract void setTokenOnRequest(HttpServletRequest request, SignedJWT jwt);
 +  protected abstract void setGarbledTokenOnRequest(HttpServletRequest request, SignedJWT jwt);
 +  protected abstract String getAudienceProperty();
 +  protected abstract String getVerificationPemProperty();
 +
 +  private static String buildDistinguishedName(String hostname) {
 +    MessageFormat headerFormatter = new MessageFormat(dnTemplate);
 +    String[] paramArray = new String[1];
 +    paramArray[0] = hostname;
 +    String dn = headerFormatter.format(paramArray);
 +    return dn;
 +  }
 +
 +  @BeforeClass
 +  public static void generateKeys() throws Exception {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(2048);
 +    KeyPair KPair = kpg.generateKeyPair();
 +    String dn = buildDistinguishedName(InetAddress.getLocalHost().getHostName());
 +    Certificate cert = X509CertificateUtil.generateCertificate(dn, KPair, 365, "SHA1withRSA");
 +    byte[] data = cert.getEncoded();
 +    Base64 encoder = new Base64( 76, "\n".getBytes( "ASCII" ) );
 +    pem = encoder.encodeToString( data ).trim();
 +
 +    publicKey = (RSAPublicKey) KPair.getPublic();
 +    privateKey = (RSAPrivateKey) KPair.getPrivate();
 +  }
 +
 +  @After
 +  public void teardown() throws Exception {
 +    handler.destroy();
 +  }
 +
 +  @Test
 +  public void testValidJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidAudienceJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.put(getAudienceProperty(), "bar");
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testInvalidAudienceJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.put(getAudienceProperty(), "foo");
 +      props.put("sso.authentication.provider.url", "https://localhost:8443/gateway/knoxsso/api/v1/websso");
 +
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidAudienceJWTWhitespace() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.put(getAudienceProperty(), " foo, bar ");
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 5000), privateKey);
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
++      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
++      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
++      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
++  @Test
++  public void testNoTokenAudience() throws Exception {
++    try {
++      Properties props = getProperties();
++      props.put(getAudienceProperty(), "bar");
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice", null,
++                             new Date(new Date().getTime() + 5000), new Date(), privateKey, "RS256");
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
++      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
++  @Test
++  public void testNoAudienceConfigured() throws Exception {
++    try {
++      Properties props = getProperties();
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice", null,
++                             new Date(new Date().getTime() + 5000), new Date(), privateKey, "RS256");
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
++      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
++      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
++      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
++  @Test
++  public void testEmptyAudienceConfigured() throws Exception {
++    try {
++      Properties props = getProperties();
++      props.put(getAudienceProperty(), "");
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice", null,
++                             new Date(new Date().getTime() + 5000), new Date(), privateKey, "RS256");
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidVerificationPEM() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +
 +//      System.out.println("+" + pem + "+");
 +
 +      props.put(getAudienceProperty(), "bar");
 +      props.put("sso.authentication.provider.url", "https://localhost:8443/gateway/knoxsso/api/v1/websso");
 +      props.put(getVerificationPemProperty(), pem);
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 50000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 50000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testExpiredJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() - 1000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() - 1000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidJWTNoExpiration() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", null, privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice", null, privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL).anyTimes();
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testUnableToParseJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "bob",
++                             new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setGarbledTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL).anyTimes();
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testFailedSignatureValidationJWT() throws Exception {
 +    try {
 +      // Create a private key to sign the token
 +      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +      kpg.initialize(1024);
 +
 +      KeyPair kp = kpg.genKeyPair();
 +
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
-                              (RSAPrivateKey)kp.getPrivate(), props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "bob",
++                             new Date(new Date().getTime() + 5000), (RSAPrivateKey)kp.getPrivate());
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL).anyTimes();
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testInvalidVerificationPEM() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +
 +      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +      kpg.initialize(1024);
 +
 +      KeyPair keyPair = kpg.generateKeyPair();
 +      String dn = buildDistinguishedName(InetAddress.getLocalHost().getHostName());
 +      Certificate cert = X509CertificateUtil.generateCertificate(dn, keyPair, 365, "SHA1withRSA");
 +      byte[] data = cert.getEncoded();
 +      Base64 encoder = new Base64( 76, "\n".getBytes( "ASCII" ) );
 +      String failingPem = encoder.encodeToString( data ).trim();
 +
 +      props.put(getAudienceProperty(), "bar");
 +      props.put(getVerificationPemProperty(), failingPem);
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 50000), privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 50000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", chain.doFilterCalled == false);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testInvalidIssuer() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("new-issuer", "alice", new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +         new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidIssuerViaConfig() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.setProperty(AbstractJWTFilter.JWT_EXPECTED_ISSUER, "new-issuer");
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("new-issuer", "alice", new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled);
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", principals.size() > 0);
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
++  @Test
++  public void testRS512SignatureAlgorithm() throws Exception {
++    try {
++      Properties props = getProperties();
++      props.put(AbstractJWTFilter.JWT_EXPECTED_SIGALG, "RS512");
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice", new Date(new Date().getTime() + 5000),
++                             new Date(), privateKey, JWSAlgorithm.RS512.getName());
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
++      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
++      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
++      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
++  @Test
++  public void testInvalidSignatureAlgorithm() throws Exception {
++    try {
++      Properties props = getProperties();
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice", new Date(new Date().getTime() + 5000),
++                             new Date(), privateKey, JWSAlgorithm.RS384.getName());
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be false.", !chain.doFilterCalled );
++      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
++  @Test
++  public void testNotBeforeJWT() throws Exception {
++    try {
++      Properties props = getProperties();
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 5000),
++                             new Date(new Date().getTime() + 5000), privateKey,
++                             JWSAlgorithm.RS256.getName());
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be false.", !chain.doFilterCalled);
++      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
 +  protected Properties getProperties() {
 +    Properties props = new Properties();
 +    props.setProperty(
 +        SSOCookieFederationFilter.SSO_AUTHENTICATION_PROVIDER_URL,
 +        "https://localhost:8443/authserver");
 +    return props;
 +  }
 +
-   protected SignedJWT getJWT(String sub, Date expires, RSAPrivateKey privateKey,
-       Properties props) throws Exception {
-     return getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, sub, expires, privateKey);
++  protected SignedJWT getJWT(String issuer, String sub, Date expires, RSAPrivateKey privateKey)
++      throws Exception {
++    return getJWT(issuer, sub, expires, new Date(), privateKey, JWSAlgorithm.RS256.getName());
++  }
++
++  protected SignedJWT getJWT(String issuer, String sub, Date expires, Date nbf, RSAPrivateKey privateKey,
++                             String signatureAlgorithm)
++      throws Exception {
++    return getJWT(issuer, sub, "bar", expires, nbf, privateKey, signatureAlgorithm);
 +  }
 +
-   protected SignedJWT getJWT(String issuer, String sub, Date expires, RSAPrivateKey privateKey)
++  protected SignedJWT getJWT(String issuer, String sub, String aud, Date expires, Date nbf, RSAPrivateKey privateKey,
++                             String signatureAlgorithm)
 +      throws Exception {
-     List<String> aud = new ArrayList<String>();
-     aud.add("bar");
++    List<String> audiences = new ArrayList<String>();
++    if (aud != null) {
++      audiences.add(aud);
++    }
 +
 +    JWTClaimsSet claims = new JWTClaimsSet.Builder()
 +    .issuer(issuer)
 +    .subject(sub)
 +    .audience(audiences)
 +    .expirationTime(expires)
++    .notBeforeTime(nbf)
 +    .claim("scope", "openid")
 +    .build();
 +
-     JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.RS256).build();
++    JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.parse(signatureAlgorithm)).build();
 +
 +    SignedJWT signedJWT = new SignedJWT(header, claims);
 +    JWSSigner signer = new RSASSASigner(privateKey);
 +
 +    signedJWT.sign(signer);
 +
 +    return signedJWT;
 +  }
 +
 +  protected static class TestFilterConfig implements FilterConfig {
 +    Properties props = null;
 +
 +    public TestFilterConfig(Properties props) {
 +      this.props = props;
 +    }
 +
 +    @Override
 +    public String getFilterName() {
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterConfig#getServletContext()
 +     */
 +    @Override
 +    public ServletContext getServletContext() {
 +//      JWTokenAuthority authority = EasyMock.createNiceMock(JWTokenAuthority.class);
 +//      GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +//      EasyMock.expect(services.getService("TokenService").andReturn(authority));
 +//      ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +//      EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE).andReturn(new DefaultGatewayServices()));
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterConfig#getInitParameter(java.lang.String)
 +     */
 +    @Override
 +    public String getInitParameter(String name) {
 +      return props.getProperty(name, null);
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterConfig#getInitParameterNames()
 +     */
 +    @Override
 +    public Enumeration<String> getInitParameterNames() {
 +      return null;
 +    }
 +
 +  }
 +
 +  protected static class TestJWTokenAuthority implements JWTokenAuthority {
 +
 +    private PublicKey verifyingKey;
 +
 +    public TestJWTokenAuthority(PublicKey verifyingKey) {
 +      this.verifyingKey = verifyingKey;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(javax.security.auth.Subject, java.lang.String)
 +     */
 +    @Override
 +    public JWT issueToken(Subject subject, String algorithm)
 +        throws TokenServiceException {
 +      // TODO Auto-generated method stub
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm)
 +        throws TokenServiceException {
 +      // TODO Auto-generated method stub
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, java.lang.String)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm)
 +        throws TokenServiceException {
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.services.security.token.JWTokenAuthority#verifyToken(org.apache.knox.gateway.services.security.token.impl.JWT)
 +     */
 +    @Override
 +    public boolean verifyToken(JWT token) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier((RSAPublicKey) verifyingKey);
 +      return token.verify(verifier);
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, java.lang.String, long)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm,
 +        long expires) throws TokenServiceException {
 +      return null;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, List<String> audiences, String algorithm,
 +        long expires) throws TokenServiceException {
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, long)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm, long expires)
 +        throws TokenServiceException {
 +      // TODO Auto-generated method stub
 +      return null;
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token, RSAPublicKey publicKey) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +  }
 +
 +  protected static class TestFilterChain implements FilterChain {
 +    boolean doFilterCalled = false;
 +    Subject subject = null;
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterChain#doFilter(javax.servlet.ServletRequest, javax.servlet.ServletResponse)
 +     */
 +    @Override
 +    public void doFilter(ServletRequest request, ServletResponse response)
 +        throws IOException, ServletException {
 +      doFilterCalled = true;
 +
 +      subject = Subject.getSubject( AccessController.getContext() );
 +    }
 +
 +  }
 +}
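
Side note: the tests above all revolve around the same Nimbus JOSE+JWT round
trip -- sign a token with an RSA private key, serialize it, then parse and
verify it with the matching public key. A minimal, self-contained sketch of
that flow follows; the class name, the locally generated key pair, and the
"some-issuer" string are stand-ins for illustration (the tests use the
suite's fixture keys and AbstractJWTFilter.JWT_DEFAULT_ISSUER):

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.interfaces.RSAPrivateKey;
    import java.security.interfaces.RSAPublicKey;

    import com.nimbusds.jose.JWSAlgorithm;
    import com.nimbusds.jose.JWSHeader;
    import com.nimbusds.jose.crypto.RSASSASigner;
    import com.nimbusds.jose.crypto.RSASSAVerifier;
    import com.nimbusds.jwt.JWTClaimsSet;
    import com.nimbusds.jwt.SignedJWT;

    public class JwtRoundTripSketch {
      public static void main(String[] args) throws Exception {
        // Illustrative key pair; AbstractJWTFilterTest uses fixture keys.
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair kp = kpg.genKeyPair();

        // Build and sign the token, mirroring what getJWT() does above.
        JWTClaimsSet claims = new JWTClaimsSet.Builder()
            .issuer("some-issuer")
            .subject("alice")
            .build();
        SignedJWT jwt = new SignedJWT(
            new JWSHeader.Builder(JWSAlgorithm.RS256).build(), claims);
        jwt.sign(new RSASSASigner((RSAPrivateKey) kp.getPrivate()));

        // What the filter effectively does inbound: parse, then verify.
        SignedJWT parsed = SignedJWT.parse(jwt.serialize());
        boolean valid = parsed.verify(new RSASSAVerifier((RSAPublicKey) kp.getPublic()));
        System.out.println("signature valid: " + valid);   // expected: true
      }
    }

A token signed with a different key pair, as in testFailedSignatureValidationJWT,
makes verify(...) return false rather than throw.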

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/SSOCookieProviderTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/SSOCookieProviderTest.java
index babbee2,0000000..50a44ce
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/SSOCookieProviderTest.java
+++ b/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/SSOCookieProviderTest.java
@@@ -1,161 -1,0 +1,162 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation;
 +
 +import static org.junit.Assert.fail;
 +
 +import java.security.Principal;
 +import java.util.Properties;
 +import java.util.Date;
 +import java.util.Set;
 +
 +import javax.servlet.ServletException;
 +import javax.servlet.http.Cookie;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
++import org.apache.knox.gateway.provider.federation.jwt.filter.AbstractJWTFilter;
 +import org.apache.knox.gateway.provider.federation.jwt.filter.SSOCookieFederationFilter;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.easymock.EasyMock;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import com.nimbusds.jwt.SignedJWT;
 +
 +public class SSOCookieProviderTest extends AbstractJWTFilterTest {
 +  private static final String SERVICE_URL = "https://localhost:8888/resource";
 +
 +  @Before
 +  public void setup() throws Exception {
 +    handler = new TestSSOCookieFederationProvider();
 +    ((TestSSOCookieFederationProvider) handler).setTokenService(new TestJWTokenAuthority(publicKey));
 +  }
 +
 +  protected void setTokenOnRequest(HttpServletRequest request, SignedJWT jwt) {
 +    Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
 +    EasyMock.expect(request.getCookies()).andReturn(new Cookie[] { cookie });
 +  }
 +
 +  protected void setGarbledTokenOnRequest(HttpServletRequest request, SignedJWT jwt) {
 +    Cookie cookie = new Cookie("hadoop-jwt", "ljm" + jwt.serialize());
 +    EasyMock.expect(request.getCookies()).andReturn(new Cookie[] { cookie });
 +  }
 +
 +  protected String getAudienceProperty() {
 +    return TestSSOCookieFederationProvider.SSO_EXPECTED_AUDIENCES;
 +  }
 +
 +  @Test
 +  public void testCustomCookieNameJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.put("sso.cookie.name", "jowt");
 +      handler.init(new TestFilterConfig(props));
 +
-       SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000),
-           privateKey, props);
++      SignedJWT jwt = getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, "alice",
++                             new Date(new Date().getTime() + 5000), privateKey);
 +
 +      Cookie cookie = new Cookie("jowt", jwt.serialize());
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      EasyMock.expect(request.getCookies()).andReturn(new Cookie[] { cookie });
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal returned.", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testNoProviderURLJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.remove("sso.authentication.provider.url");
 +      handler.init(new TestFilterConfig(props));
 +
 +      fail("Servlet exception should have been thrown.");
 +
 +    } catch (ServletException se) {
 +      // expected - let's ensure it mentions the missing authentication provider URL
 +      Assert.assertTrue(se.getMessage().contains("authentication provider URL is missing"));
 +    }
 +  }
 +
 +  @Test
 +  public void testOrigURLWithQueryString() throws Exception {
 +    Properties props = getProperties();
 +    handler.init(new TestFilterConfig(props));
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getRequestURL()).andReturn(
 +        new StringBuffer(SERVICE_URL)).anyTimes();
 +    EasyMock.expect(request.getQueryString()).andReturn("name=value");
 +    EasyMock.replay(request);
 +
 +    String loginURL = ((TestSSOCookieFederationProvider)handler).testConstructLoginURL(request);
 +    Assert.assertNotNull("loginURL should not be null.", loginURL);
 +    Assert.assertEquals("https://localhost:8443/authserver?originalUrl=" + SERVICE_URL + "?name=value", loginURL);
 +  }
 +
 +  @Test
 +  public void testOrigURLNoQueryString() throws Exception {
 +    Properties props = getProperties();
 +    handler.init(new TestFilterConfig(props));
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getRequestURL()).andReturn(
 +        new StringBuffer(SERVICE_URL)).anyTimes();
 +    EasyMock.expect(request.getQueryString()).andReturn(null);
 +    EasyMock.replay(request);
 +
 +    String loginURL = ((TestSSOCookieFederationProvider)handler).testConstructLoginURL(request);
 +    Assert.assertNotNull("LoginURL should not be null.", loginURL);
 +    Assert.assertEquals("https://localhost:8443/authserver?originalUrl=" + SERVICE_URL, loginURL);
 +  }
 +
 +
 +  @Override
 +  protected String getVerificationPemProperty() {
 +    return SSOCookieFederationFilter.SSO_VERIFICATION_PEM;
 +  }
 +
 +  private static class TestSSOCookieFederationProvider extends SSOCookieFederationFilter {
 +    public String testConstructLoginURL(HttpServletRequest req) {
 +      return constructLoginURL(req);
 +    }
 +
 +    public void setTokenService(JWTokenAuthority ts) {
 +      authority = ts;
 +    }
 +  }
 +
 +}
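
Side note: the two constructLoginURL tests pin down the redirect contract --
the configured provider URL gets an originalUrl parameter carrying the full
request URL, query string included. A hedged sketch of that contract (the
class, method, and parameter names are invented for illustration, not the
filter's actual internals):

    import javax.servlet.http.HttpServletRequest;

    class LoginUrlSketch {
      static String loginUrlFor(HttpServletRequest req, String authProviderUrl) {
        StringBuffer original = req.getRequestURL();          // e.g. https://localhost:8888/resource
        if (req.getQueryString() != null) {
          original.append('?').append(req.getQueryString());  // keep name=value when present
        }
        return authProviderUrl + "?originalUrl=" + original;
      }
    }

With the suite's https://localhost:8443/authserver provider URL this yields
exactly the strings the two assertions above expect.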


[09/53] [abbrv] knox git commit: KNOX-998 - package name refactoring

Posted by mo...@apache.org.
KNOX-998 - package name refactoring


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/7d0bff16
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/7d0bff16
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/7d0bff16

Branch: refs/heads/master
Commit: 7d0bff16e7128e5f2e10b54237cbc93f45932ffc
Parents: 78d35f1
Author: Sandeep More <mo...@apache.org>
Authored: Mon Oct 16 13:05:28 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Mon Oct 16 13:05:28 2017 -0400

----------------------------------------------------------------------
 .../ambari/AmbariDynamicServiceURLCreator.java  | 151 ----
 .../ambari/ConditionalValueHandler.java         |  24 -
 .../discovery/ambari/PropertyEqualsHandler.java |  76 --
 .../ambari/ServiceURLPropertyConfig.java        | 324 -------
 .../discovery/ambari/SimpleValueHandler.java    |  32 -
 .../ambari/AmbariDynamicServiceURLCreator.java  | 151 ++++
 .../ambari/AmbariServiceURLCreator.java         |   0
 .../ambari/ConditionalValueHandler.java         |  24 +
 .../discovery/ambari/PropertyEqualsHandler.java |  76 ++
 .../ambari/ServiceURLPropertyConfig.java        | 324 +++++++
 .../discovery/ambari/SimpleValueHandler.java    |  32 +
 .../AmbariDynamicServiceURLCreatorTest.java     | 876 -------------------
 .../AmbariDynamicServiceURLCreatorTest.java     | 876 +++++++++++++++++++
 .../gateway/websockets/ProxyInboundClient.java  | 107 ---
 .../gateway/websockets/ProxyInboundClient.java  | 107 +++
 .../websockets/ProxyInboundClientTest.java      | 374 --------
 .../websockets/ProxyInboundClientTest.java      | 374 ++++++++
 17 files changed, 1964 insertions(+), 1964 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
deleted file mode 100644
index ed5d3e7..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-
-class AmbariDynamicServiceURLCreator {
-
-    static final String MAPPING_CONFIG_OVERRIDE_PROPERTY = "org.apache.gateway.topology.discovery.ambari.config";
-
-    private AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
-
-    private AmbariCluster cluster = null;
-    private ServiceURLPropertyConfig config;
-
-    AmbariDynamicServiceURLCreator(AmbariCluster cluster) {
-        this.cluster = cluster;
-
-        String mappingConfiguration = System.getProperty(MAPPING_CONFIG_OVERRIDE_PROPERTY);
-        if (mappingConfiguration != null) {
-            File mappingConfigFile = new File(mappingConfiguration);
-            if (mappingConfigFile.exists()) {
-                try {
-                    config = new ServiceURLPropertyConfig(mappingConfigFile);
-                    log.loadedComponentConfigMappings(mappingConfigFile.getAbsolutePath());
-                } catch (Exception e) {
-                    log.failedToLoadComponentConfigMappings(mappingConfigFile.getAbsolutePath(), e);
-                }
-            }
-        }
-
-        // If there is no valid override configured, fall-back to the internal mapping configuration
-        if (config == null) {
-            config = new ServiceURLPropertyConfig();
-        }
-    }
-
-    AmbariDynamicServiceURLCreator(AmbariCluster cluster, File mappingConfiguration) throws IOException {
-        this.cluster = cluster;
-        config = new ServiceURLPropertyConfig(new FileInputStream(mappingConfiguration));
-    }
-
-    AmbariDynamicServiceURLCreator(AmbariCluster cluster, String mappings) {
-        this.cluster = cluster;
-        config = new ServiceURLPropertyConfig(new ByteArrayInputStream(mappings.getBytes()));
-    }
-
-    List<String> create(String serviceName) {
-        List<String> urls = new ArrayList<>();
-
-        Map<String, String> placeholderValues = new HashMap<>();
-        List<String> componentHostnames = new ArrayList<>();
-        String hostNamePlaceholder = null;
-
-        ServiceURLPropertyConfig.URLPattern pattern = config.getURLPattern(serviceName);
-        if (pattern != null) {
-            for (String propertyName : pattern.getPlaceholders()) {
-                ServiceURLPropertyConfig.Property configProperty = config.getConfigProperty(serviceName, propertyName);
-
-                String propertyValue = null;
-                String propertyType = configProperty.getType();
-                if (ServiceURLPropertyConfig.Property.TYPE_SERVICE.equals(propertyType)) {
-                    log.lookingUpServiceConfigProperty(configProperty.getService(), configProperty.getServiceConfig(), configProperty.getValue());
-                    AmbariCluster.ServiceConfiguration svcConfig =
-                        cluster.getServiceConfiguration(configProperty.getService(), configProperty.getServiceConfig());
-                    if (svcConfig != null) {
-                        propertyValue = svcConfig.getProperties().get(configProperty.getValue());
-                    }
-                } else if (ServiceURLPropertyConfig.Property.TYPE_COMPONENT.equals(propertyType)) {
-                    String compName = configProperty.getComponent();
-                    if (compName != null) {
-                        AmbariComponent component = cluster.getComponent(compName);
-                        if (component != null) {
-                            if (ServiceURLPropertyConfig.Property.PROP_COMP_HOSTNAME.equals(configProperty.getValue())) {
-                                log.lookingUpComponentHosts(compName);
-                                componentHostnames.addAll(component.getHostNames());
-                                hostNamePlaceholder = propertyName; // Remember the host name placeholder
-                            } else {
-                                log.lookingUpComponentConfigProperty(compName, configProperty.getValue());
-                                propertyValue = component.getConfigProperty(configProperty.getValue());
-                            }
-                        }
-                    }
-                } else { // Derived property
-                    log.handlingDerivedProperty(serviceName, configProperty.getType(), configProperty.getName());
-                    ServiceURLPropertyConfig.Property p = config.getConfigProperty(serviceName, configProperty.getName());
-                    propertyValue = p.getValue();
-                    if (propertyValue == null) {
-                        if (p.getConditionHandler() != null) {
-                            propertyValue = p.getConditionHandler().evaluate(config, cluster);
-                        }
-                    }
-                }
-
-                log.determinedPropertyValue(configProperty.getName(), propertyValue);
-                placeholderValues.put(configProperty.getName(), propertyValue);
-            }
-
-            // For patterns with a placeholder value for the hostname (e.g., multiple URL scenarios)
-            if (!componentHostnames.isEmpty()) {
-                for (String componentHostname : componentHostnames) {
-                    String url = pattern.get().replace("{" + hostNamePlaceholder + "}", componentHostname);
-                    urls.add(createURL(url, placeholderValues));
-                }
-            } else { // Single URL result case
-                urls.add(createURL(pattern.get(), placeholderValues));
-            }
-        }
-
-        return urls;
-    }
-
-    private String createURL(String pattern, Map<String, String> placeholderValues) {
-        String url = null;
-        if (pattern != null) {
-            url = pattern;
-            for (String placeHolder : placeholderValues.keySet()) {
-                String value = placeholderValues.get(placeHolder);
-                if (value != null) {
-                    url = url.replace("{" + placeHolder + "}", value);
-                }
-            }
-        }
-        return url;
-    }
-
-}
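
Side note: the placeholder substitution in createURL (unchanged in the
re-added copy of this class) is plain string templating. A standalone sketch
of the same loop, with made-up placeholder names and values:

    import java.util.HashMap;
    import java.util.Map;

    public class PlaceholderFillSketch {
      static String fill(String pattern, Map<String, String> values) {
        String url = pattern;
        for (Map.Entry<String, String> e : values.entrySet()) {
          if (e.getValue() != null) {               // null leaves the placeholder intact
            url = url.replace("{" + e.getKey() + "}", e.getValue());
          }
        }
        return url;
      }

      public static void main(String[] args) {
        Map<String, String> values = new HashMap<>();
        values.put("HOST", "c6401.ambari.apache.org");   // hypothetical component host
        values.put("PORT", "50070");                     // hypothetical config value
        System.out.println(fill("http://{HOST}:{PORT}/webhdfs", values));
        // -> http://c6401.ambari.apache.org:50070/webhdfs
      }
    }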

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ConditionalValueHandler.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ConditionalValueHandler.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ConditionalValueHandler.java
deleted file mode 100644
index d76a161..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ConditionalValueHandler.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-
-interface ConditionalValueHandler {
-
-    String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster);
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/PropertyEqualsHandler.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
deleted file mode 100644
index 642a676..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-
-class PropertyEqualsHandler implements ConditionalValueHandler {
-
-    private String serviceName                        = null;
-    private String propertyName                       = null;
-    private String propertyValue                      = null;
-    private ConditionalValueHandler affirmativeResult = null;
-    private ConditionalValueHandler negativeResult    = null;
-
-    PropertyEqualsHandler(String                  serviceName,
-                          String                  propertyName,
-                          String                  propertyValue,
-                          ConditionalValueHandler affirmativeResult,
-                          ConditionalValueHandler negativeResult) {
-        this.serviceName       = serviceName;
-        this.propertyName      = propertyName;
-        this.propertyValue     = propertyValue;
-        this.affirmativeResult = affirmativeResult;
-        this.negativeResult    = negativeResult;
-    }
-
-    @Override
-    public String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster) {
-        String result = null;
-
-        ServiceURLPropertyConfig.Property p = config.getConfigProperty(serviceName, propertyName);
-        if (p != null) {
-            String value = getActualPropertyValue(cluster, p);
-            if (propertyValue.equals(value)) {
-                result = affirmativeResult.evaluate(config, cluster);
-            } else if (negativeResult != null) {
-                result = negativeResult.evaluate(config, cluster);
-            }
-
-            // Check if the result is a reference to a local derived property
-            ServiceURLPropertyConfig.Property derived = config.getConfigProperty(serviceName, result);
-            if (derived != null) {
-                result = getActualPropertyValue(cluster, derived);
-            }
-        }
-
-        return result;
-    }
-
-    private String getActualPropertyValue(AmbariCluster cluster, ServiceURLPropertyConfig.Property property) {
-        String value = null;
-        String propertyType = property.getType();
-        if (ServiceURLPropertyConfig.Property.TYPE_COMPONENT.equals(propertyType)) {
-            AmbariComponent component = cluster.getComponent(property.getComponent());
-            if (component != null) {
-                value = component.getConfigProperty(property.getValue());
-            }
-        } else if (ServiceURLPropertyConfig.Property.TYPE_SERVICE.equals(propertyType)) {
-            value = cluster.getServiceConfiguration(property.getService(), property.getServiceConfig()).getProperties().get(property.getValue());
-        }
-        return value;
-    }
-}
\ No newline at end of file
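
Side note: an if/then/else rule from the mapping XML becomes a
PropertyEqualsHandler whose affirmative and negative branches are themselves
ConditionalValueHandlers. A hedged sketch of wiring one up by hand, assuming
the sketch lives in the same package (these types are package-private);
FixedValueHandler is invented here -- the real leaf type is the
SimpleValueHandler listed in this commit's file stats, whose body is not
shown in this diff:

    // Invented leaf handler: always yields a fixed string.
    class FixedValueHandler implements ConditionalValueHandler {
      private final String value;
      FixedValueHandler(String value) { this.value = value; }
      @Override
      public String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster) {
        return value;
      }
    }

    class SchemeRuleSketch {
      // Hypothetical rule: if the WEBHDFS property DFS_HTTP_POLICY equals
      // HTTPS_ONLY, resolve to "https", otherwise to "http".
      static ConditionalValueHandler schemeRule() {
        return new PropertyEqualsHandler("WEBHDFS", "DFS_HTTP_POLICY", "HTTPS_ONLY",
            new FixedValueHandler("https"), new FixedValueHandler("http"));
      }
    }

The handler returned by schemeRule() would then feed a {SCHEME}-style
placeholder via evaluate(config, cluster).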

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
deleted file mode 100644
index 3330cc3..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.util.XmlUtils;
-import org.w3c.dom.Document;
-import org.w3c.dom.NamedNodeMap;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpression;
-import javax.xml.xpath.XPathExpressionException;
-import javax.xml.xpath.XPathFactory;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Service URL pattern mapping configuration model.
- */
-class ServiceURLPropertyConfig {
-
-    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
-
-    private static final String ATTR_NAME = "name";
-
-    private static XPathExpression SERVICE_URL_PATTERN_MAPPINGS;
-    private static XPathExpression URL_PATTERN;
-    private static XPathExpression PROPERTIES;
-    static {
-        XPath xpath = XPathFactory.newInstance().newXPath();
-        try {
-            SERVICE_URL_PATTERN_MAPPINGS = xpath.compile("/service-discovery-url-mappings/service");
-            URL_PATTERN                  = xpath.compile("url-pattern/text()");
-            PROPERTIES                   = xpath.compile("properties/property");
-        } catch (XPathExpressionException e) {
-            e.printStackTrace();
-        }
-    }
-
-    private static final String DEFAULT_SERVICE_URL_MAPPINGS = "ambari-service-discovery-url-mappings.xml";
-
-    private Map<String, URLPattern> urlPatterns = new HashMap<>();
-
-    private Map<String, Map<String, Property>> properties = new HashMap<>();
-
-
-    /**
-     * The default service URL pattern to property mapping configuration will be used.
-     */
-    ServiceURLPropertyConfig() {
-        this(ServiceURLPropertyConfig.class.getClassLoader().getResourceAsStream(DEFAULT_SERVICE_URL_MAPPINGS));
-    }
-
-    /**
-     * The default service URL pattern to property mapping configuration will be used.
-     */
-    ServiceURLPropertyConfig(File mappingConfigurationFile) throws Exception {
-        this(new FileInputStream(mappingConfigurationFile));
-    }
-
-    /**
-     *
-     * @param source An InputStream for the XML content
-     */
-    ServiceURLPropertyConfig(InputStream source) {
-        // Parse the XML, and build the model
-        try {
-            Document doc = XmlUtils.readXml(source);
-
-            NodeList serviceNodes =
-                    (NodeList) SERVICE_URL_PATTERN_MAPPINGS.evaluate(doc, XPathConstants.NODESET);
-            for (int i=0; i < serviceNodes.getLength(); i++) {
-                Node serviceNode = serviceNodes.item(i);
-                String serviceName = serviceNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
-                properties.put(serviceName, new HashMap<String, Property>());
-
-                Node urlPatternNode = (Node) URL_PATTERN.evaluate(serviceNode, XPathConstants.NODE);
-                if (urlPatternNode != null) {
-                    urlPatterns.put(serviceName, new URLPattern(urlPatternNode.getNodeValue()));
-                }
-
-                NodeList propertiesNode = (NodeList) PROPERTIES.evaluate(serviceNode, XPathConstants.NODESET);
-                if (propertiesNode != null) {
-                    processProperties(serviceName, propertiesNode);
-                }
-            }
-        } catch (Exception e) {
-            log.failedToLoadServiceDiscoveryConfiguration(e);
-        } finally {
-            try {
-                source.close();
-            } catch (IOException e) {
-                // Ignore
-            }
-        }
-    }
-
-    private void processProperties(String serviceName, NodeList propertyNodes) {
-        for (int i = 0; i < propertyNodes.getLength(); i++) {
-            Property p = Property.createProperty(serviceName, propertyNodes.item(i));
-            properties.get(serviceName).put(p.getName(), p);
-        }
-    }
-
-    URLPattern getURLPattern(String service) {
-        return urlPatterns.get(service);
-    }
-
-    Property getConfigProperty(String service, String property) {
-        return properties.get(service).get(property);
-    }
-
-    static class URLPattern {
-        String pattern;
-        List<String> placeholders = new ArrayList<>();
-
-        URLPattern(String pattern) {
-            this.pattern = pattern;
-
-            final Pattern regex = Pattern.compile("\\{(.*?)}", Pattern.DOTALL);
-            final Matcher matcher = regex.matcher(pattern);
-            while( matcher.find() ){
-                placeholders.add(matcher.group(1));
-            }
-        }
-
-        String get() {return pattern; }
-        List<String> getPlaceholders() {
-            return placeholders;
-        }
-    }
-
-    static class Property {
-        static final String TYPE_SERVICE   = "SERVICE";
-        static final String TYPE_COMPONENT = "COMPONENT";
-        static final String TYPE_DERIVED   = "DERIVED";
-
-        static final String PROP_COMP_HOSTNAME = "component.host.name";
-
-        static final String ATTR_NAME     = "name";
-        static final String ATTR_PROPERTY = "property";
-        static final String ATTR_VALUE    = "value";
-
-        static XPathExpression HOSTNAME;
-        static XPathExpression SERVICE_CONFIG;
-        static XPathExpression COMPONENT;
-        static XPathExpression CONFIG_PROPERTY;
-        static XPathExpression IF;
-        static XPathExpression THEN;
-        static XPathExpression ELSE;
-        static XPathExpression TEXT;
-        static {
-            XPath xpath = XPathFactory.newInstance().newXPath();
-            try {
-                HOSTNAME        = xpath.compile("hostname");
-                SERVICE_CONFIG  = xpath.compile("service-config");
-                COMPONENT       = xpath.compile("component");
-                CONFIG_PROPERTY = xpath.compile("config-property");
-                IF              = xpath.compile("if");
-                THEN            = xpath.compile("then");
-                ELSE            = xpath.compile("else");
-                TEXT            = xpath.compile("text()");
-            } catch (XPathExpressionException e) {
-                e.printStackTrace();
-            }
-        }
-
-
-        String type;
-        String name;
-        String component;
-        String service;
-        String serviceConfig;
-        String value;
-        ConditionalValueHandler conditionHandler = null;
-
-        private Property(String type,
-                         String propertyName,
-                         String component,
-                         String service,
-                         String configType,
-                         String value,
-                         ConditionalValueHandler pch) {
-            this.type = type;
-            this.name = propertyName;
-            this.service = service;
-            this.component = component;
-            this.serviceConfig = configType;
-            this.value = value;
-            conditionHandler = pch;
-        }
-
-        static Property createProperty(String serviceName, Node propertyNode) {
-            String propertyName = propertyNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
-            String propertyType = null;
-            String serviceType = null;
-            String configType = null;
-            String componentType = null;
-            String value = null;
-            ConditionalValueHandler pch = null;
-
-            try {
-                Node hostNameNode = (Node) HOSTNAME.evaluate(propertyNode, XPathConstants.NODE);
-                if (hostNameNode != null) {
-                    value = PROP_COMP_HOSTNAME;
-                }
-
-                // Check for a service-config node
-                Node scNode = (Node) SERVICE_CONFIG.evaluate(propertyNode, XPathConstants.NODE);
-                if (scNode != null) {
-                    // Service config property
-                    propertyType = Property.TYPE_SERVICE;
-                    serviceType = scNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
-                    Node scTextNode = (Node) TEXT.evaluate(scNode, XPathConstants.NODE);
-                    configType = scTextNode.getNodeValue();
-                } else { // If not service-config node, check for a component config node
-                    Node cNode = (Node) COMPONENT.evaluate(propertyNode, XPathConstants.NODE);
-                    if (cNode != null) {
-                        // Component config property
-                        propertyType = Property.TYPE_COMPONENT;
-                        componentType = cNode.getFirstChild().getNodeValue();
-                        Node cTextNode = (Node) TEXT.evaluate(cNode, XPathConstants.NODE);
-                        configType = cTextNode.getNodeValue();
-                        componentType = cTextNode.getNodeValue();
-                    }
-                }
-
-                // Check for a config property node
-                Node cpNode = (Node) CONFIG_PROPERTY.evaluate(propertyNode, XPathConstants.NODE);
-                if (cpNode != null) {
-                    // Check for a condition element
-                    Node ifNode = (Node) IF.evaluate(cpNode, XPathConstants.NODE);
-                    if (ifNode != null) {
-                        propertyType = TYPE_DERIVED;
-                        pch = getConditionHandler(serviceName, ifNode);
-                    } else {
-                        Node cpTextNode = (Node) TEXT.evaluate(cpNode, XPathConstants.NODE);
-                        value = cpTextNode.getNodeValue();
-                    }
-                }
-            } catch (Exception e) {
-                e.printStackTrace();
-            }
-
-            // Create and return the property representation
-            return new Property(propertyType, propertyName, componentType, serviceType, configType, value, pch);
-        }
-
-        private static ConditionalValueHandler getConditionHandler(String serviceName, Node ifNode) throws Exception {
-            ConditionalValueHandler result = null;
-
-            if (ifNode != null) {
-                NamedNodeMap attrs = ifNode.getAttributes();
-                String comparisonPropName = attrs.getNamedItem(ATTR_PROPERTY).getNodeValue();
-                String comparisonValue = attrs.getNamedItem(ATTR_VALUE).getNodeValue();
-
-                ConditionalValueHandler affirmativeResult = null;
-                Node thenNode = (Node) THEN.evaluate(ifNode, XPathConstants.NODE);
-                if (thenNode != null) {
-                    Node subIfNode = (Node) IF.evaluate(thenNode, XPathConstants.NODE);
-                    if (subIfNode != null) {
-                        affirmativeResult = getConditionHandler(serviceName, subIfNode);
-                    } else {
-                        affirmativeResult = new SimpleValueHandler(thenNode.getFirstChild().getNodeValue());
-                    }
-                }
-
-                ConditionalValueHandler negativeResult = null;
-                Node elseNode = (Node) ELSE.evaluate(ifNode, XPathConstants.NODE);
-                if (elseNode != null) {
-                    Node subIfNode = (Node) IF.evaluate(elseNode, XPathConstants.NODE);
-                    if (subIfNode != null) {
-                        negativeResult = getConditionHandler(serviceName, subIfNode);
-                    } else {
-                        negativeResult = new SimpleValueHandler(elseNode.getFirstChild().getNodeValue());
-                    }
-                }
-
-                result = new PropertyEqualsHandler(serviceName,
-                        comparisonPropName,
-                        comparisonValue,
-                        affirmativeResult,
-                        negativeResult);
-            }
-
-            return result;
-        }
-
-        String getType() { return type; }
-        String getName() { return name; }
-        String getComponent() { return component; }
-        String getService() { return service; }
-        String getServiceConfig() { return serviceConfig; }
-        String getValue() {
-            return value;
-        }
-        ConditionalValueHandler getConditionHandler() { return conditionHandler; }
-    }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/SimpleValueHandler.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/SimpleValueHandler.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/SimpleValueHandler.java
deleted file mode 100644
index 8e0cd75..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/SimpleValueHandler.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-
-class SimpleValueHandler implements ConditionalValueHandler {
-    private String value;
-
-    SimpleValueHandler(String value) {
-        this.value = value;
-    }
-
-    @Override
-    public String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster) {
-        return value;
-    }
-}
-

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
new file mode 100644
index 0000000..3c2269d
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+
+class AmbariDynamicServiceURLCreator {
+
+    static final String MAPPING_CONFIG_OVERRIDE_PROPERTY = "org.apache.gateway.topology.discovery.ambari.config";
+
+    private AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+    private AmbariCluster cluster = null;
+    private ServiceURLPropertyConfig config;
+
+    AmbariDynamicServiceURLCreator(AmbariCluster cluster) {
+        this.cluster = cluster;
+
+        String mappingConfiguration = System.getProperty(MAPPING_CONFIG_OVERRIDE_PROPERTY);
+        if (mappingConfiguration != null) {
+            File mappingConfigFile = new File(mappingConfiguration);
+            if (mappingConfigFile.exists()) {
+                try {
+                    config = new ServiceURLPropertyConfig(mappingConfigFile);
+                    log.loadedComponentConfigMappings(mappingConfigFile.getAbsolutePath());
+                } catch (Exception e) {
+                    log.failedToLoadComponentConfigMappings(mappingConfigFile.getAbsolutePath(), e);
+                }
+            }
+        }
+
+        // If there is no valid override configured, fall back to the internal mapping configuration
+        if (config == null) {
+            config = new ServiceURLPropertyConfig();
+        }
+    }
+
+    AmbariDynamicServiceURLCreator(AmbariCluster cluster, File mappingConfiguration) throws IOException {
+        this.cluster = cluster;
+        config = new ServiceURLPropertyConfig(new FileInputStream(mappingConfiguration));
+    }
+
+    AmbariDynamicServiceURLCreator(AmbariCluster cluster, String mappings) {
+        this.cluster = cluster;
+        config = new ServiceURLPropertyConfig(new ByteArrayInputStream(mappings.getBytes()));
+    }
+
+    List<String> create(String serviceName) {
+        List<String> urls = new ArrayList<>();
+
+        Map<String, String> placeholderValues = new HashMap<>();
+        List<String> componentHostnames = new ArrayList<>();
+        String hostNamePlaceholder = null;
+
+        ServiceURLPropertyConfig.URLPattern pattern = config.getURLPattern(serviceName);
+        if (pattern != null) {
+            for (String propertyName : pattern.getPlaceholders()) {
+                ServiceURLPropertyConfig.Property configProperty = config.getConfigProperty(serviceName, propertyName);
+
+                String propertyValue = null;
+                String propertyType = configProperty.getType();
+                if (ServiceURLPropertyConfig.Property.TYPE_SERVICE.equals(propertyType)) {
+                    log.lookingUpServiceConfigProperty(configProperty.getService(), configProperty.getServiceConfig(), configProperty.getValue());
+                    AmbariCluster.ServiceConfiguration svcConfig =
+                        cluster.getServiceConfiguration(configProperty.getService(), configProperty.getServiceConfig());
+                    if (svcConfig != null) {
+                        propertyValue = svcConfig.getProperties().get(configProperty.getValue());
+                    }
+                } else if (ServiceURLPropertyConfig.Property.TYPE_COMPONENT.equals(propertyType)) {
+                    String compName = configProperty.getComponent();
+                    if (compName != null) {
+                        AmbariComponent component = cluster.getComponent(compName);
+                        if (component != null) {
+                            if (ServiceURLPropertyConfig.Property.PROP_COMP_HOSTNAME.equals(configProperty.getValue())) {
+                                log.lookingUpComponentHosts(compName);
+                                componentHostnames.addAll(component.getHostNames());
+                                hostNamePlaceholder = propertyName; // Remember the host name placeholder
+                            } else {
+                                log.lookingUpComponentConfigProperty(compName, configProperty.getValue());
+                                propertyValue = component.getConfigProperty(configProperty.getValue());
+                            }
+                        }
+                    }
+                } else { // Derived property
+                    log.handlingDerivedProperty(serviceName, configProperty.getType(), configProperty.getName());
+                    ServiceURLPropertyConfig.Property p = config.getConfigProperty(serviceName, configProperty.getName());
+                    propertyValue = p.getValue();
+                    if (propertyValue == null) {
+                        if (p.getConditionHandler() != null) {
+                            propertyValue = p.getConditionHandler().evaluate(config, cluster);
+                        }
+                    }
+                }
+
+                log.determinedPropertyValue(configProperty.getName(), propertyValue);
+                placeholderValues.put(configProperty.getName(), propertyValue);
+            }
+
+            // For patterns with a placeholder value for the hostname (e.g., multiple URL scenarios)
+            if (!componentHostnames.isEmpty()) {
+                for (String componentHostname : componentHostnames) {
+                    String url = pattern.get().replace("{" + hostNamePlaceholder + "}", componentHostname);
+                    urls.add(createURL(url, placeholderValues));
+                }
+            } else { // Single URL result case
+                urls.add(createURL(pattern.get(), placeholderValues));
+            }
+        }
+
+        return urls;
+    }
+
+    private String createURL(String pattern, Map<String, String> placeholderValues) {
+        String url = null;
+        if (pattern != null) {
+            url = pattern;
+            for (String placeHolder : placeholderValues.keySet()) {
+                String value = placeholderValues.get(placeHolder);
+                if (value != null) {
+                    url = url.replace("{" + placeHolder + "}", value);
+                }
+            }
+        }
+        return url;
+    }
+
+}
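
A usage sketch, for orientation: driving the creator from a populated cluster model. This is an illustrative fragment assuming the same package (the API is package-private) and an AmbariCluster already filled in by an earlier discovery call; it is not part of the commit.

    package org.apache.knox.gateway.topology.discovery.ambari;

    import java.util.List;

    class UrlCreatorExampleSketch {
        // Hypothetical helper: resolve the URLs for one service using the
        // internal mapping configuration. Host-based patterns yield one URL
        // per discovered component host; unmapped services yield an empty list.
        static List<String> resolveUrls(AmbariCluster cluster, String serviceName) {
            AmbariDynamicServiceURLCreator creator = new AmbariDynamicServiceURLCreator(cluster);
            return creator.create(serviceName);
        }
    }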

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceURLCreator.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ConditionalValueHandler.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ConditionalValueHandler.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ConditionalValueHandler.java
new file mode 100644
index 0000000..168fce6
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ConditionalValueHandler.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+
+interface ConditionalValueHandler {
+
+    String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster);
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
new file mode 100644
index 0000000..4044d56
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+
+class PropertyEqualsHandler implements ConditionalValueHandler {
+
+    private String serviceName                        = null;
+    private String propertyName                       = null;
+    private String propertyValue                      = null;
+    private ConditionalValueHandler affirmativeResult = null;
+    private ConditionalValueHandler negativeResult    = null;
+
+    PropertyEqualsHandler(String                  serviceName,
+                          String                  propertyName,
+                          String                  propertyValue,
+                          ConditionalValueHandler affirmativeResult,
+                          ConditionalValueHandler negativeResult) {
+        this.serviceName       = serviceName;
+        this.propertyName      = propertyName;
+        this.propertyValue     = propertyValue;
+        this.affirmativeResult = affirmativeResult;
+        this.negativeResult    = negativeResult;
+    }
+
+    @Override
+    public String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster) {
+        String result = null;
+
+        ServiceURLPropertyConfig.Property p = config.getConfigProperty(serviceName, propertyName);
+        if (p != null) {
+            String value = getActualPropertyValue(cluster, p);
+            if (propertyValue.equals(value)) {
+                result = affirmativeResult.evaluate(config, cluster);
+            } else if (negativeResult != null) {
+                result = negativeResult.evaluate(config, cluster);
+            }
+
+            // Check if the result is a reference to a local derived property
+            ServiceURLPropertyConfig.Property derived = config.getConfigProperty(serviceName, result);
+            if (derived != null) {
+                result = getActualPropertyValue(cluster, derived);
+            }
+        }
+
+        return result;
+    }
+
+    private String getActualPropertyValue(AmbariCluster cluster, ServiceURLPropertyConfig.Property property) {
+        String value = null;
+        String propertyType = property.getType();
+        if (ServiceURLPropertyConfig.Property.TYPE_COMPONENT.equals(propertyType)) {
+            AmbariComponent component = cluster.getComponent(property.getComponent());
+            if (component != null) {
+                value = component.getConfigProperty(property.getValue());
+            }
+        } else if (ServiceURLPropertyConfig.Property.TYPE_SERVICE.equals(propertyType)) {
+            value = cluster.getServiceConfiguration(property.getService(), property.getServiceConfig()).getProperties().get(property.getValue());
+        }
+        return value;
+    }
+}
\ No newline at end of file
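
A condensed sketch of what this handler expresses: an if/then/else over a configuration property. Handler trees like this are normally produced by ServiceURLPropertyConfig.getConditionHandler() (below) from nested if elements rather than built by hand; the "ssl.enabled" property and the scheme values here are invented for illustration.

    package org.apache.knox.gateway.topology.discovery.ambari;

    class ConditionExampleSketch {
        // Equivalent to: if serviceName's "ssl.enabled" equals "true"
        // then "https" else "http". Invented names, not from the shipped mappings.
        static ConditionalValueHandler schemeFor(String serviceName) {
            return new PropertyEqualsHandler(serviceName,
                    "ssl.enabled",
                    "true",
                    new SimpleValueHandler("https"),
                    new SimpleValueHandler("http"));
        }
    }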

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
new file mode 100644
index 0000000..ed07873
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
@@ -0,0 +1,324 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.util.XmlUtils;
+import org.w3c.dom.Document;
+import org.w3c.dom.NamedNodeMap;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import javax.xml.xpath.XPathExpressionException;
+import javax.xml.xpath.XPathFactory;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Service URL pattern mapping configuration model.
+ */
+class ServiceURLPropertyConfig {
+
+    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+    private static final String ATTR_NAME = "name";
+
+    private static XPathExpression SERVICE_URL_PATTERN_MAPPINGS;
+    private static XPathExpression URL_PATTERN;
+    private static XPathExpression PROPERTIES;
+    static {
+        XPath xpath = XPathFactory.newInstance().newXPath();
+        try {
+            SERVICE_URL_PATTERN_MAPPINGS = xpath.compile("/service-discovery-url-mappings/service");
+            URL_PATTERN                  = xpath.compile("url-pattern/text()");
+            PROPERTIES                   = xpath.compile("properties/property");
+        } catch (XPathExpressionException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private static final String DEFAULT_SERVICE_URL_MAPPINGS = "ambari-service-discovery-url-mappings.xml";
+
+    private Map<String, URLPattern> urlPatterns = new HashMap<>();
+
+    private Map<String, Map<String, Property>> properties = new HashMap<>();
+
+
+    /**
+     * The default service URL pattern to property mapping configuration will be used.
+     */
+    ServiceURLPropertyConfig() {
+        this(ServiceURLPropertyConfig.class.getClassLoader().getResourceAsStream(DEFAULT_SERVICE_URL_MAPPINGS));
+    }
+
+    /**
+     * Load the service URL pattern to property mapping configuration from the specified file.
+     */
+    ServiceURLPropertyConfig(File mappingConfigurationFile) throws Exception {
+        this(new FileInputStream(mappingConfigurationFile));
+    }
+
+    /**
+     * Load the service URL pattern to property mapping configuration from an XML stream.
+     *
+     * @param source An InputStream for the XML content
+     */
+    ServiceURLPropertyConfig(InputStream source) {
+        // Parse the XML, and build the model
+        try {
+            Document doc = XmlUtils.readXml(source);
+
+            NodeList serviceNodes =
+                    (NodeList) SERVICE_URL_PATTERN_MAPPINGS.evaluate(doc, XPathConstants.NODESET);
+            for (int i=0; i < serviceNodes.getLength(); i++) {
+                Node serviceNode = serviceNodes.item(i);
+                String serviceName = serviceNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
+                properties.put(serviceName, new HashMap<String, Property>());
+
+                Node urlPatternNode = (Node) URL_PATTERN.evaluate(serviceNode, XPathConstants.NODE);
+                if (urlPatternNode != null) {
+                    urlPatterns.put(serviceName, new URLPattern(urlPatternNode.getNodeValue()));
+                }
+
+                NodeList propertiesNode = (NodeList) PROPERTIES.evaluate(serviceNode, XPathConstants.NODESET);
+                if (propertiesNode != null) {
+                    processProperties(serviceName, propertiesNode);
+                }
+            }
+        } catch (Exception e) {
+            log.failedToLoadServiceDiscoveryConfiguration(e);
+        } finally {
+            try {
+                source.close();
+            } catch (IOException e) {
+                // Ignore
+            }
+        }
+    }
+
+    private void processProperties(String serviceName, NodeList propertyNodes) {
+        for (int i = 0; i < propertyNodes.getLength(); i++) {
+            Property p = Property.createProperty(serviceName, propertyNodes.item(i));
+            properties.get(serviceName).put(p.getName(), p);
+        }
+    }
+
+    URLPattern getURLPattern(String service) {
+        return urlPatterns.get(service);
+    }
+
+    Property getConfigProperty(String service, String property) {
+        return properties.get(service).get(property);
+    }
+
+    static class URLPattern {
+        String pattern;
+        List<String> placeholders = new ArrayList<>();
+
+        URLPattern(String pattern) {
+            this.pattern = pattern;
+
+            final Pattern regex = Pattern.compile("\\{(.*?)}", Pattern.DOTALL);
+            final Matcher matcher = regex.matcher(pattern);
+            while( matcher.find() ){
+                placeholders.add(matcher.group(1));
+            }
+        }
+
+        String get() { return pattern; }
+        List<String> getPlaceholders() {
+            return placeholders;
+        }
+    }
+
+    static class Property {
+        static final String TYPE_SERVICE   = "SERVICE";
+        static final String TYPE_COMPONENT = "COMPONENT";
+        static final String TYPE_DERIVED   = "DERIVED";
+
+        static final String PROP_COMP_HOSTNAME = "component.host.name";
+
+        static final String ATTR_NAME     = "name";
+        static final String ATTR_PROPERTY = "property";
+        static final String ATTR_VALUE    = "value";
+
+        static XPathExpression HOSTNAME;
+        static XPathExpression SERVICE_CONFIG;
+        static XPathExpression COMPONENT;
+        static XPathExpression CONFIG_PROPERTY;
+        static XPathExpression IF;
+        static XPathExpression THEN;
+        static XPathExpression ELSE;
+        static XPathExpression TEXT;
+        static {
+            XPath xpath = XPathFactory.newInstance().newXPath();
+            try {
+                HOSTNAME        = xpath.compile("hostname");
+                SERVICE_CONFIG  = xpath.compile("service-config");
+                COMPONENT       = xpath.compile("component");
+                CONFIG_PROPERTY = xpath.compile("config-property");
+                IF              = xpath.compile("if");
+                THEN            = xpath.compile("then");
+                ELSE            = xpath.compile("else");
+                TEXT            = xpath.compile("text()");
+            } catch (XPathExpressionException e) {
+                e.printStackTrace();
+            }
+        }
+
+
+        String type;
+        String name;
+        String component;
+        String service;
+        String serviceConfig;
+        String value;
+        ConditionalValueHandler conditionHandler = null;
+
+        private Property(String type,
+                         String propertyName,
+                         String component,
+                         String service,
+                         String configType,
+                         String value,
+                         ConditionalValueHandler pch) {
+            this.type = type;
+            this.name = propertyName;
+            this.service = service;
+            this.component = component;
+            this.serviceConfig = configType;
+            this.value = value;
+            conditionHandler = pch;
+        }
+
+        static Property createProperty(String serviceName, Node propertyNode) {
+            String propertyName = propertyNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
+            String propertyType = null;
+            String serviceType = null;
+            String configType = null;
+            String componentType = null;
+            String value = null;
+            ConditionalValueHandler pch = null;
+
+            try {
+                Node hostNameNode = (Node) HOSTNAME.evaluate(propertyNode, XPathConstants.NODE);
+                if (hostNameNode != null) {
+                    value = PROP_COMP_HOSTNAME;
+                }
+
+                // Check for a service-config node
+                Node scNode = (Node) SERVICE_CONFIG.evaluate(propertyNode, XPathConstants.NODE);
+                if (scNode != null) {
+                    // Service config property
+                    propertyType = Property.TYPE_SERVICE;
+                    serviceType = scNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
+                    Node scTextNode = (Node) TEXT.evaluate(scNode, XPathConstants.NODE);
+                    configType = scTextNode.getNodeValue();
+                } else { // No service-config node, so check for a component config node
+                    Node cNode = (Node) COMPONENT.evaluate(propertyNode, XPathConstants.NODE);
+                    if (cNode != null) {
+                        // Component config property: the component type and the config
+                        // type both come from the component element's text content
+                        propertyType = Property.TYPE_COMPONENT;
+                        Node cTextNode = (Node) TEXT.evaluate(cNode, XPathConstants.NODE);
+                        configType = cTextNode.getNodeValue();
+                        componentType = cTextNode.getNodeValue();
+                    }
+                }
+
+                // Check for a config property node
+                Node cpNode = (Node) CONFIG_PROPERTY.evaluate(propertyNode, XPathConstants.NODE);
+                if (cpNode != null) {
+                    // Check for a condition element
+                    Node ifNode = (Node) IF.evaluate(cpNode, XPathConstants.NODE);
+                    if (ifNode != null) {
+                        propertyType = TYPE_DERIVED;
+                        pch = getConditionHandler(serviceName, ifNode);
+                    } else {
+                        Node cpTextNode = (Node) TEXT.evaluate(cpNode, XPathConstants.NODE);
+                        value = cpTextNode.getNodeValue();
+                    }
+                }
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+
+            // Create and return the property representation
+            return new Property(propertyType, propertyName, componentType, serviceType, configType, value, pch);
+        }
+
+        private static ConditionalValueHandler getConditionHandler(String serviceName, Node ifNode) throws Exception {
+            ConditionalValueHandler result = null;
+
+            if (ifNode != null) {
+                NamedNodeMap attrs = ifNode.getAttributes();
+                String comparisonPropName = attrs.getNamedItem(ATTR_PROPERTY).getNodeValue();
+                String comparisonValue = attrs.getNamedItem(ATTR_VALUE).getNodeValue();
+
+                ConditionalValueHandler affirmativeResult = null;
+                Node thenNode = (Node) THEN.evaluate(ifNode, XPathConstants.NODE);
+                if (thenNode != null) {
+                    Node subIfNode = (Node) IF.evaluate(thenNode, XPathConstants.NODE);
+                    if (subIfNode != null) {
+                        affirmativeResult = getConditionHandler(serviceName, subIfNode);
+                    } else {
+                        affirmativeResult = new SimpleValueHandler(thenNode.getFirstChild().getNodeValue());
+                    }
+                }
+
+                ConditionalValueHandler negativeResult = null;
+                Node elseNode = (Node) ELSE.evaluate(ifNode, XPathConstants.NODE);
+                if (elseNode != null) {
+                    Node subIfNode = (Node) IF.evaluate(elseNode, XPathConstants.NODE);
+                    if (subIfNode != null) {
+                        negativeResult = getConditionHandler(serviceName, subIfNode);
+                    } else {
+                        negativeResult = new SimpleValueHandler(elseNode.getFirstChild().getNodeValue());
+                    }
+                }
+
+                result = new PropertyEqualsHandler(serviceName,
+                        comparisonPropName,
+                        comparisonValue,
+                        affirmativeResult,
+                        negativeResult);
+            }
+
+            return result;
+        }
+
+        String getType() { return type; }
+        String getName() { return name; }
+        String getComponent() { return component; }
+        String getService() { return service; }
+        String getServiceConfig() { return serviceConfig; }
+        String getValue() {
+            return value;
+        }
+        ConditionalValueHandler getConditionHandler() { return conditionHandler; }
+    }
+}
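
The XPath expressions above imply the shape of the mapping XML this parser consumes. A minimal sketch, feeding a hand-written mapping through the InputStream constructor; the FOO service, its pattern, and its property names are invented, not taken from the bundled ambari-service-discovery-url-mappings.xml.

    package org.apache.knox.gateway.topology.discovery.ambari;

    import java.io.ByteArrayInputStream;

    class MappingConfigSketch {
        static ServiceURLPropertyConfig parseExample() {
            // One invented service: its URL is assembled from a component host
            // placeholder (HOST) and a service-config lookup (PORT).
            String xml =
                "<service-discovery-url-mappings>"
              + "  <service name=\"FOO\">"
              + "    <url-pattern>http://{HOST}:{PORT}/foo</url-pattern>"
              + "    <properties>"
              + "      <property name=\"HOST\">"
              + "        <component>FOO_SERVER</component>"
              + "        <hostname/>"
              + "      </property>"
              + "      <property name=\"PORT\">"
              + "        <service-config name=\"FOO\">foo-site</service-config>"
              + "        <config-property>foo.server.port</config-property>"
              + "      </property>"
              + "    </properties>"
              + "  </service>"
              + "</service-discovery-url-mappings>";
            return new ServiceURLPropertyConfig(new ByteArrayInputStream(xml.getBytes()));
        }
    }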

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/SimpleValueHandler.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/SimpleValueHandler.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/SimpleValueHandler.java
new file mode 100644
index 0000000..d1678d8
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/SimpleValueHandler.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+
+class SimpleValueHandler implements ConditionalValueHandler {
+    private String value;
+
+    SimpleValueHandler(String value) {
+        this.value = value;
+    }
+
+    @Override
+    public String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster) {
+        return value;
+    }
+}
+


[23/53] [abbrv] knox git commit: KNOX-998 - Some more refactoring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletRequest.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletRequest.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletRequest.java
new file mode 100644
index 0000000..b43465f
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletRequest.java
@@ -0,0 +1,410 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import javax.servlet.AsyncContext;
+import javax.servlet.DispatcherType;
+import javax.servlet.RequestDispatcher;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletInputStream;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpSession;
+import javax.servlet.http.HttpUpgradeHandler;
+import javax.servlet.http.Part;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.security.Principal;
+import java.util.Collection;
+import java.util.Enumeration;
+import java.util.Locale;
+import java.util.Map;
+
+public class MockHttpServletRequest implements HttpServletRequest {
+
+  private String queryString;
+  private String contentType;
+  private String characterEncoding;
+  private ServletInputStream inputStream;
+  private String method = "GET";
+
+  @Override
+  public String getAuthType() {
+    return null;
+  }
+
+  @Override
+  public Cookie[] getCookies() {
+    return new Cookie[ 0 ];
+  }
+
+  @Override
+  public long getDateHeader( String s ) {
+    return 0;
+  }
+
+  @Override
+  public String getHeader( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getHeaders( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getHeaderNames() {
+    return null;
+  }
+
+  @Override
+  public int getIntHeader( String s ) {
+    return 0;
+  }
+
+  @Override
+  public String getMethod() {
+    return method;
+  }
+
+  public void setMethod( String method ) {
+    this.method = method;
+  }
+
+  @Override
+  public String getPathInfo() {
+    return null;
+  }
+
+  @Override
+  public String getPathTranslated() {
+    return null;
+  }
+
+  @Override
+  public String getContextPath() {
+    return null;
+  }
+
+  @Override
+  public String getQueryString() {
+    return queryString;
+  }
+
+  public void setQueryString( String queryString ) {
+    this.queryString = queryString;
+  }
+
+  @Override
+  public String getRemoteUser() {
+    return null;
+  }
+
+  @Override
+  public boolean isUserInRole( String s ) {
+    return false;
+  }
+
+  @Override
+  public Principal getUserPrincipal() {
+    return null;
+  }
+
+  @Override
+  public String getRequestedSessionId() {
+    return null;
+  }
+
+  @Override
+  public String getRequestURI() {
+    return null;
+  }
+
+  @Override
+  public StringBuffer getRequestURL() {
+    return null;
+  }
+
+  @Override
+  public String getServletPath() {
+    return null;
+  }
+
+  @Override
+  public HttpSession getSession( boolean b ) {
+    return null;
+  }
+
+  @Override
+  public HttpSession getSession() {
+    return null;
+  }
+
+  @Override
+  public String changeSessionId() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean isRequestedSessionIdValid() {
+    return false;
+  }
+
+  @Override
+  public boolean isRequestedSessionIdFromCookie() {
+    return false;
+  }
+
+  @Override
+  public boolean isRequestedSessionIdFromURL() {
+    return false;
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public boolean isRequestedSessionIdFromUrl() {
+    return false;
+  }
+
+  @Override
+  public boolean authenticate( HttpServletResponse httpServletResponse ) throws IOException, ServletException {
+    return false;
+  }
+
+  @Override
+  public void login( String s, String s1 ) throws ServletException {
+  }
+
+  @Override
+  public void logout() throws ServletException {
+  }
+
+  @Override
+  public Collection<Part> getParts() throws IOException, ServletException {
+    return null;
+  }
+
+  @Override
+  public Part getPart( String s ) throws IOException, ServletException {
+    return null;
+  }
+
+  @Override
+  public <T extends HttpUpgradeHandler> T upgrade( Class<T> aClass ) throws IOException, ServletException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Object getAttribute( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getAttributeNames() {
+    return null;
+  }
+
+  @Override
+  public String getCharacterEncoding() {
+    return characterEncoding;
+  }
+
+  @Override
+  public void setCharacterEncoding( String characterEncoding ) throws UnsupportedEncodingException {
+    this.characterEncoding = characterEncoding;
+  }
+
+  @Override
+  public int getContentLength() {
+    return 0;
+  }
+
+  @Override
+  public long getContentLengthLong() {
+    return 0;
+  }
+
+  @Override
+  public String getContentType() {
+    return contentType;
+  }
+
+  public void setContentType( String contentType ) {
+    this.contentType = contentType;
+  }
+
+  @Override
+  public ServletInputStream getInputStream() throws IOException {
+    return inputStream;
+  }
+
+  public void setInputStream( ServletInputStream inputStream ) {
+    this.inputStream = inputStream;
+  }
+
+  @Override
+  public String getParameter( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getParameterNames() {
+    return null;
+  }
+
+  @Override
+  public String[] getParameterValues( String s ) {
+    return new String[ 0 ];
+  }
+
+  @Override
+  public Map<String, String[]> getParameterMap() {
+    return null;
+  }
+
+  @Override
+  public String getProtocol() {
+    return null;
+  }
+
+  @Override
+  public String getScheme() {
+    return null;
+  }
+
+  @Override
+  public String getServerName() {
+    return null;
+  }
+
+  @Override
+  public int getServerPort() {
+    return 0;
+  }
+
+  @Override
+  public BufferedReader getReader() throws IOException {
+    return null;
+  }
+
+  @Override
+  public String getRemoteAddr() {
+    return null;
+  }
+
+  @Override
+  public String getRemoteHost() {
+    return null;
+  }
+
+  @Override
+  public void setAttribute( String s, Object o ) {
+  }
+
+  @Override
+  public void removeAttribute( String s ) {
+  }
+
+  @Override
+  public Locale getLocale() {
+    return null;
+  }
+
+  @Override
+  public Enumeration<Locale> getLocales() {
+    return null;
+  }
+
+  @Override
+  public boolean isSecure() {
+    return false;
+  }
+
+  @Override
+  public RequestDispatcher getRequestDispatcher( String s ) {
+    return null;
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public String getRealPath( String s ) {
+    return null;
+  }
+
+  @Override
+  public int getRemotePort() {
+    return 0;
+  }
+
+  @Override
+  public String getLocalName() {
+    return null;
+  }
+
+  @Override
+  public String getLocalAddr() {
+    return null;
+  }
+
+  @Override
+  public int getLocalPort() {
+    return 0;
+  }
+
+  @Override
+  public ServletContext getServletContext() {
+    return null;
+  }
+
+  @Override
+  public AsyncContext startAsync() throws IllegalStateException {
+    return null;
+  }
+
+  @Override
+  public AsyncContext startAsync( ServletRequest servletRequest, ServletResponse servletResponse ) throws IllegalStateException {
+    return null;
+  }
+
+  @Override
+  public boolean isAsyncStarted() {
+    return false;
+  }
+
+  @Override
+  public boolean isAsyncSupported() {
+    return false;
+  }
+
+  @Override
+  public AsyncContext getAsyncContext() {
+    return null;
+  }
+
+  @Override
+  public DispatcherType getDispatcherType() {
+    return null;
+  }
+
+}
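
A small sketch of the intended use: tests set only the handful of fields the code under test reads (method, query string, content type, body), and every unset accessor returns null or zero. The printout below is illustrative only.

    package org.apache.knox.test.mock;

    public class MockRequestSketch {
      public static void main( String[] args ) {
        MockHttpServletRequest request = new MockHttpServletRequest();
        request.setMethod( "POST" );
        request.setQueryString( "op=LISTSTATUS" );
        // Unset fields fall back to the stub defaults (null here).
        System.out.println( request.getMethod() + " ?" + request.getQueryString()
            + " header=" + request.getHeader( "Accept" ) );
      }
    }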

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletResponse.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletResponse.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletResponse.java
new file mode 100644
index 0000000..69f69b3
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockHttpServletResponse.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Collection;
+import java.util.Locale;
+
+public class MockHttpServletResponse implements HttpServletResponse {
+
+  @Override
+  public void addCookie( Cookie cookie ) {
+  }
+
+  @Override
+  public boolean containsHeader( String s ) {
+    return false;
+  }
+
+  @Override
+  public String encodeURL( String s ) {
+    return null;
+  }
+
+  @Override
+  public String encodeRedirectURL( String s ) {
+    return null;
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public String encodeUrl( String s ) {
+    return null;
+  }
+
+  @Override
+  public String encodeRedirectUrl( String s ) {
+    return null;
+  }
+
+  @Override
+  public void sendError( int i, String s ) throws IOException {
+  }
+
+  @Override
+  public void sendError( int i ) throws IOException {
+  }
+
+  @Override
+  public void sendRedirect( String s ) throws IOException {
+  }
+
+  @Override
+  public void setDateHeader( String s, long l ) {
+  }
+
+  @Override
+  public void addDateHeader( String s, long l ) {
+  }
+
+  @Override
+  public void setHeader( String s, String s1 ) {
+  }
+
+  @Override
+  public void addHeader( String s, String s1 ) {
+  }
+
+  @Override
+  public void setIntHeader( String s, int i ) {
+  }
+
+  @Override
+  public void addIntHeader( String s, int i ) {
+  }
+
+  @Override
+  public void setStatus( int i ) {
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public void setStatus( int i, String s ) {
+  }
+
+  @Override
+  public int getStatus() {
+    return 0;
+  }
+
+  @Override
+  public String getHeader( String s ) {
+    return null;
+  }
+
+  @Override
+  public Collection<String> getHeaders( String s ) {
+    return null;
+  }
+
+  @Override
+  public Collection<String> getHeaderNames() {
+    return null;
+  }
+
+  @Override
+  public String getCharacterEncoding() {
+    return null;
+  }
+
+  @Override
+  public String getContentType() {
+    return null;
+  }
+
+  @Override
+  public ServletOutputStream getOutputStream() throws IOException {
+    return null;
+  }
+
+  @Override
+  public PrintWriter getWriter() throws IOException {
+    return null;
+  }
+
+  @Override
+  public void setCharacterEncoding( String s ) {
+  }
+
+  @Override
+  public void setContentLength( int i ) {
+  }
+
+  @Override
+  public void setContentLengthLong( long l ) {
+  }
+
+  @Override
+  public void setContentType( String s ) {
+  }
+
+  @Override
+  public void setBufferSize( int i ) {
+  }
+
+  @Override
+  public int getBufferSize() {
+    return 0;
+  }
+
+  @Override
+  public void flushBuffer() throws IOException {
+  }
+
+  @Override
+  public void resetBuffer() {
+  }
+
+  @Override
+  public boolean isCommitted() {
+    return false;
+  }
+
+  @Override
+  public void reset() {
+  }
+
+  @Override
+  public void setLocale( Locale locale ) {
+  }
+
+  @Override
+  public Locale getLocale() {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockInteraction.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockInteraction.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockInteraction.java
new file mode 100644
index 0000000..b326ec4
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockInteraction.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+public class MockInteraction {
+
+  private MockResponseProvider response = new MockResponseProvider();
+  private MockRequestMatcher request = new MockRequestMatcher( response );
+
+  public MockRequestMatcher expect() {
+    return request;
+  }
+
+  public MockResponseProvider respond() {
+    return response;
+  }
+
+}
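
A sketch of the fluent pairing this class enables: one expected request matched against one canned reply. The matcher calls are defined in MockRequestMatcher (below); the response side goes through MockResponseProvider, whose API is not part of this diff, so it is only referenced in a comment here.

    package org.apache.knox.test.mock;

    public class MockInteractionSketch {
      public static MockInteraction expectStatusCheck() {
        MockInteraction interaction = new MockInteraction();
        interaction.expect()
            .method( "GET" )
            .pathInfo( "/status" )
            .header( "Accept", "application/json" );
        // The canned reply would be configured via interaction.respond(),
        // i.e. on the MockResponseProvider not shown in this commit.
        return interaction;
      }
    }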

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockRequestMatcher.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockRequestMatcher.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockRequestMatcher.java
new file mode 100644
index 0000000..fc0a105
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockRequestMatcher.java
@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
+import org.apache.http.message.BasicNameValuePair;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalToIgnoringCase;
+import static org.xmlmatchers.XmlMatchers.isEquivalentTo;
+import static org.xmlmatchers.transform.XmlConverters.the;
+import static uk.co.datumedge.hamcrest.json.SameJSONAs.sameJSONAs;
+
+public class MockRequestMatcher {
+
+  private static final Charset UTF8 = Charset.forName( "UTF-8" );
+
+  private String from;
+  private MockResponseProvider response;
+  private Set<String> methods = null;
+  private String pathInfo = null;
+  private String requestURL = null;
+  Map<String,Matcher> headers = null;
+  Set<Cookie> cookies = null;
+  private Map<String,Object> attributes = null;
+  private Map<String,String> queryParams = null;
+  private String contentType = null;
+  private String characterEncoding = null;
+  private Integer contentLength = null;
+  private byte[] entity = null;
+  private Map<String,String[]> formParams = null;
+
+  public MockRequestMatcher( MockResponseProvider response ) {
+    this.response = response;
+  }
+
+  public MockResponseProvider respond() {
+    return response;
+  }
+
+  public MockRequestMatcher from( String from ) {
+    this.from = from;
+    return this;
+  }
+
+  public MockRequestMatcher method( String... methods ) {
+    if( this.methods == null ) {
+      this.methods = new HashSet<>();
+    }
+    if( methods != null ) {
+      for( String method: methods ) {
+        this.methods.add( method );
+      }
+    }
+    return this;
+  }
+
+  public MockRequestMatcher pathInfo( String pathInfo ) {
+    this.pathInfo = pathInfo;
+    return this;
+  }
+
+  public MockRequestMatcher requestUrl( String requestUrl ) {
+    this.requestURL = requestUrl;
+    return this;
+  }
+
+  public MockRequestMatcher header( String name, String value ) {
+    if( headers == null ) {
+      headers = new HashMap<>();
+    }
+    headers.put( name, Matchers.is(value) );
+    return this;
+  }
+
+  public MockRequestMatcher header( String name, Matcher matcher ) {
+    if( headers == null ) {
+      headers = new HashMap<>();
+    }
+    headers.put( name, matcher );
+    return this;
+  }
+
+  public MockRequestMatcher cookie( Cookie cookie ) {
+    if( cookies == null ) {
+      cookies = new HashSet<>();
+    }
+    cookies.add( cookie );
+    return this;
+  }
+
+  public MockRequestMatcher attribute( String name, Object value ) {
+    if( this.attributes == null ) {
+      this.attributes = new HashMap<>();
+    }
+    attributes.put( name, value );
+    return this;
+  }
+
+  public MockRequestMatcher queryParam( String name, String value ) {
+    if( this.queryParams == null ) {
+      this.queryParams = new HashMap<>();
+    }
+    queryParams.put( name, value );
+    return this;
+  }
+
+  public MockRequestMatcher formParam( String name, String... values ) {
+    if( entity != null ) {
+      throw new IllegalStateException( "Entity already specified." );
+    }
+    if( formParams == null ) {
+      formParams = new HashMap<>();
+    }
+    String[] currentValues = formParams.get( name );
+    if( currentValues == null ) {
+      currentValues = values;
+    } else if ( values != null ) {
+      currentValues = ArrayUtils.addAll( currentValues, values );
+    }
+    formParams.put( name, currentValues );
+    return this;
+  }
+
+  public MockRequestMatcher content( String string, Charset charset ) {
+    content( string.getBytes( charset ) );
+    return this;
+  }
+
+  public MockRequestMatcher content( byte[] entity ) {
+    if( formParams != null ) {
+      throw new IllegalStateException( "Form params already specified." );
+    }
+    this.entity = entity;
+    return this;
+  }
+
+  public MockRequestMatcher content( URL url ) throws IOException {
+    content( url.openStream() );
+    return this;
+  }
+
+  public MockRequestMatcher content( InputStream stream ) throws IOException {
+    content( IOUtils.toByteArray( stream ) );
+    return this;
+  }
+
+  public MockRequestMatcher contentType( String contentType ) {
+    this.contentType = contentType;
+    return this;
+  }
+
+  public MockRequestMatcher contentLength( int length ) {
+    this.contentLength = length;
+    return this;
+  }
+
+  public MockRequestMatcher characterEncoding( String charset ) {
+    this.characterEncoding = charset;
+    return this;
+  }
+
+  public void match( HttpServletRequest request ) throws IOException {
+    if( methods != null ) {
+      assertThat(
+          "Request " + request.getMethod() + " " + request.getRequestURL() +
+              " is not using one of the expected HTTP methods",
+          methods, hasItem( request.getMethod() ) );
+    }
+    if( pathInfo != null ) {
+      assertThat(
+          "Request " + request.getMethod() + " " + request.getRequestURL() +
+              " does not have the expected pathInfo",
+          request.getPathInfo(), is( pathInfo ) );
+    }
+    if( requestURL != null ) {
+      assertThat( 
+          "Request " + request.getMethod() + " " + request.getRequestURL() +
+              " does not have the expected requestURL",
+          request.getRequestURL().toString(), is( requestURL ) );
+    }
+    if( headers != null ) {
+      for( Entry<String, Matcher> entry : headers.entrySet() ) {
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " does not have the expected value for header " + entry.getKey(),
+            request.getHeader( entry.getKey() ),  entry.getValue() );
+      }
+    }
+    if( cookies != null ) {
+      List<Cookie> requestCookies = Arrays.asList( request.getCookies() );
+      for( Cookie cookie: cookies ) {
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " does not have the expected cookie " + cookie,
+            requestCookies, hasItem( cookie ) );
+      }
+    }
+    if( contentType != null ) {
+      String[] requestContentType = request.getContentType().split( ";", 2 );
+      assertThat(
+          "Request " + request.getMethod() + " " + request.getRequestURL() +
+              " does not have the expected content type",
+          requestContentType[ 0 ], is( contentType ) );
+    }
+    if( characterEncoding != null ) {
+      assertThat(
+          "Request " + request.getMethod() + " " + request.getRequestURL() +
+              " does not have the expected character encoding",
+          request.getCharacterEncoding(), equalToIgnoringCase( characterEncoding ) );
+    }
+    if( contentLength != null ) {
+      assertThat(
+          "Request " + request.getMethod() + " " + request.getRequestURL() +
+              " does not have the expected content length",
+          request.getContentLength(), is( contentLength ) );
+    }
+    if( attributes != null ) {
+      for( String name: attributes.keySet() ) {
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " is missing attribute '" + name + "'",
+            request.getAttribute( name ), notNullValue() );
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " has wrong value for attribute '" + name + "'",
+            request.getAttribute( name ), is( attributes.get( name ) ) );
+      }
+    }
+    // Note: Cannot use any of the expect.getParameter*() methods because they will read the
+    // body and we don't want that to happen.
+    if( queryParams != null ) {
+      String queryString = request.getQueryString();
+      List<NameValuePair> requestParams = parseQueryString( queryString == null ? "" : queryString );
+      for( Entry<String, String> entry : queryParams.entrySet() ) {
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " query string " + queryString + " is missing parameter '" + entry.getKey() + "'",
+            requestParams, hasItem( new BasicNameValuePair(entry.getKey(), entry.getValue())) );
+      }
+    }
+    if( formParams != null ) {
+      String paramString = IOUtils.toString( request.getInputStream(), request.getCharacterEncoding() );
+      List<NameValuePair> requestParams = parseQueryString( paramString == null ? "" : paramString );
+      for( Entry<String, String[]> entry : formParams.entrySet() ) {
+        String[] expectedValues = entry.getValue();
+        for( String expectedValue : expectedValues ) {
+          assertThat(
+              "Request " + request.getMethod() + " " + request.getRequestURL() +
+                  " form params " + paramString + " is missing a value " + expectedValue + " for parameter '" + entry.getKey() + "'",
+              requestParams, hasItem( new BasicNameValuePair(entry.getKey(), expectedValue ) ));
+        }
+      }
+    }
+    if( entity != null ) {
+      if( contentType != null && contentType.endsWith( "/xml" ) ) {
+        String expectEncoding = characterEncoding;
+        String expect = new String( entity, ( expectEncoding == null ? UTF8.name() : expectEncoding ) );
+        String actualEncoding = request.getCharacterEncoding();
+        String actual = IOUtils.toString( request.getInputStream(), actualEncoding == null ? UTF8.name() : actualEncoding );
+        assertThat( the( actual ), isEquivalentTo( the( expect ) ) );
+      } else if ( contentType != null && contentType.endsWith( "/json" ) )  {
+        String expectEncoding = characterEncoding;
+        String expect = new String( entity, ( expectEncoding == null ? UTF8.name() : expectEncoding ) );
+        String actualEncoding = request.getCharacterEncoding();
+        String actual = IOUtils.toString( request.getInputStream(), actualEncoding == null ? UTF8.name() : actualEncoding );
+        assertThat( actual, sameJSONAs( expect ) );
+      } else if( characterEncoding == null || request.getCharacterEncoding() == null ) {
+        byte[] bytes = IOUtils.toByteArray( request.getInputStream() );
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " content does not match the expected content",
+            bytes, is( entity ) );
+      } else {
+        String expect = new String( entity, characterEncoding );
+        String actual = IOUtils.toString( request.getInputStream(), request.getCharacterEncoding() );
+        assertThat(
+            "Request " + request.getMethod() + " " + request.getRequestURL() +
+                " content does not match the expected content",
+            actual, is( expect ) );
+      }
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "from=" + from + ", pathInfo=" + pathInfo;
+  }
+
+  private static List<NameValuePair> parseQueryString( String queryString ) {
+    // Parse with UTF-8 rather than the platform default charset so results do not vary by environment.
+    return URLEncodedUtils.parse( queryString, UTF8 );
+  }
+
+}
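
Worth noting from MockRequestMatcher above: header() is overloaded to accept either a literal value (internally wrapped in Matchers.is) or an arbitrary Hamcrest Matcher, so loose assertions such as prefix or case-insensitive checks can be expressed directly. A small illustrative sketch (the wrapper class is hypothetical):

import org.apache.knox.test.mock.MockRequestMatcher;
import org.apache.knox.test.mock.MockResponseProvider;
import org.hamcrest.Matchers;

public class RequestMatcherSketch {
  public static void main( String[] args ) {
    MockRequestMatcher matcher = new MockRequestMatcher( new MockResponseProvider() );
    matcher.method( "PUT", "POST" )                               // any listed method is accepted
           .header( "Accept", "application/json" )                // exact header value
           .header( "User-Agent", Matchers.startsWith( "curl" ) ) // matcher-based header check
           .queryParam( "op", "CREATE" )
           .contentType( "application/json" );
    // match( request ) later asserts each expectation against a live HttpServletRequest.
  }
}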

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockResponseProvider.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockResponseProvider.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockResponseProvider.java
new file mode 100644
index 0000000..503ff65
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockResponseProvider.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import org.apache.commons.io.IOUtils;
+
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+public class MockResponseProvider {
+
+  Integer errorCode = null;
+  String errorMsg = null;
+  Integer statusCode = null;
+  String redirectUrl = null;
+  Map<String,String> headers = null;
+  Set<Cookie> cookies = null;
+  byte[] entity = null;
+  String contentType = null;
+  String characterEncoding = null;
+  Integer contentLength = null;
+
+  public MockResponseProvider status( int statusCode ) {
+    this.statusCode = statusCode;
+    return this;
+  }
+
+  public MockResponseProvider error( int code, String message ) {
+    errorCode = code;
+    errorMsg = message;
+    return this;
+  }
+
+  public MockResponseProvider redirect( String location ) {
+    redirectUrl = location;
+    return this;
+  }
+
+  public MockResponseProvider header( String name, String value ) {
+    if( headers == null ) {
+      headers = new HashMap<>();
+    }
+    headers.put( name, value );
+    return this;
+  }
+
+  public MockResponseProvider cookie( Cookie cookie ) {
+    if( cookies == null ) {
+      cookies = new HashSet<>();
+    }
+    cookies.add( cookie );
+    return this;
+  }
+
+  public MockResponseProvider content( byte[] entity ) {
+    this.entity = entity;
+    return this;
+  }
+
+  public MockResponseProvider content( String string, Charset charset ) {
+    this.entity = string.getBytes( charset );
+    return this;
+  }
+
+  public MockResponseProvider content( URL url ) throws IOException {
+    content( url.openStream() );
+    return this;
+  }
+
+  public MockResponseProvider content( InputStream stream ) throws IOException {
+    content( IOUtils.toByteArray( stream ) );
+    return this;
+  }
+
+  public MockResponseProvider contentType( String contentType ) {
+    this.contentType = contentType;
+    return this;
+  }
+
+  public MockResponseProvider contentLength( int contentLength ) {
+    this.contentLength = contentLength;
+    return this;
+  }
+
+  public MockResponseProvider characterEncoding( String charset ) {
+    this.characterEncoding = charset;
+    return this;
+  }
+
+  public void apply( HttpServletResponse response ) throws IOException {
+    if( statusCode != null ) {
+      response.setStatus( statusCode );
+    } else {
+      response.setStatus( HttpServletResponse.SC_OK );
+    }
+    if( errorCode != null ) {
+      if( errorMsg != null ) {
+        response.sendError( errorCode, errorMsg );
+      } else {
+        response.sendError( errorCode );
+      }
+    }
+    if( redirectUrl != null ) {
+      response.sendRedirect( redirectUrl );
+    }
+    if( headers != null ) {
+      for( Entry<String, String> entry : headers.entrySet() ) {
+        response.addHeader( entry.getKey(), entry.getValue() );
+      }
+    }
+    if( cookies != null ) {
+      for( Cookie cookie: cookies ) {
+        response.addCookie( cookie );
+      }
+    }
+    if( contentType != null ) {
+      response.setContentType( contentType );
+    }
+    if( characterEncoding != null ) {
+      response.setCharacterEncoding( characterEncoding );
+    }
+    if( contentLength != null ) {
+      response.setContentLength( contentLength );
+    }
+    response.flushBuffer();
+    if( entity != null ) {
+      response.getOutputStream().write( entity );
+      //KNOX-685: response.getOutputStream().flush();
+      response.getOutputStream().close();
+    }
+  }
+
+}
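
A quick sketch of configuring a MockResponseProvider (illustrative; the wrapper class is hypothetical). Note from apply() above that the status line, headers, and cookies are committed via flushBuffer() before the entity bytes are written, which is the KNOX-685 workaround referenced in the code:

import org.apache.knox.test.mock.MockResponseProvider;

import java.nio.charset.StandardCharsets;

public class ResponseProviderSketch {
  public static void main( String[] args ) {
    MockResponseProvider provider = new MockResponseProvider();
    provider.status( 201 )
            .header( "Location", "/v1/tmp/file" )
            .contentType( "application/json" )
            .characterEncoding( "UTF-8" )
            .content( "{\"created\":true}", StandardCharsets.UTF_8 );
    // provider.apply( response ) later stamps all of this onto an HttpServletResponse.
  }
}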

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServer.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServer.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServer.java
new file mode 100644
index 0000000..09905cd
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServer.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import org.eclipse.jetty.server.Handler;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.Servlet;
+import java.util.LinkedList;
+import java.util.Queue;
+
+/**
+ * An embedded Jetty server with a single servlet deployed on "/*".
+ * It is used by populating a queue of "interactions", where each
+ * interaction pairs an expected request with the response to produce for it.
+ * Interactions are added to the queue in a fluent API style, so in most
+ * tests (e.g. GatewayBasicFuncTest.testBasicJsonUseCase) you will see calls like
+ * driver.getMock( "WEBHDFS" ).expect()....respond()...;
+ * Each such call adds a single interaction to the mock server returned by driver.getMock( "WEBHDFS" ).
+ * Any number of interactions may be added.
+ * When a request arrives, it is checked against the next expected request in the queue:
+ * if it matches, the configured response is returned; otherwise a 500 error is returned.
+ * Typically, at the end of a test you should verify that the interaction queue has been
+ * fully consumed by calling isEmpty().
+ * The reset() method can be used to clean everything up so the mock server can be reused between tests.
+ * The design was modeled after the REST testing framework REST-assured and aims to be its server-side equivalent.
+ */
+public class MockServer {
+
+  private Logger log = LoggerFactory.getLogger( this.getClass() );
+
+  private String name;
+  private Server jetty;
+
+  private Queue<MockInteraction> interactions = new LinkedList<>();
+
+  public MockServer( String name ) {
+    this.name = name;
+  }
+
+  public MockServer( String name, boolean start ) throws Exception {
+    this.name = name;
+    if( start ) {
+      start();
+    }
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void start() throws Exception {
+    Handler context = createHandler();
+    jetty = new Server(0);
+    jetty.setHandler( context );
+    jetty.start();
+    log.info( "Mock server started on port " + getPort() );
+  }
+
+  public void stop() throws Exception {
+    jetty.stop();
+    jetty.join();
+  }
+
+  private ServletContextHandler createHandler() {
+    Servlet servlet = new MockServlet( getName(), interactions );
+    ServletHolder holder = new ServletHolder( servlet );
+    ServletContextHandler context = new ServletContextHandler( ServletContextHandler.SESSIONS );
+    context.setContextPath( "/" );
+    context.addServlet( holder, "/*" );
+    return context;
+  }
+
+  public int getPort() {
+    return jetty.getURI().getPort();
+  }
+
+  public MockRequestMatcher expect() {
+    MockInteraction interaction = new MockInteraction();
+    interactions.add( interaction );
+    return interaction.expect();
+  }
+
+  public MockResponseProvider respond() {
+    MockInteraction interaction = new MockInteraction();
+    interactions.add( interaction );
+    return interaction.respond();
+  }
+
+  public int getCount() {
+    return interactions.size();
+  }
+
+  public boolean isEmpty() {
+    return interactions.isEmpty();
+  }
+
+  public void reset() {
+    interactions.clear();
+  }
+}
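
To make the javadoc above concrete, here is a minimal end-to-end usage sketch in the spirit of GatewayBasicFuncTest.testBasicJsonUseCase (illustrative only; the standalone class and the JSON payload are hypothetical):

import org.apache.knox.test.mock.MockServer;

import java.nio.charset.StandardCharsets;

public class MockServerUsageSketch {
  public static void main( String[] args ) throws Exception {
    // Start an embedded Jetty server with the mock servlet deployed on "/*".
    MockServer server = new MockServer( "WEBHDFS", true );

    // Queue one interaction: the expected request and its canned response.
    server.expect()
        .method( "GET" )
        .pathInfo( "/v1/tmp" )
        .queryParam( "op", "LISTSTATUS" )
        .respond()
        .status( 200 )
        .contentType( "application/json" )
        .content( "{\"FileStatuses\":{\"FileStatus\":[]}}", StandardCharsets.UTF_8 );

    // A client would now issue GET http://localhost:<port>/v1/tmp?op=LISTSTATUS
    // using server.getPort(); a non-matching request fails the queued assertion
    // and the servlet answers with a 500.
    System.out.println( "Mock listening on port " + server.getPort() );

    // In a real test, finish by verifying the queue was fully consumed:
    //   assertThat( server.isEmpty(), is( true ) );
    server.stop();
  }
}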

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServlet.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServlet.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServlet.java
new file mode 100644
index 0000000..2c82dfd
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServlet.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.util.Queue;
+
+import org.apache.log4j.Logger;
+
+import static org.junit.Assert.fail;
+
+public class MockServlet extends HttpServlet {
+
+  private static final Logger LOG = Logger.getLogger(MockServlet.class.getName());
+
+  public String name;
+  public Queue<MockInteraction> interactions;
+
+  public MockServlet( String name, Queue<MockInteraction> interactions ) {
+    this.name = name;
+    this.interactions = interactions;
+  }
+
+  @Override
+  protected void service( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException {
+    LOG.debug( "service: request=" + request.getMethod() + " " + request.getRequestURL() + "?" + request.getQueryString() );
+    try {
+      if( interactions.isEmpty() ) {
+        fail( "Mock servlet " + name + " received a request but the expected interaction queue is empty." );
+      }
+      MockInteraction interaction = interactions.remove();
+      interaction.expect().match( request );
+      interaction.respond().apply( response );
+      LOG.debug( "service: response=" + response.getStatus() );
+    } catch( AssertionError e ) {
+      LOG.debug( "service: exception=" + e.getMessage() );
+      e.printStackTrace(); // I18N not required.
+      throw new ServletException( e );
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletContext.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletContext.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletContext.java
new file mode 100644
index 0000000..4181067
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletContext.java
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterRegistration;
+import javax.servlet.RequestDispatcher;
+import javax.servlet.Servlet;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRegistration;
+import javax.servlet.SessionCookieConfig;
+import javax.servlet.SessionTrackingMode;
+import javax.servlet.descriptor.JspConfigDescriptor;
+import java.io.InputStream;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Enumeration;
+import java.util.EventListener;
+import java.util.Map;
+import java.util.Set;
+
+public class MockServletContext implements ServletContext {
+
+  @Override
+  public String getContextPath() {
+    return null;
+  }
+
+  @Override
+  public ServletContext getContext( String s ) {
+    return null;
+  }
+
+  @Override
+  public int getMajorVersion() {
+    return 0;
+  }
+
+  @Override
+  public int getMinorVersion() {
+    return 0;
+  }
+
+  @Override
+  public int getEffectiveMajorVersion() {
+    return 0;
+  }
+
+  @Override
+  public int getEffectiveMinorVersion() {
+    return 0;
+  }
+
+  @Override
+  public String getMimeType( String s ) {
+    return null;
+  }
+
+  @Override
+  public Set<String> getResourcePaths( String s ) {
+    return null;
+  }
+
+  @Override
+  public URL getResource( String s ) throws MalformedURLException {
+    return null;
+  }
+
+  @Override
+  public InputStream getResourceAsStream( String s ) {
+    return null;
+  }
+
+  @Override
+  public RequestDispatcher getRequestDispatcher( String s ) {
+    return null;
+  }
+
+  @Override
+  public RequestDispatcher getNamedDispatcher( String s ) {
+    return null;
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public Servlet getServlet( String s ) throws ServletException {
+    return null;
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public Enumeration<Servlet> getServlets() {
+    return null;
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public Enumeration<String> getServletNames() {
+    return null;
+  }
+
+  @Override
+  public void log( String s ) {
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public void log( Exception e, String s ) {
+  }
+
+  @Override
+  public void log( String s, Throwable throwable ) {
+  }
+
+  @Override
+  public String getRealPath( String s ) {
+    return null;
+  }
+
+  @Override
+  public String getServerInfo() {
+    return null;
+  }
+
+  @Override
+  public String getInitParameter( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getInitParameterNames() {
+    return null;
+  }
+
+  @Override
+  public boolean setInitParameter( String s, String s1 ) {
+    return false;
+  }
+
+  @Override
+  public Object getAttribute( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getAttributeNames() {
+    return null;
+  }
+
+  @Override
+  public void setAttribute( String s, Object o ) {
+  }
+
+  @Override
+  public void removeAttribute( String s ) {
+  }
+
+  @Override
+  public String getServletContextName() {
+    return null;
+  }
+
+  @Override
+  public ServletRegistration.Dynamic addServlet( String s, String s1 ) {
+    return null;
+  }
+
+  @Override
+  public ServletRegistration.Dynamic addServlet( String s, Servlet servlet ) {
+    return null;
+  }
+
+  @Override
+  public ServletRegistration.Dynamic addServlet( String s, Class<? extends Servlet> aClass ) {
+    return null;
+  }
+
+  @Override
+  public <T extends Servlet> T createServlet( Class<T> tClass ) throws ServletException {
+    return null;
+  }
+
+  @Override
+  public ServletRegistration getServletRegistration( String s ) {
+    return null;
+  }
+
+  @Override
+  public Map<String, ? extends ServletRegistration> getServletRegistrations() {
+    return null;
+  }
+
+  @Override
+  public FilterRegistration.Dynamic addFilter( String s, String s1 ) {
+    return null;
+  }
+
+  @Override
+  public FilterRegistration.Dynamic addFilter( String s, Filter filter ) {
+    return null;
+  }
+
+  @Override
+  public FilterRegistration.Dynamic addFilter( String s, Class<? extends Filter> aClass ) {
+    return null;
+  }
+
+  @Override
+  public <T extends Filter> T createFilter( Class<T> tClass ) throws ServletException {
+    return null;
+  }
+
+  @Override
+  public FilterRegistration getFilterRegistration( String s ) {
+    return null;
+  }
+
+  @Override
+  public Map<String, ? extends FilterRegistration> getFilterRegistrations() {
+    return null;
+  }
+
+  @Override
+  public SessionCookieConfig getSessionCookieConfig() {
+    return null;
+  }
+
+  @Override
+  public void setSessionTrackingModes( Set<SessionTrackingMode> sessionTrackingModes ) {
+  }
+
+  @Override
+  public Set<SessionTrackingMode> getDefaultSessionTrackingModes() {
+    return null;
+  }
+
+  @Override
+  public Set<SessionTrackingMode> getEffectiveSessionTrackingModes() {
+    return null;
+  }
+
+  @Override
+  public void addListener( String s ) {
+  }
+
+  @Override
+  public <T extends EventListener> void addListener( T t ) {
+  }
+
+  @Override
+  public void addListener( Class<? extends EventListener> aClass ) {
+  }
+
+  @Override
+  public <T extends EventListener> T createListener( Class<T> tClass ) throws ServletException {
+    return null;
+  }
+
+  @Override
+  public JspConfigDescriptor getJspConfigDescriptor() {
+    return null;
+  }
+
+  @Override
+  public ClassLoader getClassLoader() {
+    return null;
+  }
+
+  @Override
+  public void declareRoles( String... strings ) {
+  }
+
+  @Override
+  public String getVirtualServerName() {
+    throw new UnsupportedOperationException();
+  }
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletInputStream.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletInputStream.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletInputStream.java
new file mode 100644
index 0000000..82eda72
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockServletInputStream.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import javax.servlet.ReadListener;
+import javax.servlet.ServletInputStream;
+
+public class MockServletInputStream extends ServletInputStream {
+
+  private InputStream stream;
+
+  public MockServletInputStream( InputStream stream ) {
+    this.stream = stream;
+  }
+
+  @Override
+  public int read() throws IOException {
+    return stream.read();
+  }
+
+  @Override
+  public boolean isFinished() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean isReady() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void setReadListener( ReadListener readListener ) {
+    throw new UnsupportedOperationException();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/AmbariServiceDefinitionTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/AmbariServiceDefinitionTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/AmbariServiceDefinitionTest.java
index 79837e8..9478574 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/AmbariServiceDefinitionTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/AmbariServiceDefinitionTest.java
@@ -23,8 +23,8 @@ import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpStatus;
 import org.apache.velocity.Template;
 import org.apache.velocity.VelocityContext;
@@ -48,8 +48,8 @@ import java.util.Properties;
 import java.util.UUID;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static uk.co.datumedge.hamcrest.json.SameJSONAs.sameJSONAs;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminFuncTest.java
index eba5de6..9778169 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminFuncTest.java
@@ -22,7 +22,7 @@ import com.mycila.xmltool.XMLTag;
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.hamcrest.MatcherAssert;
 import org.junit.AfterClass;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
index 7dcb4e0..e6f7b80 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
@@ -34,9 +34,7 @@ import javax.ws.rs.core.MediaType;
 import io.restassured.http.ContentType;
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 import org.apache.knox.gateway.config.GatewayConfig;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
@@ -49,7 +47,7 @@ import org.apache.knox.gateway.util.XmlUtils;
 import io.restassured.response.ResponseBody;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.FilenameUtils;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;
@@ -63,8 +61,8 @@ import org.xml.sax.InputSource;
 
 import static io.restassured.RestAssured.given;
 import static junit.framework.TestCase.assertTrue;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.is;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAppFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAppFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAppFuncTest.java
index a282cfe..84bed16 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAppFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAppFuncTest.java
@@ -30,14 +30,12 @@ import java.util.Properties;
 import java.util.UUID;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;
@@ -49,8 +47,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.Matchers.arrayWithSize;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
index 3adf41a..02be270 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
@@ -47,10 +47,10 @@ import com.mycila.xmltool.XMLTag;
 import org.apache.commons.io.filefilter.WildcardFileFilter;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.knox.gateway.util.KnoxCLI;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.MediumTests;
-import org.apache.hadoop.test.category.VerifyTest;
-import org.apache.hadoop.test.mock.MockRequestMatcher;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.MediumTests;
+import org.apache.knox.test.category.VerifyTest;
+import org.apache.knox.test.mock.MockRequestMatcher;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpResponse;
 import org.apache.http.HttpStatus;
@@ -87,8 +87,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.*;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.greaterThan;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayDeployFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayDeployFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayDeployFuncTest.java
index 9349dca..137cd47 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayDeployFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayDeployFuncTest.java
@@ -21,13 +21,11 @@ import io.restassured.response.Response;
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
 import org.apache.commons.io.FileUtils;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 import org.apache.knox.gateway.config.GatewayConfig;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;
@@ -51,8 +49,8 @@ import java.util.UUID;
 import java.util.regex.Pattern;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayHealthFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayHealthFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayHealthFuncTest.java
index c7ac9ee..3c71248 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayHealthFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayHealthFuncTest.java
@@ -25,7 +25,7 @@ import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.hamcrest.MatcherAssert;
 import org.junit.AfterClass;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapDynamicGroupFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapDynamicGroupFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapDynamicGroupFuncTest.java
index 3a3d776..74b8a21 100755
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapDynamicGroupFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapDynamicGroupFuncTest.java
@@ -18,8 +18,8 @@
 package org.apache.knox.gateway;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 
@@ -39,7 +39,7 @@ import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapGroupFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapGroupFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapGroupFuncTest.java
index 37ee90c..ba044b4 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapGroupFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapGroupFuncTest.java
@@ -18,15 +18,14 @@
 package org.apache.knox.gateway;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.net.URL;
 import java.util.Enumeration;
 import java.util.HashMap;
@@ -39,7 +38,7 @@ import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapPosixGroupFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapPosixGroupFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapPosixGroupFuncTest.java
index b623f06..2654db1 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapPosixGroupFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLdapPosixGroupFuncTest.java
@@ -19,15 +19,13 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 import org.apache.knox.gateway.config.GatewayConfig;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;
@@ -43,7 +41,6 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.Enumeration;
 import java.util.HashMap;
@@ -51,8 +48,8 @@ import java.util.Map;
 import java.util.UUID;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLocalServiceFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLocalServiceFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLocalServiceFuncTest.java
index 442a767..d73d200 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLocalServiceFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayLocalServiceFuncTest.java
@@ -20,13 +20,11 @@ package org.apache.knox.gateway;
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
 import org.apache.commons.io.FileUtils;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 import org.apache.knox.gateway.config.GatewayConfig;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpAppender;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpAppender;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;
@@ -44,8 +42,8 @@ import java.util.Map;
 import java.util.UUID;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.junit.Assert.assertThat;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayMultiFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayMultiFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayMultiFuncTest.java
index 6dc469c..e14f44a 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayMultiFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayMultiFuncTest.java
@@ -29,14 +29,13 @@ import java.util.UUID;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.directory.server.protocol.shared.transport.TcpTransport;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpStatus;
 import org.apache.http.auth.AuthScope;
@@ -61,8 +60,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.endsWith;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.notNullValue;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingDisableFeatureTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingDisableFeatureTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingDisableFeatureTest.java
index a4d8166..3c0429f 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingDisableFeatureTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingDisableFeatureTest.java
@@ -20,9 +20,9 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpStatus;
 import org.junit.After;
 import org.junit.Before;
@@ -36,8 +36,8 @@ import java.net.ConnectException;
 import java.util.concurrent.ConcurrentHashMap;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 
 /**

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFailTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFailTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFailTest.java
index bc01c86..dcaa353 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFailTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFailTest.java
@@ -18,9 +18,9 @@ package org.apache.knox.gateway;
  * limitations under the License.
  */
 
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpStatus;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -31,8 +31,8 @@ import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 
 /**
  * Test the fail cases for the Port Mapping Feature

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFuncTest.java
index cbf138b..18e1487 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayPortMappingFuncTest.java
@@ -20,9 +20,9 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpStatus;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -33,8 +33,8 @@ import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 
 /**

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySampleFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySampleFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySampleFuncTest.java
index b146972..757ade7 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySampleFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySampleFuncTest.java
@@ -19,12 +19,10 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 import org.apache.knox.gateway.config.GatewayConfig;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;
@@ -43,8 +41,8 @@ import java.util.Map;
 import java.util.UUID;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySslFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySslFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySslFuncTest.java
index 3726dbc..4e85542 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySslFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewaySslFuncTest.java
@@ -18,8 +18,6 @@
 package org.apache.knox.gateway;
 
 import java.io.File;
-import java.nio.file.FileSystems;
-import java.nio.file.Path;
 import java.security.KeyManagementException;
 import java.security.NoSuchAlgorithmException;
 import java.security.SecureRandom;
@@ -42,15 +40,13 @@ import javax.net.ssl.X509TrustManager;
 import javax.xml.transform.stream.StreamSource;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpHost;
 import org.apache.http.auth.AuthScope;
 import org.apache.http.auth.UsernamePasswordCredentials;
@@ -75,8 +71,8 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/Knox242FuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/Knox242FuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/Knox242FuncTest.java
index cd30311..024919b 100755
--- a/gateway-test/src/test/java/org/apache/knox/gateway/Knox242FuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/Knox242FuncTest.java
@@ -18,16 +18,14 @@
 package org.apache.knox.gateway;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetSocketAddress;
 import java.net.URL;
 import java.nio.file.FileSystems;
 import java.nio.file.Path;
@@ -41,7 +39,7 @@ import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.http.HttpStatus;
 import org.apache.log4j.Appender;
 import org.hamcrest.MatcherAssert;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestNegative.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestNegative.java b/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestNegative.java
index fc2f601..e59f3a0 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestNegative.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestNegative.java
@@ -22,8 +22,8 @@ import com.mycila.xmltool.XMLTag;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.util.KnoxCLI;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpAppender;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpAppender;
 import org.apache.log4j.Appender;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
@@ -38,8 +38,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
 
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestPositive.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestPositive.java b/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestPositive.java
index f612a4e..12a7c15 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestPositive.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliLdapFuncTestPositive.java
@@ -19,13 +19,11 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.util.KnoxCLI;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpAppender;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpAppender;
 import org.apache.log4j.Appender;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
@@ -40,8 +38,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
 
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertThat;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliSysBindTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliSysBindTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliSysBindTest.java
index 73336c7..d8b6496 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliSysBindTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/KnoxCliSysBindTest.java
@@ -19,13 +19,11 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.directory.server.protocol.shared.transport.TcpTransport;
-import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.util.KnoxCLI;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpAppender;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpAppender;
 import org.apache.log4j.Appender;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -40,8 +38,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
 
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertThat;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/OozieServiceDefinitionTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/OozieServiceDefinitionTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/OozieServiceDefinitionTest.java
index b0f23e7..491b6bb 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/OozieServiceDefinitionTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/OozieServiceDefinitionTest.java
@@ -28,8 +28,8 @@ import org.apache.knox.gateway.filter.rewrite.impl.UrlRewriteRequest;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.registry.ServiceRegistry;
 import org.apache.knox.gateway.util.XmlUtils;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.mock.MockServletInputStream;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.mock.MockServletInputStream;
 import org.easymock.EasyMock;
 import org.junit.Test;
 import org.w3c.dom.Document;
@@ -40,8 +40,8 @@ import javax.servlet.http.HttpServletRequest;
 import java.io.InputStream;
 import java.io.Reader;
 
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.xml.HasXPath.hasXPath;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/WebHdfsHaFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/WebHdfsHaFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/WebHdfsHaFuncTest.java
index 98739a1..f4f77e2 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/WebHdfsHaFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/WebHdfsHaFuncTest.java
@@ -19,9 +19,9 @@ package org.apache.knox.gateway;
 
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
+import org.apache.knox.test.mock.MockServer;
 import org.apache.http.HttpStatus;
 import org.junit.After;
 import org.junit.Before;
@@ -32,8 +32,8 @@ import org.junit.experimental.categories.Category;
 import java.io.IOException;
 
 import static io.restassured.RestAssured.given;
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 
 @Category(ReleaseTest.class)
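
The hunks above are purely mechanical: each test swaps its imports of the
shared helpers (TestUtils, ReleaseTest, MockServer, NoOpAppender,
MockServletInputStream) from org.apache.hadoop.test to the relocated
org.apache.knox.test package, with no change to test logic. For orientation,
a minimal sketch of the logging convention these tests share, assuming only
that LOG_ENTER/LOG_EXIT are the no-arg logging helpers their static imports
suggest (the test class and body here are illustrative):

    import static org.apache.knox.test.TestUtils.LOG_ENTER;
    import static org.apache.knox.test.TestUtils.LOG_EXIT;

    import org.junit.Test;

    public class ExampleFuncTest {
      @Test
      public void testExample() throws Exception {
        LOG_ENTER();   // mark test entry in the gateway/test logs
        // ... exercise the gateway endpoint under test ...
        LOG_EXIT();    // mark test exit, pairing with LOG_ENTER above
      }
    }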


[11/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
index 6b8411e,0000000..0eb717e
mode 100644,000000..100644
--- a/gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
+++ b/gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
@@@ -1,410 -1,0 +1,689 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxsso;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.NoSuchAlgorithmException;
 +import java.security.Principal;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
++import java.util.Date;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletOutputStream;
 +import javax.servlet.http.Cookie;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.servlet.http.HttpServletResponseWrapper;
 +
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.apache.knox.gateway.util.RegExUtils;
 +import org.easymock.EasyMock;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +/**
 + * Some tests for the Knox SSO service.
 + */
 +public class WebSSOResourceTest {
 +
 +  protected static RSAPublicKey publicKey;
 +  protected static RSAPrivateKey privateKey;
 +
 +  @BeforeClass
 +  public static void setup() throws NoSuchAlgorithmException {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(1024);
 +    KeyPair keyPair = kpg.generateKeyPair();
 +
 +    publicKey = (RSAPublicKey) keyPair.getPublic();
 +    privateKey = (RSAPrivateKey) keyPair.getPrivate();
 +  }
 +
 +  @Test
 +  public void testWhitelistMatching() throws Exception {
 +    String whitelist = "^https?://.*example.com:8080/.*$;" +
 +        "^https?://.*example.com/.*$;" +
 +        "^https?://.*example2.com:\\d{0,9}/.*$;" +
 +        "^https://.*example3.com:\\d{0,9}/.*$;" +
 +        "^https?://localhost:\\d{0,9}/.*$;^/.*$";
 +
 +    // match on explicit hostname/domain and port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com:8080/"));
 +    // match on non-required port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com/"));
 +    // match on required but any port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example2.com:1234/"));
 +    // fail on missing port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example2.com/"));
 +    // fail on invalid port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com:8081/"));
 +    // fail on alphanumeric port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com:A080/"));
 +    // fail on invalid hostname/domain
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.net:8080/"));
 +    // fail on required port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example2.com/"));
 +    // fail on required https
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example3.com/"));
 +    // match on localhost and port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://localhost:8080/"));
 +    // match on local/relative path
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "/local/resource/"));
 +  }
 +
 +  @Test
 +  public void testGetToken() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
 +    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
 +    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
 +
 +    EasyMock.replay(principal, services, context, request);
 +
 +    WebSSOResource webSSOResponse = new WebSSOResource();
 +    webSSOResponse.request = request;
 +    webSSOResponse.response = responseWrapper;
 +    webSSOResponse.context = context;
 +    webSSOResponse.init();
 +
 +    // Issue a token
 +    webSSOResponse.doGet();
 +
 +    // Check the cookie
 +    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
 +    assertNotNull(cookie);
 +
-     JWTToken parsedToken = new JWTToken(cookie.getValue());
++    JWT parsedToken = new JWTToken(cookie.getValue());
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +  }
 +
 +  @Test
 +  public void testAudiences() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn("recipient1,recipient2");
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
 +    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
 +    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
 +
 +    EasyMock.replay(principal, services, context, request);
 +
 +    WebSSOResource webSSOResponse = new WebSSOResource();
 +    webSSOResponse.request = request;
 +    webSSOResponse.response = responseWrapper;
 +    webSSOResponse.context = context;
 +    webSSOResponse.init();
 +
 +    // Issue a token
 +    webSSOResponse.doGet();
 +
 +    // Check the cookie
 +    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
 +    assertNotNull(cookie);
 +
-     JWTToken parsedToken = new JWTToken(cookie.getValue());
++    JWT parsedToken = new JWTToken(cookie.getValue());
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +
 +    // Verify the audiences
 +    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
 +    assertEquals(2, audiences.size());
 +    assertTrue(audiences.contains("recipient1"));
 +    assertTrue(audiences.contains("recipient2"));
 +  }
 +
 +  @Test
 +  public void testAudiencesWhitespace() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(" recipient1, recipient2 ");
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
 +    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
 +    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
 +
 +    EasyMock.replay(principal, services, context, request);
 +
 +    WebSSOResource webSSOResponse = new WebSSOResource();
 +    webSSOResponse.request = request;
 +    webSSOResponse.response = responseWrapper;
 +    webSSOResponse.context = context;
 +    webSSOResponse.init();
 +
 +    // Issue a token
 +    webSSOResponse.doGet();
 +
 +    // Check the cookie
 +    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
 +    assertNotNull(cookie);
 +
 +    JWTToken parsedToken = new JWTToken(cookie.getValue());
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +
 +    // Verify the audiences
 +    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
 +    assertEquals(2, audiences.size());
 +    assertTrue(audiences.contains("recipient1"));
 +    assertTrue(audiences.contains("recipient2"));
 +  }
 +
++  @Test
++  public void testSignatureAlgorithm() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.sigalg")).andReturn("RS512");
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
++    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
++    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
++
++    EasyMock.replay(principal, services, context, request);
++
++    WebSSOResource webSSOResponse = new WebSSOResource();
++    webSSOResponse.request = request;
++    webSSOResponse.response = responseWrapper;
++    webSSOResponse.context = context;
++    webSSOResponse.init();
++
++    // Issue a token
++    webSSOResponse.doGet();
++
++    // Check the cookie
++    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
++    assertNotNull(cookie);
++
++    JWT parsedToken = new JWTToken(cookie.getValue());
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++    assertTrue(parsedToken.getHeader().contains("RS512"));
++  }
++
++  @Test
++  public void testDefaultTTL() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
++    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
++    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
++
++    EasyMock.replay(principal, services, context, request);
++
++    WebSSOResource webSSOResponse = new WebSSOResource();
++    webSSOResponse.request = request;
++    webSSOResponse.response = responseWrapper;
++    webSSOResponse.context = context;
++    webSSOResponse.init();
++
++    // Issue a token
++    webSSOResponse.doGet();
++
++    // Check the cookie
++    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
++    assertNotNull(cookie);
++
++    JWT parsedToken = new JWTToken(cookie.getValue());
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    assertTrue((expiresDate.getTime() - now.getTime()) < 30000L);
++  }
++
++  @Test
++  public void testCustomTTL() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn("60000");
++    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
++    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
++    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
++
++    EasyMock.replay(principal, services, context, request);
++
++    WebSSOResource webSSOResponse = new WebSSOResource();
++    webSSOResponse.request = request;
++    webSSOResponse.response = responseWrapper;
++    webSSOResponse.context = context;
++    webSSOResponse.init();
++
++    // Issue a token
++    webSSOResponse.doGet();
++
++    // Check the cookie
++    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
++    assertNotNull(cookie);
++
++    JWT parsedToken = new JWTToken(cookie.getValue());
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    long diff = expiresDate.getTime() - now.getTime();
++    assertTrue(diff < 60000L && diff > 30000L);
++  }
++
++  @Test
++  public void testNegativeTTL() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn("-60000");
++    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
++    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
++    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
++
++    EasyMock.replay(principal, services, context, request);
++
++    WebSSOResource webSSOResponse = new WebSSOResource();
++    webSSOResponse.request = request;
++    webSSOResponse.response = responseWrapper;
++    webSSOResponse.context = context;
++    webSSOResponse.init();
++
++    // Issue a token
++    webSSOResponse.doGet();
++
++    // Check the cookie
++    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
++    assertNotNull(cookie);
++
++    JWT parsedToken = new JWTToken(cookie.getValue());
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    assertTrue((expiresDate.getTime() - now.getTime()) < 30000L);
++  }
++
++  @Test
++  public void testOverflowTTL() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(String.valueOf(Long.MAX_VALUE));
++    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
++    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
++    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
++
++    EasyMock.replay(principal, services, context, request);
++
++    WebSSOResource webSSOResponse = new WebSSOResource();
++    webSSOResponse.request = request;
++    webSSOResponse.response = responseWrapper;
++    webSSOResponse.context = context;
++    webSSOResponse.init();
++
++    // Issue a token
++    webSSOResponse.doGet();
++
++    // Check the cookie
++    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
++    assertNotNull(cookie);
++
++    JWT parsedToken = new JWTToken(cookie.getValue());
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    assertTrue((expiresDate.getTime() - now.getTime()) < 30000L);
++  }
++
 +  /**
 +   * A wrapper for HttpServletResponseWrapper to store the cookies
 +   */
 +  private static class CookieResponseWrapper extends HttpServletResponseWrapper {
 +
 +    private ServletOutputStream outputStream;
 +    private Map<String, Cookie> cookies = new HashMap<>();
 +
 +    public CookieResponseWrapper(HttpServletResponse response) {
 +        super(response);
 +    }
 +
 +    public CookieResponseWrapper(HttpServletResponse response, ServletOutputStream outputStream) {
 +        super(response);
 +        this.outputStream = outputStream;
 +    }
 +
 +    @Override
 +    public ServletOutputStream getOutputStream() {
 +        return outputStream;
 +    }
 +
 +    @Override
 +    public void addCookie(Cookie cookie) {
 +        super.addCookie(cookie);
 +        cookies.put(cookie.getName(), cookie);
 +    }
 +
 +    public Cookie getCookie(String name) {
 +        return cookies.get(name);
 +    }
 +
 +  }
 +
 +  private static class TestJWTokenAuthority implements JWTokenAuthority {
 +
 +    private RSAPublicKey publicKey;
 +    private RSAPrivateKey privateKey;
 +
 +    public TestJWTokenAuthority(RSAPublicKey publicKey, RSAPrivateKey privateKey) {
 +      this.publicKey = publicKey;
 +      this.privateKey = privateKey;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Subject subject, String algorithm)
 +      throws TokenServiceException {
 +      Principal p = (Principal) subject.getPrincipals().toArray()[0];
 +      return issueToken(p, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, null, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, audience, algorithm, -1);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      List<String> audiences = null;
 +      if (audience != null) {
 +        audiences = new ArrayList<String>();
 +        audiences.add(audience);
 +      }
 +      return issueToken(p, audiences, algorithm, expires);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, List<String> audiences, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      String[] claimArray = new String[4];
 +      claimArray[0] = "KNOXSSO";
 +      claimArray[1] = p.getName();
 +      claimArray[2] = null;
 +      if (expires == -1) {
 +        claimArray[3] = null;
 +      } else {
 +        claimArray[3] = String.valueOf(expires);
 +      }
 +
-       JWTToken token = null;
-       if ("RS256".equals(algorithm)) {
-         token = new JWTToken("RS256", claimArray, audiences);
-         JWSSigner signer = new RSASSASigner(privateKey);
-         token.sign(signer);
-       } else {
-         throw new TokenServiceException("Cannot issue token - Unsupported algorithm");
-       }
++      JWT token = new JWTToken(algorithm, claimArray, audiences);
++      JWSSigner signer = new RSASSASigner(privateKey);
++      token.sign(signer);
 +
 +      return token;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm, long expiry)
 +        throws TokenServiceException {
 +      return issueToken(p, Collections.<String>emptyList(), algorithm, expiry);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token, RSAPublicKey publicKey) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +  }
 +
 +}
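
The new signature-algorithm and TTL tests above all end with the same check:
parse the hadoop-jwt cookie back into a token and verify it against the RSA
public key. A minimal standalone sketch of that verification using the Nimbus
JOSE API the test already imports (cookieValue and publicKey are assumed
inputs; SignedJWT is plain Nimbus, standing in for Knox's JWTToken wrapper):

    import com.nimbusds.jose.JOSEException;
    import com.nimbusds.jose.JWSVerifier;
    import com.nimbusds.jose.crypto.RSASSAVerifier;
    import com.nimbusds.jwt.SignedJWT;

    import java.security.interfaces.RSAPublicKey;
    import java.text.ParseException;

    public class JwtCookieCheck {
      static boolean verify(String cookieValue, RSAPublicKey publicKey)
          throws ParseException, JOSEException {
        SignedJWT jwt = SignedJWT.parse(cookieValue);         // compact JWS: header.payload.signature
        JWSVerifier verifier = new RSASSAVerifier(publicKey);
        boolean signatureOk = jwt.verify(verifier);           // RSA signature check
        String subject = jwt.getJWTClaimsSet().getSubject();  // "alice" in the tests above
        return signatureOk && subject != null;
      }
    }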

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
index 1c16ab3,0000000..f8eb124
mode 100644,000000..100644
--- a/gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
+++ b/gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
@@@ -1,218 -1,0 +1,230 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxtoken;
 +
 +import java.io.IOException;
 +import java.security.Principal;
 +import java.security.cert.X509Certificate;
 +import java.util.ArrayList;
 +import java.util.Map;
 +import java.util.HashMap;
 +import java.util.List;
 +
 +import javax.annotation.PostConstruct;
 +import javax.servlet.ServletContext;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.POST;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.util.JsonUtils;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
 +
 +@Path( TokenResource.RESOURCE_PATH )
 +public class TokenResource {
 +  private static final String EXPIRES_IN = "expires_in";
 +  private static final String TOKEN_TYPE = "token_type";
 +  private static final String ACCESS_TOKEN = "access_token";
 +  private static final String TARGET_URL = "target_url";
-   private static final String BEARER = "Bearer ";
++  private static final String BEARER = "Bearer";
 +  private static final String TOKEN_TTL_PARAM = "knox.token.ttl";
 +  private static final String TOKEN_AUDIENCES_PARAM = "knox.token.audiences";
 +  private static final String TOKEN_TARGET_URL = "knox.token.target.url";
 +  private static final String TOKEN_CLIENT_DATA = "knox.token.client.data";
 +  private static final String TOKEN_CLIENT_CERT_REQUIRED = "knox.token.client.cert.required";
 +  private static final String TOKEN_ALLOWED_PRINCIPALS = "knox.token.allowed.principals";
++  private static final String TOKEN_SIG_ALG = "knox.token.sigalg";
++  private static final long TOKEN_TTL_DEFAULT = 30000L;
 +  static final String RESOURCE_PATH = "knoxtoken/api/v1/token";
 +  private static TokenServiceMessages log = MessagesFactory.get( TokenServiceMessages.class );
-   private long tokenTTL = 30000l;
++  private long tokenTTL = TOKEN_TTL_DEFAULT;
 +  private List<String> targetAudiences = new ArrayList<>();
 +  private String tokenTargetUrl = null;
 +  private Map<String,Object> tokenClientDataMap = null;
 +  private ArrayList<String> allowedDNs = new ArrayList<>();
 +  private boolean clientCertRequired = false;
++  private String signatureAlgorithm = "RS256";
 +
 +  @Context
 +  HttpServletRequest request;
 +
 +  @Context
 +  HttpServletResponse response;
 +
 +  @Context
 +  ServletContext context;
 +
 +  @PostConstruct
 +  public void init() {
 +
 +    String audiences = context.getInitParameter(TOKEN_AUDIENCES_PARAM);
 +    if (audiences != null) {
 +      String[] auds = audiences.split(",");
 +      for (int i = 0; i < auds.length; i++) {
 +        targetAudiences.add(auds[i].trim());
 +      }
 +    }
 +
 +    String clientCert = context.getInitParameter(TOKEN_CLIENT_CERT_REQUIRED);
 +    clientCertRequired = "true".equals(clientCert);
 +
 +    String principals = context.getInitParameter(TOKEN_ALLOWED_PRINCIPALS);
 +    if (principals != null) {
 +      String[] dns = principals.split(";");
 +      for (int i = 0; i < dns.length; i++) {
 +        allowedDNs.add(dns[i]);
 +      }
 +    }
 +
 +    String ttl = context.getInitParameter(TOKEN_TTL_PARAM);
 +    if (ttl != null) {
 +      try {
 +        tokenTTL = Long.parseLong(ttl);
++        if (tokenTTL < -1 || (tokenTTL + System.currentTimeMillis() < 0)) {
++          log.invalidTokenTTLEncountered(ttl);
++          tokenTTL = TOKEN_TTL_DEFAULT;
++        }
 +      }
 +      catch (NumberFormatException nfe) {
 +        log.invalidTokenTTLEncountered(ttl);
 +      }
 +    }
 +
 +    tokenTargetUrl = context.getInitParameter(TOKEN_TARGET_URL);
 +
 +    String clientData = context.getInitParameter(TOKEN_CLIENT_DATA);
 +    if (clientData != null) {
 +      tokenClientDataMap = new HashMap<>();
 +      String[] tokenClientData = clientData.split(",");
 +      addClientDataToMap(tokenClientData, tokenClientDataMap);
 +    }
++
++    String sigAlg = context.getInitParameter(TOKEN_SIG_ALG);
++    if (sigAlg != null) {
++      signatureAlgorithm = sigAlg;
++    }
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doGet() {
 +    return getAuthenticationToken();
 +  }
 +
 +  @POST
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doPost() {
 +    return getAuthenticationToken();
 +  }
 +
 +  private X509Certificate extractCertificate(HttpServletRequest req) {
 +    X509Certificate[] certs = (X509Certificate[]) req.getAttribute("javax.servlet.request.X509Certificate");
 +    if (null != certs && certs.length > 0) {
 +        return certs[0];
 +    }
 +    return null;
 +  }
 +
 +  private Response getAuthenticationToken() {
 +    if (clientCertRequired) {
 +      X509Certificate cert = extractCertificate(request);
 +      if (cert != null) {
 +        if (!allowedDNs.contains(cert.getSubjectDN().getName())) {
 +          return Response.status(403).entity("{ \"Unable to get token - untrusted client cert.\" }").build();
 +        }
 +      }
 +      else {
 +        return Response.status(403).entity("{ \"Unable to get token - client cert required.\" }").build();
 +      }
 +    }
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +            .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    JWTokenAuthority ts = services.getService(GatewayServices.TOKEN_SERVICE);
 +    Principal p = ((HttpServletRequest)request).getUserPrincipal();
 +    long expires = getExpiry();
 +
 +    try {
 +      JWT token = null;
 +      if (targetAudiences.isEmpty()) {
-         token = ts.issueToken(p, "RS256", expires);
++        token = ts.issueToken(p, signatureAlgorithm, expires);
 +      } else {
-         token = ts.issueToken(p, targetAudiences, "RS256", expires);
++        token = ts.issueToken(p, targetAudiences, signatureAlgorithm, expires);
 +      }
 +
 +      if (token != null) {
 +        String accessToken = token.toString();
 +
 +        HashMap<String, Object> map = new HashMap<>();
 +        map.put(ACCESS_TOKEN, accessToken);
 +        map.put(TOKEN_TYPE, BEARER);
 +        map.put(EXPIRES_IN, expires);
 +        if (tokenTargetUrl != null) {
 +          map.put(TARGET_URL, tokenTargetUrl);
 +        }
 +        if (tokenClientDataMap != null) {
 +          map.putAll(tokenClientDataMap);
 +        }
 +
 +        String jsonResponse = JsonUtils.renderAsJsonString(map);
 +
 +        response.getWriter().write(jsonResponse);
 +        return Response.ok().build();
 +      }
 +      else {
 +        return Response.serverError().build();
 +      }
 +    }
 +    catch (TokenServiceException | IOException e) {
 +      log.unableToIssueToken(e);
 +    }
 +    return Response.ok().entity("{ \"Unable to acquire token.\" }").build();
 +  }
 +
 +  void addClientDataToMap(String[] tokenClientData,
 +      Map<String,Object> map) {
 +    for (String clientDatum : tokenClientData) {
 +      String[] kv = clientDatum.split("=");
 +      if (kv.length == 2) {
 +        map.put(kv[0], kv[1]);
 +      }
 +    }
 +  }
 +
 +  private long getExpiry() {
 +    // -1 is the "never expires" sentinel; otherwise the TTL is relative to now.
 +    return (tokenTTL == -1) ? -1 : System.currentTimeMillis() + tokenTTL;
 +  }
 +}
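
For reference, the map rendered above yields a flat JSON body. A minimal client-side sketch for unpacking it (the token values are hypothetical; it reuses the com.jayway.jsonpath dependency that appears later in this patch):

    import com.jayway.jsonpath.JsonPath;

    public class TokenResponseExample {
      public static void main(String[] args) {
        // Shape of the body written by TokenResource.getAuthenticationToken().
        String json = "{\"access_token\":\"eyJhbGciOi...\",\"token_type\":\"Bearer\","
                    + "\"expires_in\":1510000000000}";
        String accessToken = JsonPath.read(json, "$.access_token");
        String tokenType = JsonPath.read(json, "$.token_type");
        // Note: expires_in carries the absolute expiry in epoch millis
        // (System.currentTimeMillis() + TTL), not a relative duration.
        Number expiresIn = JsonPath.read(json, "$.expires_in");
        System.out.println(tokenType + " token, expires at " + expiresIn + ": " + accessToken);
      }
    }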

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
index b73b1b7,0000000..3753b27
mode 100644,000000..100644
--- a/gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
+++ b/gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
@@@ -1,510 -1,0 +1,782 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxtoken;
 +
 +import org.apache.knox.gateway.service.knoxtoken.TokenResource;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +
 +import org.easymock.EasyMock;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +import java.util.Map;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.ServletContext;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.ws.rs.core.Response;
 +
 +import static org.junit.Assert.*;
 +
 +import java.io.PrintWriter;
 +import java.io.StringWriter;
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.NoSuchAlgorithmException;
 +import java.security.Principal;
 +import java.security.cert.X509Certificate;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
++import java.util.Date;
 +import java.util.HashMap;
 +import java.util.List;
 +
 +/**
 + * Unit tests for TokenResource: token issuance, audiences, client certificate
 + * enforcement, signature algorithm selection, and TTL handling.
 + */
 +public class TokenServiceResourceTest {
 +
 +  protected static RSAPublicKey publicKey;
 +  protected static RSAPrivateKey privateKey;
 +
 +  @BeforeClass
 +  public static void setup() throws NoSuchAlgorithmException {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(1024);
 +    KeyPair keyPair = kpg.generateKeyPair();
 +
 +    publicKey = (RSAPublicKey) keyPair.getPublic();
 +    privateKey = (RSAPrivateKey) keyPair.getPrivate();
 +  }
 +
 +  @Test
 +  public void testTokenService() throws Exception {
 +    // Placeholder smoke test; the real coverage is in the tests below.
 +    Assert.assertTrue(true);
 +  }
 +
 +  @Test
 +  public void testClientData() throws Exception {
 +    TokenResource tr = new TokenResource();
 +
 +    Map<String,Object> clientDataMap = new HashMap<>();
 +    tr.addClientDataToMap("cookie.name=hadoop-jwt,test=value".split(","), clientDataMap);
 +    Assert.assertEquals(2, clientDataMap.size());
 +
 +    clientDataMap = new HashMap<>();
 +    tr.addClientDataToMap("cookie.name=hadoop-jwt".split(","), clientDataMap);
 +    Assert.assertEquals(1, clientDataMap.size());
 +
 +    clientDataMap = new HashMap<>();
 +    tr.addClientDataToMap("".split(","), clientDataMap);
 +    Assert.assertEquals(0, clientDataMap.size());
 +  }
 +
 +  @Test
 +  public void testGetToken() throws Exception {
-     TokenResource tr = new TokenResource();
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
-     //tr.context = context;
-     // tr.init();
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response);
 +
++    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(200, retResponse.getStatus());
 +
 +    // Parse the response
 +    String retString = writer.toString();
 +    String accessToken = getTagValue(retString, "access_token");
 +    assertNotNull(accessToken);
 +    String expiry = getTagValue(retString, "expires_in");
 +    assertNotNull(expiry);
 +
 +    // Verify the token
-     JWTToken parsedToken = new JWTToken(accessToken);
++    JWT parsedToken = new JWTToken(accessToken);
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +  }
 +
 +  @Test
 +  public void testAudiences() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
 +    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response);
 +
 +    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +    tr.context = context;
 +    tr.init();
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(200, retResponse.getStatus());
 +
 +    // Parse the response
 +    String retString = writer.toString();
 +    String accessToken = getTagValue(retString, "access_token");
 +    assertNotNull(accessToken);
 +    String expiry = getTagValue(retString, "expires_in");
 +    assertNotNull(expiry);
 +
 +    // Verify the token
-     JWTToken parsedToken = new JWTToken(accessToken);
++    JWT parsedToken = new JWTToken(accessToken);
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +
 +    // Verify the audiences
 +    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
 +    assertEquals(2, audiences.size());
 +    assertTrue(audiences.contains("recipient1"));
 +    assertTrue(audiences.contains("recipient2"));
 +  }
 +
 +  @Test
 +  public void testAudiencesWhitespace() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn(" recipient1, recipient2 ");
 +    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response);
 +
 +    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +    tr.context = context;
 +    tr.init();
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(200, retResponse.getStatus());
 +
 +    // Parse the response
 +    String retString = writer.toString();
 +    String accessToken = getTagValue(retString, "access_token");
 +    assertNotNull(accessToken);
 +    String expiry = getTagValue(retString, "expires_in");
 +    assertNotNull(expiry);
 +
 +    // Verify the token
-     JWTToken parsedToken = new JWTToken(accessToken);
++    JWT parsedToken = new JWTToken(accessToken);
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +
 +    // Verify the audiences
 +    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
 +    assertEquals(2, audiences.size());
 +    assertTrue(audiences.contains("recipient1"));
 +    assertTrue(audiences.contains("recipient2"));
 +  }
 +
 +  @Test
 +  public void testValidClientCert() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knox.token.client.cert.required")).andReturn("true");
 +    EasyMock.expect(context.getInitParameter("knox.token.allowed.principals")).andReturn("CN=localhost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US");
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    X509Certificate trustedCertMock = EasyMock.createMock(X509Certificate.class);
 +    EasyMock.expect(trustedCertMock.getSubjectDN()).andReturn(new PrimaryPrincipal("CN=localhost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US")).anyTimes();
 +    ArrayList<X509Certificate> certArrayList = new ArrayList<>();
 +    certArrayList.add(trustedCertMock);
 +    X509Certificate[] certs = {};
 +    EasyMock.expect(request.getAttribute("javax.servlet.request.X509Certificate")).andReturn(certArrayList.toArray(certs)).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response, trustedCertMock);
 +
 +    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +    tr.context = context;
 +    tr.init();
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(200, retResponse.getStatus());
 +
 +    // Parse the response
 +    String retString = writer.toString();
 +    String accessToken = getTagValue(retString, "access_token");
 +    assertNotNull(accessToken);
 +    String expiry = getTagValue(retString, "expires_in");
 +    assertNotNull(expiry);
 +
 +    // Verify the token
-     JWTToken parsedToken = new JWTToken(accessToken);
++    JWT parsedToken = new JWTToken(accessToken);
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +  }
 +
 +  @Test
 +  public void testValidClientCertWrongUser() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knox.token.client.cert.required")).andReturn("true");
 +    EasyMock.expect(context.getInitParameter("knox.token.allowed.principals")).andReturn("CN=remotehost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US");
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    X509Certificate trustedCertMock = EasyMock.createMock(X509Certificate.class);
 +    EasyMock.expect(trustedCertMock.getSubjectDN()).andReturn(new PrimaryPrincipal("CN=localhost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US")).anyTimes();
 +    ArrayList<X509Certificate> certArrayList = new ArrayList<>();
 +    certArrayList.add(trustedCertMock);
 +    X509Certificate[] certs = {};
 +    EasyMock.expect(request.getAttribute("javax.servlet.request.X509Certificate")).andReturn(certArrayList.toArray(certs)).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response, trustedCertMock);
 +
 +    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +    tr.context = context;
 +    tr.init();
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(403, retResponse.getStatus());
 +  }
 +
 +  @Test
 +  public void testMissingClientCert() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knox.token.client.cert.required")).andReturn("true");
 +    EasyMock.expect(context.getInitParameter("knox.token.allowed.principals")).andReturn("CN=remotehost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US");
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    EasyMock.expect(request.getAttribute("javax.servlet.request.X509Certificate")).andReturn(null).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response);
 +
 +    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +    tr.context = context;
 +    tr.init();
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(403, retResponse.getStatus());
 +  }
 +
++  @Test
++  public void testSignatureAlgorithm() throws Exception {
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
++    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.sigalg")).andReturn("RS512");
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWT parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++    assertTrue(parsedToken.getHeader().contains("RS512"));
++  }
++
++  @Test
++  public void testDefaultTTL() throws Exception {
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
++    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWT parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    assertTrue((expiresDate.getTime() - now.getTime()) < 30000L);
++  }
++
++  @Test
++  public void testCustomTTL() throws Exception {
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
++    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn("60000");
++    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWT parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    long diff = expiresDate.getTime() - now.getTime();
++    assertTrue(diff < 60000L && diff > 30000L);
++  }
++
++  @Test
++  public void testNegativeTTL() throws Exception {
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
++    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn("-60000");
++    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWT parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    assertTrue((expiresDate.getTime() - now.getTime()) < 30000L);
++  }
++
++  @Test
++  public void testOverflowTTL() throws Exception {
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
++    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(String.valueOf(Long.MAX_VALUE));
++    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWT parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    Date expiresDate = parsedToken.getExpiresDate();
++    Date now = new Date();
++    assertTrue(expiresDate.after(now));
++    assertTrue((expiresDate.getTime() - now.getTime()) < 30000L);
++  }
++
 +  // Crude extraction of a top-level value from the JSON response body;
 +  // good enough for these tests without pulling in a JSON parser.
 +  private String getTagValue(String token, String tagName) {
 +    String searchString = tagName + "\":";
 +    String value = token.substring(token.indexOf(searchString) + searchString.length());
 +    if (value.startsWith("\"")) {
 +      value = value.substring(1);
 +    }
 +    if (value.contains("\"")) {
 +      return value.substring(0, value.indexOf("\""));
 +    } else if (value.contains(",")) {
 +      return value.substring(0, value.indexOf(","));
 +    } else {
 +      return value.substring(0, value.length() - 1);
 +    }
 +  }
 +
 +  private static class TestJWTokenAuthority implements JWTokenAuthority {
 +
 +    private RSAPublicKey publicKey;
 +    private RSAPrivateKey privateKey;
 +
 +    public TestJWTokenAuthority(RSAPublicKey publicKey, RSAPrivateKey privateKey) {
 +      this.publicKey = publicKey;
 +      this.privateKey = privateKey;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Subject subject, String algorithm)
 +      throws TokenServiceException {
 +      Principal p = (Principal) subject.getPrincipals().toArray()[0];
 +      return issueToken(p, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, null, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, audience, algorithm, -1);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      ArrayList<String> audiences = null;
 +      if (audience != null) {
 +        audiences = new ArrayList<>();
 +        audiences.add(audience);
 +      }
 +      return issueToken(p, audiences, algorithm, expires);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, List<String> audiences, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      String[] claimArray = new String[4];
 +      claimArray[0] = "KNOXSSO";
 +      claimArray[1] = p.getName();
 +      claimArray[2] = null;
 +      if (expires == -1) {
 +        claimArray[3] = null;
 +      } else {
 +        claimArray[3] = String.valueOf(expires);
 +      }
 +
-       JWTToken token = null;
-       if ("RS256".equals(algorithm)) {
-         token = new JWTToken("RS256", claimArray, audiences);
-         JWSSigner signer = new RSASSASigner(privateKey);
-         token.sign(signer);
-       } else {
-         throw new TokenServiceException("Cannot issue token - Unsupported algorithm");
-       }
++      JWT token = new JWTToken(algorithm, claimArray, audiences);
++      JWSSigner signer = new RSASSASigner(privateKey);
++      token.sign(signer);
 +
 +      return token;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm, long expiry)
 +        throws TokenServiceException {
 +      return issueToken(p, Collections.<String>emptyList(), algorithm, expiry);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token, RSAPublicKey publicKey) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +  }
 +
 +
 +}
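
The three TTL tests above pin down the fallback rule added to TokenResource.init(): a TTL below the -1 "never expires" sentinel, or one large enough to overflow when added to the current time, reverts to the default (30 seconds, judging by the 30000L bounds asserted above). A standalone sketch of that rule:

    public class TtlFallbackExample {
      private static final long TOKEN_TTL_DEFAULT = 30 * 1000L;

      static long effectiveTtl(String configured) {
        long ttl = TOKEN_TTL_DEFAULT;
        if (configured != null) {
          try {
            ttl = Long.parseLong(configured);
            // Reject values below the sentinel and values that overflow
            // when added to the current clock.
            if (ttl < -1 || ttl + System.currentTimeMillis() < 0) {
              ttl = TOKEN_TTL_DEFAULT;
            }
          } catch (NumberFormatException nfe) {
            ttl = TOKEN_TTL_DEFAULT;
          }
        }
        return ttl;
      }

      public static void main(String[] args) {
        System.out.println(effectiveTtl("60000"));                        // 60000
        System.out.println(effectiveTtl("-60000"));                       // fallback: 30000
        System.out.println(effectiveTtl(String.valueOf(Long.MAX_VALUE))); // fallback: 30000
      }
    }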

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-shell/src/main/java/org/apache/knox/gateway/shell/job/Sqoop.java
----------------------------------------------------------------------
diff --cc gateway-shell/src/main/java/org/apache/knox/gateway/shell/job/Sqoop.java
index ec9f907,0000000..072b58d
mode 100644,000000..100644
--- a/gateway-shell/src/main/java/org/apache/knox/gateway/shell/job/Sqoop.java
+++ b/gateway-shell/src/main/java/org/apache/knox/gateway/shell/job/Sqoop.java
@@@ -1,99 -1,0 +1,99 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.shell.job;
 +
 +import com.jayway.jsonpath.JsonPath;
 +import org.apache.knox.gateway.shell.AbstractRequest;
 +import org.apache.knox.gateway.shell.BasicResponse;
 +import org.apache.knox.gateway.shell.Hadoop;
 +import org.apache.http.HttpResponse;
 +import org.apache.http.NameValuePair;
 +import org.apache.http.client.entity.UrlEncodedFormEntity;
 +import org.apache.http.client.methods.HttpPost;
 +import org.apache.http.client.utils.URIBuilder;
 +
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.concurrent.Callable;
 +
 +public class Sqoop {
 +
-   static class Request extends AbstractRequest<Response> {
++  public static class Request extends AbstractRequest<Response> {
 +
 +    private String statusDir;
 +    List<NameValuePair> params = new ArrayList<>();
 +
 +    public Request( Hadoop session ) {
 +      super( session );
 +    }
 +
 +    public Request command( String command ) {
 +      addParam( params, "command", command );
 +      return this;
 +    }
 +
 +    public Request libdir( String libdir ) {
 +      addParam( params, "libdir", libdir );
 +      return this;
 +    }
 +
 +    public Request files( String files ) {
 +      addParam( params, "files", files );
 +      return this;
 +    }
 +
 +    public Request optionsfile( String optionsFile ) {
 +      addParam( params, "optionsfile", optionsFile );
 +      return this;
 +    }
 +
 +    public Request statusDir( String dir ) {
 +      this.statusDir = dir;
 +      return this;
 +    }
 +
 +    protected Callable<Response> callable() {
 +      return new Callable<Response>() {
 +        @Override
 +        public Response call() throws Exception {
 +          URIBuilder uri = uri( Job.SERVICE_PATH, "/sqoop" );
 +          addParam( params, "statusdir", statusDir );
 +          UrlEncodedFormEntity form = new UrlEncodedFormEntity( params );
 +          HttpPost request = new HttpPost( uri.build() );
 +          request.setEntity( form );
 +          return new Response( execute( request ) );
 +        }
 +      };
 +    }
 +
 +  }
 +
 +  public static class Response extends BasicResponse {
 +
 +    protected Response( HttpResponse response ) {
 +      super( response );
 +    }
 +
 +    public String getJobId() throws IOException {
 +      return JsonPath.read( getString(), "$.id" );
 +    }
 +
 +  }
 +
 +}
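
Making Request public opens the fluent builder to callers outside the package. A hypothetical usage sketch (the gateway URL and credentials are placeholders; Hadoop.login() and now() come from the gateway-shell client DSL):

    import org.apache.knox.gateway.shell.Hadoop;
    import org.apache.knox.gateway.shell.job.Sqoop;

    public class SqoopSubmitExample {
      public static void main(String[] args) throws Exception {
        Hadoop session = Hadoop.login("https://localhost:8443/gateway/sandbox", "guest", "guest-password");
        Sqoop.Response response = new Sqoop.Request(session)
            .command("import --connect jdbc:mysql://db.example.com/corp --table EMPLOYEES")
            .statusDir("/user/guest/sqoop/status")
            .now();
        System.out.println("Submitted Sqoop job: " + response.getJobId());
        session.shutdown();
      }
    }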

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWT.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWT.java
index 8638da5,0000000..bd7cd5c
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWT.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/services/security/token/impl/JWT.java
@@@ -1,63 -1,0 +1,66 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.security.token.impl;
 +
 +import java.util.Date;
 +
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +
 +public interface JWT {
 +
 +  String PRINCIPAL = "prn";
 +  String SUBJECT = "sub";
 +  String ISSUER = "iss";
 +  String AUDIENCE = "aud";
 +  String EXPIRES = "exp";
++  String NOT_BEFORE = "nbf";
 +
 +  String getPayload();
 +
 +  void setSignaturePayload(byte[] payload);
 +
 +  byte[] getSignaturePayload();
 +
 +  String getClaim(String claimName);
 +
 +  String getPrincipal();
 +
 +  String getIssuer();
 +
 +  String getAudience();
 +
 +  String[] getAudienceClaims();
 +
 +  String getExpires();
 +
 +  Date getExpiresDate();
 +
++  Date getNotBeforeDate();
++
 +  String getSubject();
 +
 +  String getHeader();
 +
 +  String getClaims();
 +
 +  void sign(JWSSigner signer);
 +
 +  boolean verify(JWSVerifier verifier);
 +
 +}
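
With the new NOT_BEFORE claim and getNotBeforeDate() accessor, a verifier can enforce the full validity window rather than expiry alone. A minimal sketch (the token instance is assumed to come from an already-verified JWTToken):

    import java.util.Date;
    import org.apache.knox.gateway.services.security.token.impl.JWT;

    public class ValidityWindowExample {
      // True when 'now' falls inside the token's nbf/exp window; a missing
      // bound (null date) is treated as unconstrained on that side.
      static boolean withinValidityWindow(JWT token, Date now) {
        Date notBefore = token.getNotBeforeDate();
        if (notBefore != null && now.before(notBefore)) {
          return false; // not yet valid
        }
        Date expires = token.getExpiresDate();
        if (expires != null && now.after(expires)) {
          return false; // expired
        }
        return true;
      }
    }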


[25/53] [abbrv] knox git commit: KNOX-998 - Some more refactoring

Posted by mo...@apache.org.
KNOX-998 - Some more refactoring


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/1451428f
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/1451428f
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/1451428f

Branch: refs/heads/master
Commit: 1451428f7cca88758e8163276cb5f6a33c1f812d
Parents: 46109ad
Author: Sandeep More <mo...@apache.org>
Authored: Thu Nov 2 14:47:28 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Thu Nov 2 14:47:28 2017 -0400

----------------------------------------------------------------------
 .../gateway/i18n/messages/MessagesTest.java     |   4 +-
 .../gateway/i18n/resources/ResourcesTest.java   |   4 +-
 ...yAssertionHttpServletRequestWrapperTest.java |   8 +-
 .../function/UsernameFunctionProcessorTest.java |   8 +-
 .../impl/HostmapFunctionProcessorTest.java      |   2 +-
 .../impl/ServiceRegistryFunctionsTest.java      |   8 +-
 .../UrlRewriteServletContextListenerTest.java   |   4 +-
 .../api/UrlRewriteServletFilterTest.java        |   8 +-
 .../impl/FrontendFunctionProcessorTest.java     |   8 +-
 .../rewrite/impl/json/JsonFilterReaderTest.java |   2 +-
 .../rewrite/impl/xml/XmlFilterReaderTest.java   |   2 +-
 .../apache/knox/gateway/AuditLoggingTest.java   |   2 +-
 .../apache/knox/gateway/GatewayFilterTest.java  |   4 +-
 .../knox/gateway/GatewayGlobalConfigTest.java   |   2 +-
 .../org/apache/knox/gateway/TempletonDemo.java  |   4 +-
 .../config/impl/GatewayConfigImplTest.java      |   2 +-
 .../gateway/deploy/DeploymentFactoryTest.java   |   2 +-
 .../xml/XmlGatewayDescriptorExporterTest.java   |   2 +-
 .../knox/gateway/jetty/SslSocketTest.java       |   4 +-
 .../knox/gateway/mock/MockConsoleFactory.java   |   2 +-
 .../services/security/CryptoServiceTest.java    |   4 +-
 .../topology/DefaultTopologyServiceTest.java    |   2 +-
 .../builder/PropertyTopologyBuilderTest.java    |   2 +-
 .../validation/TopologyValidatorTest.java       |   2 +-
 .../topology/xml/TopologyRulesModuleTest.java   |   2 +-
 .../knox/gateway/websockets/BadUrlTest.java     |   2 +-
 .../gateway/websockets/WebsocketEchoTest.java   |   2 +-
 .../WebsocketMultipleConnectionTest.java        |   2 +-
 .../src/test/resources/log4j.properties         |   2 +-
 .../knox/gateway/hbase/HBaseDispatchTest.java   |   6 +-
 .../gateway/dispatch/DefaultDispatchTest.java   |   6 +-
 .../security/principal/PrincipalMapperTest.java |   4 +-
 .../hostmap/FileBasedHostMapperTest.java        |   2 +-
 .../security/impl/CMFKeystoreServiceTest.java   |   4 +-
 .../security/impl/CMFMasterServiceTest.java     |   4 +-
 .../apache/knox/gateway/GatewayTestDriver.java  |   2 +-
 .../apache/knox/gateway/SecureClusterTest.java  |   4 +-
 .../java/org/apache/knox/gateway/ShellTest.java |   4 +-
 .../java/org/apache/hadoop/test/Console.java    |  57 --
 .../java/org/apache/hadoop/test/TestUtils.java  | 216 -----
 .../apache/hadoop/test/category/FastTests.java  |  21 -
 .../hadoop/test/category/ManualTests.java       |  21 -
 .../hadoop/test/category/MediumTests.java       |  21 -
 .../hadoop/test/category/ReleaseTest.java       |  21 -
 .../apache/hadoop/test/category/SlowTests.java  |  21 -
 .../apache/hadoop/test/category/UnitTests.java  |  21 -
 .../apache/hadoop/test/category/VerifyTest.java |  21 -
 .../apache/hadoop/test/log/CollectAppender.java |  51 --
 .../apache/hadoop/test/log/NoOpAppender.java    |  98 ---
 .../org/apache/hadoop/test/log/NoOpLogger.java  |  87 --
 .../hadoop/test/mock/MockFilterConfig.java      |  46 -
 .../test/mock/MockHttpServletRequest.java       | 410 ---------
 .../test/mock/MockHttpServletResponse.java      | 195 -----
 .../hadoop/test/mock/MockInteraction.java       |  33 -
 .../hadoop/test/mock/MockRequestMatcher.java    | 330 --------
 .../hadoop/test/mock/MockResponseProvider.java  | 158 ----
 .../org/apache/hadoop/test/mock/MockServer.java | 119 ---
 .../apache/hadoop/test/mock/MockServlet.java    |  61 --
 .../hadoop/test/mock/MockServletContext.java    | 293 -------
 .../test/mock/MockServletInputStream.java       |  54 --
 .../main/java/org/apache/knox/test/Console.java |  57 ++
 .../java/org/apache/knox/test/TestUtils.java    | 216 +++++
 .../apache/knox/test/category/FastTests.java    |  21 +
 .../apache/knox/test/category/ManualTests.java  |  21 +
 .../apache/knox/test/category/MediumTests.java  |  21 +
 .../apache/knox/test/category/ReleaseTest.java  |  21 +
 .../apache/knox/test/category/SlowTests.java    |  21 +
 .../apache/knox/test/category/UnitTests.java    |  21 +
 .../apache/knox/test/category/VerifyTest.java   |  21 +
 .../apache/knox/test/log/CollectAppender.java   |  51 ++
 .../org/apache/knox/test/log/NoOpAppender.java  |  98 +++
 .../org/apache/knox/test/log/NoOpLogger.java    |  87 ++
 .../apache/knox/test/mock/MockFilterConfig.java |  46 +
 .../knox/test/mock/MockHttpServletRequest.java  | 410 +++++++++
 .../knox/test/mock/MockHttpServletResponse.java | 195 +++++
 .../apache/knox/test/mock/MockInteraction.java  |  33 +
 .../knox/test/mock/MockRequestMatcher.java      | 330 ++++++++
 .../knox/test/mock/MockResponseProvider.java    | 157 ++++
 .../org/apache/knox/test/mock/MockServer.java   | 119 +++
 .../org/apache/knox/test/mock/MockServlet.java  |  61 ++
 .../knox/test/mock/MockServletContext.java      | 293 +++++++
 .../knox/test/mock/MockServletInputStream.java  |  54 ++
 .../gateway/AmbariServiceDefinitionTest.java    |   8 +-
 .../knox/gateway/GatewayAdminFuncTest.java      |   2 +-
 .../gateway/GatewayAdminTopologyFuncTest.java   |   8 +-
 .../apache/knox/gateway/GatewayAppFuncTest.java |  10 +-
 .../knox/gateway/GatewayBasicFuncTest.java      |  12 +-
 .../knox/gateway/GatewayDeployFuncTest.java     |  10 +-
 .../knox/gateway/GatewayHealthFuncTest.java     |   2 +-
 .../GatewayLdapDynamicGroupFuncTest.java        |   6 +-
 .../knox/gateway/GatewayLdapGroupFuncTest.java  |   7 +-
 .../gateway/GatewayLdapPosixGroupFuncTest.java  |  11 +-
 .../gateway/GatewayLocalServiceFuncTest.java    |  10 +-
 .../knox/gateway/GatewayMultiFuncTest.java      |  11 +-
 .../GatewayPortMappingDisableFeatureTest.java   |  10 +-
 .../gateway/GatewayPortMappingFailTest.java     |  10 +-
 .../gateway/GatewayPortMappingFuncTest.java     |  10 +-
 .../knox/gateway/GatewaySampleFuncTest.java     |   8 +-
 .../apache/knox/gateway/GatewaySslFuncTest.java |  14 +-
 .../apache/knox/gateway/Knox242FuncTest.java    |   8 +-
 .../gateway/KnoxCliLdapFuncTestNegative.java    |   8 +-
 .../gateway/KnoxCliLdapFuncTestPositive.java    |  10 +-
 .../apache/knox/gateway/KnoxCliSysBindTest.java |  10 +-
 .../gateway/OozieServiceDefinitionTest.java     |   8 +-
 .../apache/knox/gateway/WebHdfsHaFuncTest.java  |  10 +-
 .../deploy/DeploymentFactoryFuncTest.java       |   8 +-
 .../knox/gateway/audit/AuditLayoutTest.java     |   2 +-
 .../knox/gateway/audit/AuditServiceTest.java    |   2 +-
 .../audit/StoreAndForwardAppenderTest.java      |   2 +-
 .../src/test/resources/audit-log4j.properties   |   2 +-
 .../gateway/util/urltemplate/ExpanderTest.java  |   4 +-
 .../gateway/util/urltemplate/MatcherTest.java   |   4 +-
 .../util/urltemplate/MatcherTest.java.orig      | 839 -------------------
 .../gateway/util/urltemplate/ParserTest.java    |   4 +-
 .../gateway/util/urltemplate/RewriterTest.java  |   4 +-
 .../gateway/util/urltemplate/SegmentTest.java   |   4 +-
 .../gateway/util/urltemplate/TemplateTest.java  |   4 +-
 pom.xml                                         |   8 +-
 118 files changed, 2536 insertions(+), 3401 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/messages/MessagesTest.java
----------------------------------------------------------------------
diff --git a/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/messages/MessagesTest.java b/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/messages/MessagesTest.java
index d53e99d..8a9c42e 100644
--- a/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/messages/MessagesTest.java
+++ b/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/messages/MessagesTest.java
@@ -20,8 +20,8 @@ package org.apache.knox.gateway.i18n.messages;
 import org.apache.knox.gateway.i18n.messages.loggers.test.TestMessageLogger;
 import org.apache.knox.gateway.i18n.messages.loggers.test.TestMessageLoggerFactory;
 import org.apache.knox.gateway.i18n.messages.loggers.test.TestMessageRecord;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/resources/ResourcesTest.java
----------------------------------------------------------------------
diff --git a/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/resources/ResourcesTest.java b/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/resources/ResourcesTest.java
index a876b89..d54f379 100644
--- a/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/resources/ResourcesTest.java
+++ b/gateway-i18n/src/test/java/org/apache/knox/gateway/i18n/resources/ResourcesTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.i18n.resources;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/filter/IdentityAssertionHttpServletRequestWrapperTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/filter/IdentityAssertionHttpServletRequestWrapperTest.java b/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/filter/IdentityAssertionHttpServletRequestWrapperTest.java
index 745fbdd..eaa2245 100644
--- a/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/filter/IdentityAssertionHttpServletRequestWrapperTest.java
+++ b/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/filter/IdentityAssertionHttpServletRequestWrapperTest.java
@@ -20,10 +20,10 @@ package org.apache.knox.gateway.identityasserter.filter;
 import org.apache.commons.io.IOUtils;
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.identityasserter.common.filter.IdentityAsserterHttpServletRequestWrapper;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
-import org.apache.hadoop.test.mock.MockHttpServletRequest;
-import org.apache.hadoop.test.mock.MockServletInputStream;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
+import org.apache.knox.test.mock.MockHttpServletRequest;
+import org.apache.knox.test.mock.MockServletInputStream;
 import org.junit.Test;
 import org.junit.After;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/function/UsernameFunctionProcessorTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/function/UsernameFunctionProcessorTest.java b/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/function/UsernameFunctionProcessorTest.java
index 0f9d67c..556443f 100644
--- a/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/function/UsernameFunctionProcessorTest.java
+++ b/gateway-provider-identity-assertion-common/src/test/java/org/apache/knox/gateway/identityasserter/function/UsernameFunctionProcessorTest.java
@@ -24,10 +24,10 @@ import org.apache.knox.gateway.filter.rewrite.spi.UrlRewriteFunctionProcessor;
 import org.apache.knox.gateway.identityasserter.common.function.UsernameFunctionProcessor;
 import org.apache.knox.gateway.security.PrimaryPrincipal;
 import org.apache.knox.gateway.util.urltemplate.Parser;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpLogger;
-import org.apache.hadoop.test.mock.MockInteraction;
-import org.apache.hadoop.test.mock.MockServlet;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpLogger;
+import org.apache.knox.test.mock.MockInteraction;
+import org.apache.knox.test.mock.MockServlet;
 import org.apache.http.auth.BasicUserPrincipal;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletHolder;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite-func-hostmap-static/src/test/java/org/apache/knox/gateway/hostmap/impl/HostmapFunctionProcessorTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite-func-hostmap-static/src/test/java/org/apache/knox/gateway/hostmap/impl/HostmapFunctionProcessorTest.java b/gateway-provider-rewrite-func-hostmap-static/src/test/java/org/apache/knox/gateway/hostmap/impl/HostmapFunctionProcessorTest.java
index 92ec957..c373dc0 100644
--- a/gateway-provider-rewrite-func-hostmap-static/src/test/java/org/apache/knox/gateway/hostmap/impl/HostmapFunctionProcessorTest.java
+++ b/gateway-provider-rewrite-func-hostmap-static/src/test/java/org/apache/knox/gateway/hostmap/impl/HostmapFunctionProcessorTest.java
@@ -31,7 +31,7 @@ import org.apache.knox.gateway.services.hostmap.HostMapperService;
 import org.apache.knox.gateway.util.urltemplate.Parser;
 import org.apache.knox.gateway.util.urltemplate.Resolver;
 import org.apache.knox.gateway.util.urltemplate.Template;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.easymock.EasyMock;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite-func-service-registry/src/test/java/org/apache/knox/gateway/svcregfunc/impl/ServiceRegistryFunctionsTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite-func-service-registry/src/test/java/org/apache/knox/gateway/svcregfunc/impl/ServiceRegistryFunctionsTest.java b/gateway-provider-rewrite-func-service-registry/src/test/java/org/apache/knox/gateway/svcregfunc/impl/ServiceRegistryFunctionsTest.java
index 9f19a4f..47525bb 100644
--- a/gateway-provider-rewrite-func-service-registry/src/test/java/org/apache/knox/gateway/svcregfunc/impl/ServiceRegistryFunctionsTest.java
+++ b/gateway-provider-rewrite-func-service-registry/src/test/java/org/apache/knox/gateway/svcregfunc/impl/ServiceRegistryFunctionsTest.java
@@ -23,10 +23,10 @@ import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.registry.ServiceRegistry;
 import org.apache.knox.gateway.util.urltemplate.Parser;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpLogger;
-import org.apache.hadoop.test.mock.MockInteraction;
-import org.apache.hadoop.test.mock.MockServlet;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpLogger;
+import org.apache.knox.test.mock.MockInteraction;
+import org.apache.knox.test.mock.MockServlet;
 import org.apache.http.auth.BasicUserPrincipal;
 import org.easymock.EasyMock;
 import org.eclipse.jetty.servlet.FilterHolder;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletContextListenerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletContextListenerTest.java b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletContextListenerTest.java
index 1d0d72d..db65e35 100644
--- a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletContextListenerTest.java
+++ b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletContextListenerTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.filter.rewrite.api;
 
-import org.apache.hadoop.test.mock.MockInteraction;
-import org.apache.hadoop.test.mock.MockServlet;
+import org.apache.knox.test.mock.MockInteraction;
+import org.apache.knox.test.mock.MockServlet;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.http.HttpTester;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletFilterTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletFilterTest.java b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletFilterTest.java
index 484786e..1df1c20 100644
--- a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletFilterTest.java
+++ b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/api/UrlRewriteServletFilterTest.java
@@ -20,10 +20,10 @@ package org.apache.knox.gateway.filter.rewrite.api;
 import com.jayway.jsonassert.JsonAssert;
 import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 import org.apache.knox.gateway.util.urltemplate.Parser;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpAppender;
-import org.apache.hadoop.test.mock.MockInteraction;
-import org.apache.hadoop.test.mock.MockServlet;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpAppender;
+import org.apache.knox.test.mock.MockInteraction;
+import org.apache.knox.test.mock.MockServlet;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Logger;
 import org.eclipse.jetty.http.HttpHeader;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/FrontendFunctionProcessorTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/FrontendFunctionProcessorTest.java b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/FrontendFunctionProcessorTest.java
index d40edc9..a946bd2 100644
--- a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/FrontendFunctionProcessorTest.java
+++ b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/FrontendFunctionProcessorTest.java
@@ -27,10 +27,10 @@ import org.apache.knox.gateway.filter.rewrite.spi.UrlRewriteFunctionProcessor;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.registry.ServiceRegistry;
 import org.apache.knox.gateway.util.urltemplate.Parser;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpLogger;
-import org.apache.hadoop.test.mock.MockInteraction;
-import org.apache.hadoop.test.mock.MockServlet;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpLogger;
+import org.apache.knox.test.mock.MockInteraction;
+import org.apache.knox.test.mock.MockServlet;
 import org.apache.http.auth.BasicUserPrincipal;
 import org.easymock.EasyMock;
 import org.eclipse.jetty.http.HttpTester;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/json/JsonFilterReaderTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/json/JsonFilterReaderTest.java b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/json/JsonFilterReaderTest.java
index 1378fef..ad55aeb 100644
--- a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/json/JsonFilterReaderTest.java
+++ b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/json/JsonFilterReaderTest.java
@@ -26,7 +26,7 @@ import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteFilterDescriptor;
 import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteFilterDetectDescriptor;
 import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteRulesDescriptor;
 import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteRulesDescriptorFactory;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.junit.Test;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/xml/XmlFilterReaderTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/xml/XmlFilterReaderTest.java b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/xml/XmlFilterReaderTest.java
index 7ac4626..5aa5cce 100644
--- a/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/xml/XmlFilterReaderTest.java
+++ b/gateway-provider-rewrite/src/test/java/org/apache/knox/gateway/filter/rewrite/impl/xml/XmlFilterReaderTest.java
@@ -37,7 +37,7 @@ import org.apache.knox.gateway.filter.rewrite.ext.UrlRewriteControlDescriptor;
 import org.apache.knox.gateway.filter.rewrite.ext.UrlRewriteMatchDescriptor;
 import org.apache.knox.gateway.filter.rewrite.ext.UrlRewriteMatchDescriptorExt;
 import org.apache.knox.gateway.filter.rewrite.spi.UrlRewriteActionDescriptorBase;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.hamcrest.Matchers;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/AuditLoggingTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/AuditLoggingTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/AuditLoggingTest.java
index 82890c4..03ee0d7 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/AuditLoggingTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/AuditLoggingTest.java
@@ -50,7 +50,7 @@ import org.apache.knox.gateway.audit.log4j.correlation.Log4jCorrelationService;
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.dispatch.DefaultDispatch;
 import org.apache.knox.gateway.i18n.resources.ResourcesFactory;
-import org.apache.hadoop.test.log.CollectAppender;
+import org.apache.knox.test.log.CollectAppender;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.log4j.spi.LoggingEvent;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
index 2fe1f1a..b7f787a 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
@@ -21,8 +21,8 @@ import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 import org.apache.knox.gateway.topology.Topology;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/GatewayGlobalConfigTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayGlobalConfigTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayGlobalConfigTest.java
index 4cfdb8e..1acf9d6 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayGlobalConfigTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayGlobalConfigTest.java
@@ -19,7 +19,7 @@ package org.apache.knox.gateway;
 
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.hamcrest.Matchers;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/TempletonDemo.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/TempletonDemo.java b/gateway-server/src/test/java/org/apache/knox/gateway/TempletonDemo.java
index 66321ea..fd32abc 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/TempletonDemo.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/TempletonDemo.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway;
 
-import org.apache.hadoop.test.category.ManualTests;
-import org.apache.hadoop.test.category.SlowTests;
+import org.apache.knox.test.category.ManualTests;
+import org.apache.knox.test.category.SlowTests;
 import org.apache.http.HttpResponse;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.entity.UrlEncodedFormEntity;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
index bae67e6..06da13d 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/config/impl/GatewayConfigImplTest.java
@@ -1,6 +1,6 @@
 package org.apache.knox.gateway.config.impl;
 
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.hamcrest.CoreMatchers;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryTest.java
index 2973f40..7cea065 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryTest.java
@@ -27,7 +27,7 @@ import org.apache.knox.gateway.topology.Application;
 import org.apache.knox.gateway.topology.Service;
 import org.apache.knox.gateway.topology.Topology;
 import org.apache.knox.gateway.util.XmlUtils;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
 import org.junit.Test;
 import org.w3c.dom.Document;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/descriptor/xml/XmlGatewayDescriptorExporterTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/descriptor/xml/XmlGatewayDescriptorExporterTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/descriptor/xml/XmlGatewayDescriptorExporterTest.java
index 5624a10..d4469c9 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/descriptor/xml/XmlGatewayDescriptorExporterTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/descriptor/xml/XmlGatewayDescriptorExporterTest.java
@@ -20,7 +20,7 @@ package org.apache.knox.gateway.descriptor.xml;
 import org.apache.knox.gateway.descriptor.GatewayDescriptor;
 import org.apache.knox.gateway.descriptor.GatewayDescriptorFactory;
 import org.apache.knox.gateway.util.XmlUtils;
-import org.apache.hadoop.test.Console;
+import org.apache.knox.test.Console;
 import org.junit.Test;
 import org.w3c.dom.Document;
 import org.xml.sax.InputSource;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/jetty/SslSocketTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/jetty/SslSocketTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/jetty/SslSocketTest.java
index 5aafc20..f65c220 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/jetty/SslSocketTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/jetty/SslSocketTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.jetty;
 
-import org.apache.hadoop.test.category.MediumTests;
-import org.apache.hadoop.test.category.ManualTests;
+import org.apache.knox.test.category.MediumTests;
+import org.apache.knox.test.category.ManualTests;
 import org.apache.http.HttpVersion;
 import org.apache.http.conn.ssl.SSLSocketFactory;
 import org.apache.http.params.BasicHttpParams;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/mock/MockConsoleFactory.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/mock/MockConsoleFactory.java b/gateway-server/src/test/java/org/apache/knox/gateway/mock/MockConsoleFactory.java
index cb15db1..590cca0 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/mock/MockConsoleFactory.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/mock/MockConsoleFactory.java
@@ -17,7 +17,7 @@
  */
 package org.apache.knox.gateway.mock;
 
-import org.apache.hadoop.test.mock.MockServlet;
+import org.apache.knox.test.mock.MockServlet;
 import org.eclipse.jetty.server.Handler;
 import org.eclipse.jetty.servlet.ServletContextHandler;
 import org.eclipse.jetty.servlet.ServletHolder;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/services/security/CryptoServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/services/security/CryptoServiceTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/services/security/CryptoServiceTest.java
index 0d8b7b8..72a21b0 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/services/security/CryptoServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/security/CryptoServiceTest.java
@@ -21,8 +21,8 @@ import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.security.impl.ConfigurableEncryptor;
 import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;
-import org.apache.hadoop.test.category.ManualTests;
-import org.apache.hadoop.test.category.MediumTests;
+import org.apache.knox.test.category.ManualTests;
+import org.apache.knox.test.category.MediumTests;
 import org.easymock.EasyMock;
 import org.junit.BeforeClass;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
index e70d096..408d396 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
@@ -26,7 +26,7 @@ import org.apache.commons.io.monitor.FileAlterationObserver;
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
 import org.apache.knox.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.knox.gateway.topology.Param;
 import org.apache.knox.gateway.topology.Provider;
 import org.apache.knox.gateway.topology.Topology;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/topology/builder/PropertyTopologyBuilderTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/builder/PropertyTopologyBuilderTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/builder/PropertyTopologyBuilderTest.java
index 700ac9b..f69dc53 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/builder/PropertyTopologyBuilderTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/builder/PropertyTopologyBuilderTest.java
@@ -20,7 +20,7 @@ import java.util.Enumeration;
 
 import org.apache.knox.gateway.topology.Topology;
 import org.apache.knox.gateway.topology.builder.property.Property;
-import org.apache.hadoop.test.log.NoOpAppender;
+import org.apache.knox.test.log.NoOpAppender;
 import org.apache.log4j.Appender;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/topology/validation/TopologyValidatorTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/validation/TopologyValidatorTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/validation/TopologyValidatorTest.java
index 9337c85..7c16ac4 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/validation/TopologyValidatorTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/validation/TopologyValidatorTest.java
@@ -18,7 +18,7 @@ package org.apache.knox.gateway.topology.validation;
 
 import java.net.URL;
 
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.junit.Test;
 
 import static org.hamcrest.core.Is.is;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/topology/xml/TopologyRulesModuleTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/xml/TopologyRulesModuleTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/xml/TopologyRulesModuleTest.java
index 55c80bd..d75dcfb 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/xml/TopologyRulesModuleTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/xml/TopologyRulesModuleTest.java
@@ -25,7 +25,7 @@ import org.apache.knox.gateway.topology.Service;
 import org.apache.knox.gateway.topology.Topology;
 import org.apache.knox.gateway.topology.Version;
 import org.apache.knox.gateway.topology.builder.TopologyBuilder;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
index 25891bf..3aceadd 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
@@ -42,7 +42,7 @@ import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
 import org.apache.knox.gateway.topology.TopologyEvent;
 import org.apache.knox.gateway.topology.TopologyListener;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.easymock.EasyMock;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
index da24b98..268e14b 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
@@ -45,7 +45,7 @@ import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
 import org.apache.knox.gateway.topology.TopologyEvent;
 import org.apache.knox.gateway.topology.TopologyListener;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.easymock.EasyMock;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
index 1b98616..42bc9c3 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
@@ -48,7 +48,7 @@ import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.topology.TopologyService;
 import org.apache.knox.gateway.topology.TopologyEvent;
 import org.apache.knox.gateway.topology.TopologyListener;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.easymock.EasyMock;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-server/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/log4j.properties b/gateway-server/src/test/resources/log4j.properties
index b212231..f35213e 100644
--- a/gateway-server/src/test/resources/log4j.properties
+++ b/gateway-server/src/test/resources/log4j.properties
@@ -24,7 +24,7 @@ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%5p [%c] %m%n
 
 log4j.logger.audit = INFO, collectappender
-log4j.appender.collectappender = org.apache.hadoop.test.log.CollectAppender
+log4j.appender.collectappender = org.apache.knox.test.log.CollectAppender
 
 #log4j.logger.org.apache.knox.gateway=DEBUG
 #log4j.logger.org.eclipse.jetty=DEBUG
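
Editor's note: tests that assert on captured audit events drain the queue exposed by the renamed appender class, so they only need the new import. A minimal sketch (class name hypothetical), based on the CollectAppender source deleted further down, which exposes a public static BlockingQueue<LoggingEvent>:

    import org.apache.knox.test.log.CollectAppender;
    import org.apache.log4j.spi.LoggingEvent;

    public class AuditQueueExample {
      public static void main( String[] args ) {
        // Drain the events that the "collectappender" configured above captured.
        while( !CollectAppender.queue.isEmpty() ) {
          LoggingEvent event = CollectAppender.queue.poll();
          System.out.println( event.getRenderedMessage() ); // inspect or assert here
        }
      }
    }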

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-service-hbase/src/test/java/org/apache/knox/gateway/hbase/HBaseDispatchTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-hbase/src/test/java/org/apache/knox/gateway/hbase/HBaseDispatchTest.java b/gateway-service-hbase/src/test/java/org/apache/knox/gateway/hbase/HBaseDispatchTest.java
index 526b0e7..e5bae02 100644
--- a/gateway-service-hbase/src/test/java/org/apache/knox/gateway/hbase/HBaseDispatchTest.java
+++ b/gateway-service-hbase/src/test/java/org/apache/knox/gateway/hbase/HBaseDispatchTest.java
@@ -21,9 +21,9 @@ import java.net.URI;
 import javax.servlet.http.HttpServletRequest;
 
 import org.apache.knox.gateway.dispatch.Dispatch;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.easymock.EasyMock;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-spi/src/test/java/org/apache/knox/gateway/dispatch/DefaultDispatchTest.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/test/java/org/apache/knox/gateway/dispatch/DefaultDispatchTest.java b/gateway-spi/src/test/java/org/apache/knox/gateway/dispatch/DefaultDispatchTest.java
index 99e3a33..9d0afc9 100644
--- a/gateway-spi/src/test/java/org/apache/knox/gateway/dispatch/DefaultDispatchTest.java
+++ b/gateway-spi/src/test/java/org/apache/knox/gateway/dispatch/DefaultDispatchTest.java
@@ -40,9 +40,9 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.servlet.SynchronousServletOutputStreamAdapter;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpVersion;
 import org.apache.http.RequestLine;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-spi/src/test/java/org/apache/knox/gateway/security/principal/PrincipalMapperTest.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/test/java/org/apache/knox/gateway/security/principal/PrincipalMapperTest.java b/gateway-spi/src/test/java/org/apache/knox/gateway/security/principal/PrincipalMapperTest.java
index cba7f1c..66b7b46 100644
--- a/gateway-spi/src/test/java/org/apache/knox/gateway/security/principal/PrincipalMapperTest.java
+++ b/gateway-spi/src/test/java/org/apache/knox/gateway/security/principal/PrincipalMapperTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.security.principal;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-spi/src/test/java/org/apache/knox/gateway/services/hostmap/FileBasedHostMapperTest.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/test/java/org/apache/knox/gateway/services/hostmap/FileBasedHostMapperTest.java b/gateway-spi/src/test/java/org/apache/knox/gateway/services/hostmap/FileBasedHostMapperTest.java
index 99be7b7..be4d798 100644
--- a/gateway-spi/src/test/java/org/apache/knox/gateway/services/hostmap/FileBasedHostMapperTest.java
+++ b/gateway-spi/src/test/java/org/apache/knox/gateway/services/hostmap/FileBasedHostMapperTest.java
@@ -17,7 +17,7 @@
  */
 package org.apache.knox.gateway.services.hostmap;
 
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.junit.Test;
 
 import java.net.URL;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFKeystoreServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFKeystoreServiceTest.java b/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFKeystoreServiceTest.java
index 7386f74..28c5ea6 100644
--- a/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFKeystoreServiceTest.java
+++ b/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFKeystoreServiceTest.java
@@ -26,8 +26,8 @@ import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
 import org.apache.knox.gateway.services.security.KeystoreServiceException;
 import org.apache.knox.gateway.services.security.MasterService;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFMasterServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFMasterServiceTest.java b/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFMasterServiceTest.java
index 275b090..d3449c0 100644
--- a/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFMasterServiceTest.java
+++ b/gateway-spi/src/test/java/org/apache/knox/gateway/services/security/impl/CMFMasterServiceTest.java
@@ -20,8 +20,8 @@ package org.apache.knox.gateway.services.security.impl;
 import java.io.File;
 
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestDriver.java
----------------------------------------------------------------------
diff --git a/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestDriver.java b/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestDriver.java
index dd4216f..3dcd02f 100644
--- a/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestDriver.java
+++ b/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestDriver.java
@@ -43,7 +43,7 @@ import org.apache.knox.gateway.config.GatewayConfig;
 import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 import org.apache.knox.gateway.services.DefaultGatewayServices;
 import org.apache.knox.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.test.mock.MockServer;
+import org.apache.knox.test.mock.MockServer;
 import org.hamcrest.CoreMatchers;
 import org.hamcrest.MatcherAssert;
 import org.hamcrest.Matchers;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-release/webhdfs-kerb-test/src/test/java/org/apache/knox/gateway/SecureClusterTest.java
----------------------------------------------------------------------
diff --git a/gateway-test-release/webhdfs-kerb-test/src/test/java/org/apache/knox/gateway/SecureClusterTest.java b/gateway-test-release/webhdfs-kerb-test/src/test/java/org/apache/knox/gateway/SecureClusterTest.java
index dca2a19..5395a82 100644
--- a/gateway-test-release/webhdfs-kerb-test/src/test/java/org/apache/knox/gateway/SecureClusterTest.java
+++ b/gateway-test-release/webhdfs-kerb-test/src/test/java/org/apache/knox/gateway/SecureClusterTest.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.category.ReleaseTest;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.category.ReleaseTest;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpRequest;
 import org.apache.http.auth.AuthScope;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-release/webhdfs-test/src/test/java/org/apache/knox/gateway/ShellTest.java
----------------------------------------------------------------------
diff --git a/gateway-test-release/webhdfs-test/src/test/java/org/apache/knox/gateway/ShellTest.java b/gateway-test-release/webhdfs-test/src/test/java/org/apache/knox/gateway/ShellTest.java
index 455af41..b4f5c1b 100644
--- a/gateway-test-release/webhdfs-test/src/test/java/org/apache/knox/gateway/ShellTest.java
+++ b/gateway-test-release/webhdfs-test/src/test/java/org/apache/knox/gateway/ShellTest.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 package org.apache.knox.gateway;
-import org.apache.hadoop.test.category.ReleaseTest;
+import org.apache.knox.test.category.ReleaseTest;
 import org.junit.experimental.categories.Category;
 
 import java.io.File;
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.TestUtils;
+import org.apache.knox.test.TestUtils;
 import org.apache.log4j.PropertyConfigurator;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/Console.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/Console.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/Console.java
deleted file mode 100644
index e5bce70..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/Console.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test;
-
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-
-public class Console {
-
-  PrintStream oldOut, newOut;
-  PrintStream oldErr, newErr;
-  ByteArrayOutputStream newOutBuf, newErrBuf;
-
-  public void capture() {
-    oldErr = System.err;
-    newErrBuf = new ByteArrayOutputStream();
-    newErr = new PrintStream( newErrBuf );
-
-    oldOut = System.out; // I18N not required.
-    newOutBuf = new ByteArrayOutputStream();
-    newOut = new PrintStream( newOutBuf );
-
-    System.setErr( newErr );
-    System.setOut( newOut );
-  }
-
-  public byte[] getOut() {
-    return newOutBuf.toByteArray();
-  }
-
-  public byte[] getErr() {
-    return newErrBuf.toByteArray();
-  }
-
-  public void release() {
-    System.setErr( oldErr );
-    System.setOut( oldOut );
-    newErr.close();
-    newOut.close();
-  }
-
-}
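
Editor's note: the Console class deleted above is recreated under org.apache.knox.test with an identical API, so callers change only the import. A minimal usage sketch against the new package (the printed text is illustrative):

    import org.apache.knox.test.Console;

    public class ConsoleExample {
      public static void main( String[] args ) throws Exception {
        Console console = new Console();
        console.capture();                    // swap System.out/err for in-memory buffers
        try {
          System.out.println( "hidden" );     // goes to the captured buffer, not the terminal
        } finally {
          console.release();                  // restore the original streams
        }
        System.out.write( console.getOut() ); // replay what was captured
      }
    }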

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
deleted file mode 100644
index 076c312..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.io.StringWriter;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.URL;
-import java.nio.ByteBuffer;
-import java.util.Properties;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.log4j.Logger;
-import org.apache.velocity.Template;
-import org.apache.velocity.VelocityContext;
-import org.apache.velocity.app.VelocityEngine;
-import org.apache.velocity.runtime.RuntimeConstants;
-import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
-import org.eclipse.jetty.http.HttpTester;
-import org.eclipse.jetty.servlet.ServletTester;
-
-public class TestUtils {
-
-  private static Logger LOG = Logger.getLogger(TestUtils.class);
-
-  public static final long SHORT_TIMEOUT = 1000L;
-  public static final long MEDIUM_TIMEOUT = 20 * 1000L;
-  public static final long LONG_TIMEOUT = 60 * 1000L;
-
-  public static String getResourceName( Class clazz, String name ) {
-    name = clazz.getName().replaceAll( "\\.", "/" ) + "/" + name;
-    return name;
-  }
-
-  public static URL getResourceUrl( Class clazz, String name ) throws FileNotFoundException {
-    name = getResourceName( clazz, name );
-    URL url = ClassLoader.getSystemResource( name );
-    if( url == null ) {
-      throw new FileNotFoundException( name );
-    }
-    return url;
-  }
-
-  public static URL getResourceUrl( String name ) throws FileNotFoundException {
-    URL url = ClassLoader.getSystemResource( name );
-    if( url == null ) {
-      throw new FileNotFoundException( name );
-    }
-    return url;
-  }
-
-  public static InputStream getResourceStream( String name ) throws IOException {
-    URL url = ClassLoader.getSystemResource( name );
-    InputStream stream = url.openStream();
-    return stream;
-  }
-
-  public static InputStream getResourceStream( Class clazz, String name ) throws IOException {
-    URL url = getResourceUrl( clazz, name );
-    InputStream stream = url.openStream();
-    return stream;
-  }
-
-  public static Reader getResourceReader( String name, String charset ) throws IOException {
-    return new InputStreamReader( getResourceStream( name ), charset );
-  }
-
-  public static Reader getResourceReader( Class clazz, String name, String charset ) throws IOException {
-    return new InputStreamReader( getResourceStream( clazz, name ), charset );
-  }
-
-  public static String getResourceString( Class clazz, String name, String charset ) throws IOException {
-    return IOUtils.toString( getResourceReader( clazz, name, charset ) );
-  }
-
-  public static File createTempDir( String prefix ) throws IOException {
-    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
-    File tempDir = new File( targetDir, prefix + UUID.randomUUID() );
-    FileUtils.forceMkdir( tempDir );
-    return tempDir;
-  }
-
-  public static void LOG_ENTER() {
-    StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
-    System.out.flush();
-    System.out.println( String.format( "Running %s#%s", caller.getClassName(), caller.getMethodName() ) );
-    System.out.flush();
-  }
-
-  public static void LOG_EXIT() {
-    StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
-    System.out.flush();
-    System.out.println( String.format( "Exiting %s#%s", caller.getClassName(), caller.getMethodName() ) );
-    System.out.flush();
-  }
-
-  public static void awaitPortOpen( InetSocketAddress address, int timeout, int delay ) throws InterruptedException {
-    long maxTime = System.currentTimeMillis() + timeout;
-    do {
-      try {
-        Socket socket = new Socket();
-        socket.connect( address, delay );
-        socket.close();
-        return;
-      } catch ( IOException e ) {
-        //e.printStackTrace();
-      }
-    } while( System.currentTimeMillis() < maxTime );
-    throw new IllegalStateException( "Timed out " + timeout + " waiting for port " + address );
-  }
-
-  public static void awaitNon404HttpStatus( URL url, int timeout, int delay ) throws InterruptedException {
-    long maxTime = System.currentTimeMillis() + timeout;
-    do {
-      Thread.sleep( delay );
-      HttpURLConnection conn = null;
-      try {
-        conn = (HttpURLConnection)url.openConnection();
-        conn.getInputStream().close();
-        return;
-      } catch ( IOException e ) {
-        //e.printStackTrace();
-        try {
-          if( conn != null && conn.getResponseCode() != 404 ) {
-            return;
-          }
-        } catch ( IOException ee ) {
-          //ee.printStackTrace();
-        }
-      }
-    } while( System.currentTimeMillis() < maxTime );
-    throw new IllegalStateException( "Timed out " + timeout + " waiting for URL " + url );
-  }
-
-  public static String merge( String resource, Properties properties ) {
-    ClasspathResourceLoader loader = new ClasspathResourceLoader();
-    loader.getResourceStream( resource );
-
-    VelocityEngine engine = new VelocityEngine();
-    Properties config = new Properties();
-    config.setProperty( RuntimeConstants.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem" );
-    config.setProperty( RuntimeConstants.RESOURCE_LOADER, "classpath" );
-    config.setProperty( "classpath.resource.loader.class", ClasspathResourceLoader.class.getName() );
-    engine.init( config );
-
-    VelocityContext context = new VelocityContext( properties );
-    Template template = engine.getTemplate( resource );
-    StringWriter writer = new StringWriter();
-    template.merge( context, writer );
-    return writer.toString();
-  }
-
-  public static String merge( Class base, String resource, Properties properties ) {
-    String baseResource = base.getName().replaceAll( "\\.", "/" );
-    String fullResource = baseResource + "/" + resource;
-    return merge( fullResource, properties );
-  }
-
-  public static int findFreePort() throws IOException {
-    ServerSocket socket = new ServerSocket(0);
-    int port = socket.getLocalPort();
-    socket.close();
-    return port;
-  }
-
-  public static void waitUntilNextSecond() {
-    long before = System.currentTimeMillis();
-    long wait;
-    while( ( wait = ( 1000 - ( System.currentTimeMillis() - before ) ) ) > 0 ) {
-      try {
-        Thread.sleep( wait );
-      } catch( InterruptedException e ) {
-        // Ignore.
-      }
-    }
-  }
-
-  public static HttpTester.Response execute( ServletTester server, HttpTester.Request request ) throws Exception {
-    LOG.debug( "execute: request=" + request );
-    ByteBuffer requestBuffer = request.generate();
-    LOG.trace( "execute: requestBuffer=[" + new String(requestBuffer.array(),0,requestBuffer.limit()) + "]" );
-    ByteBuffer responseBuffer = server.getResponses( requestBuffer, 30, TimeUnit.SECONDS );
-    HttpTester.Response response = HttpTester.parseResponse( responseBuffer );
-    LOG.trace( "execute: responseBuffer=[" + new String(responseBuffer.array(),0,responseBuffer.limit()) + "]" );
-    LOG.debug( "execute: reponse=" + response );
-    return response;
-  }
-
-
-}
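
Editor's note: TestUtils likewise moves to org.apache.knox.test with the same API as the file deleted above. A minimal sketch of the port helpers (timeout and delay values are arbitrary); since nothing listens on the freshly allocated port, awaitPortOpen is expected to time out here:

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.knox.test.TestUtils;

    public class TestUtilsExample {
      public static void main( String[] args ) throws IOException, InterruptedException {
        int port = TestUtils.findFreePort();  // grab an unused local port
        InetSocketAddress address = new InetSocketAddress( "localhost", port );
        try {
          // Polls the address until it accepts connections or the timeout elapses.
          TestUtils.awaitPortOpen( address, 2000, 100 );
        } catch( IllegalStateException expected ) {
          System.out.println( expected.getMessage() );
        }
      }
    }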

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/FastTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/FastTests.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/FastTests.java
deleted file mode 100644
index 7761430..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/FastTests.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface FastTests {
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ManualTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ManualTests.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ManualTests.java
deleted file mode 100644
index 840dbb3..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ManualTests.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface ManualTests {
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/MediumTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/MediumTests.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/MediumTests.java
deleted file mode 100644
index 5cec811..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/MediumTests.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface MediumTests {
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ReleaseTest.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ReleaseTest.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ReleaseTest.java
deleted file mode 100644
index bd52807..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/ReleaseTest.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface ReleaseTest {
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/SlowTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/SlowTests.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/SlowTests.java
deleted file mode 100644
index d395b02..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/SlowTests.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface SlowTests {
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/UnitTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/UnitTests.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/UnitTests.java
deleted file mode 100644
index f36d539..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/UnitTests.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface UnitTests {
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/VerifyTest.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/VerifyTest.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/VerifyTest.java
deleted file mode 100644
index 0b0acaa..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/category/VerifyTest.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.category;
-
-public interface VerifyTest {
-}

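The four marker interfaces removed above (ReleaseTest, SlowTests, UnitTests, VerifyTest) are plain JUnit category tokens with no members. As a hedged illustration of how such empty interfaces are consumed, the sketch below tags a hypothetical test class with one of them so a build profile can include or exclude the group; the class and method names are invented:

import org.junit.Test;
import org.junit.experimental.categories.Category;

// Assumes this class lives where UnitTests is visible; @Category reads the
// interface purely as a type token.
@Category( UnitTests.class )
public class ExampleCategorizedTest {

  @Test
  public void runsOnlyInUnitTestProfile() {
    // Selected only when the build configuration lists the UnitTests group.
  }
}
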
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/CollectAppender.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/CollectAppender.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/CollectAppender.java
deleted file mode 100644
index d14ab7b..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/CollectAppender.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.log;
-
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.spi.LoggingEvent;
-
-public class CollectAppender extends AppenderSkeleton {
-
-  public CollectAppender() {
-    super();
-  }
-
-  public static BlockingQueue<LoggingEvent> queue = new LinkedBlockingQueue<LoggingEvent>();
-  public static boolean closed = false;
-
-  @Override
-  protected void append( LoggingEvent event ) {
-    event.getProperties();
-    queue.add( event );
-  }
-
-  @Override
-  public void close() {
-    closed = true;
-  }
-
-  @Override
-  public boolean requiresLayout() {
-    return false;
-  }
-
-}
\ No newline at end of file

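CollectAppender, deleted above, buffers every log4j event in a static BlockingQueue so tests can assert on logging side effects. A minimal sketch of that usage, assuming log4j 1.x on the classpath; the class name is illustrative:

import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;

public class CollectAppenderExample {
  public static void main( String[] args ) throws Exception {
    Logger logger = Logger.getLogger( CollectAppenderExample.class );
    logger.addAppender( new CollectAppender() ); // route events into the queue

    logger.info( "hello" );

    // Drain the static queue and inspect what was logged.
    LoggingEvent event = CollectAppender.queue.take();
    System.out.println( event.getRenderedMessage() ); // prints "hello"
  }
}
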
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpAppender.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpAppender.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpAppender.java
deleted file mode 100644
index 647f0e2..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpAppender.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.log;
-
-import org.apache.log4j.Appender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.ErrorHandler;
-import org.apache.log4j.spi.Filter;
-import org.apache.log4j.spi.LoggingEvent;
-
-import java.util.Enumeration;
-
-public class NoOpAppender implements Appender {
-
-  public static Enumeration<Appender> setUp() {
-    Enumeration<Appender> appenders = (Enumeration<Appender>)Logger.getRootLogger().getAllAppenders();
-    Logger.getRootLogger().removeAllAppenders();
-    Logger.getRootLogger().addAppender( new NoOpAppender() );
-    return appenders;
-  }
-
-  public static void tearDown( Enumeration<Appender> appenders ) {
-    if( appenders != null ) {
-      while( appenders.hasMoreElements() ) {
-        Logger.getRootLogger().addAppender( appenders.nextElement() );
-      }
-    }
-  }
-
-  @Override
-  public void addFilter( Filter newFilter ) {
-  }
-
-  @Override
-  public Filter getFilter() {
-    return null;
-  }
-
-  @Override
-  public void clearFilters() {
-  }
-
-  @Override
-  public void close() {
-  }
-
-  @Override
-  public void doAppend( LoggingEvent event ) {
-  }
-
-  @Override
-  public String getName() {
-    return this.getClass().getName();
-  }
-
-  @Override
-  public void setErrorHandler( ErrorHandler errorHandler ) {
-  }
-
-  @Override
-  public ErrorHandler getErrorHandler() {
-    return null;
-  }
-
-  @Override
-  public void setLayout( Layout layout ) {
-  }
-
-  @Override
-  public Layout getLayout() {
-    return null;
-  }
-
-  @Override
-  public void setName( String name ) {
-  }
-
-  @Override
-  public boolean requiresLayout() {
-    return false;
-  }
-}

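NoOpAppender pairs setUp() and tearDown() so a test can silence the root logger for one noisy block and then restore the original appenders exactly. A short sketch of that pattern, with an invented logger name:

import java.util.Enumeration;

import org.apache.log4j.Appender;
import org.apache.log4j.Logger;

public class QuietBlockExample {
  public static void main( String[] args ) {
    Enumeration<Appender> saved = NoOpAppender.setUp(); // swap in the no-op
    try {
      Logger.getLogger( "noisy.component" ).warn( "suppressed" );
    } finally {
      NoOpAppender.tearDown( saved ); // reattach the original appenders
    }
  }
}
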
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpLogger.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpLogger.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpLogger.java
deleted file mode 100644
index 8fd24ed..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/log/NoOpLogger.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.log;
-
-import org.eclipse.jetty.util.log.Logger;
-
-public class NoOpLogger implements Logger {
-
-  @Override
-  public String getName() {
-    return "";
-  }
-
-  @Override
-  public void warn( String msg, Object... args ) {
-  }
-
-  @Override
-  public void warn( Throwable thrown ) {
-  }
-
-  @Override
-  public void warn( String msg, Throwable thrown ) {
-  }
-
-  @Override
-  public void info( String msg, Object... args ) {
-  }
-
-  @Override
-  public void info( Throwable thrown ) {
-  }
-
-  @Override
-  public void info( String msg, Throwable thrown ) {
-  }
-
-  @Override
-  public boolean isDebugEnabled() {
-    return false;
-  }
-
-  @Override
-  public void setDebugEnabled( boolean enabled ) {
-  }
-
-  @Override
-  public void debug( String msg, Object... args ) {
-  }
-
-  @Override
-  public void debug( String msg, long arg ) {
-  }
-
-  @Override
-  public void debug( Throwable thrown ) {
-  }
-
-  @Override
-  public void debug( String msg, Throwable thrown ) {
-  }
-
-  @Override
-  public Logger getLogger( String name ) {
-    return this;
-  }
-
-  @Override
-  public void ignore( Throwable ignored ) {
-  }
-
-}

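NoOpLogger implements Jetty's logging SPI, so installing it silences embedded Jetty in tests. A hedged sketch, assuming jetty-util on the classpath; the server bootstrap itself is elided:

import org.eclipse.jetty.util.log.Log;

public class QuietJettyExample {
  public static void main( String[] args ) {
    // Must run before any Jetty class initializes its logging.
    Log.setLog( new NoOpLogger() );
    // ... start an embedded Jetty server here; its internal logging now
    // lands in no-op methods.
  }
}
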
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockFilterConfig.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockFilterConfig.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockFilterConfig.java
deleted file mode 100644
index eae1ef6..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockFilterConfig.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import java.util.Enumeration;
-
-public class MockFilterConfig implements FilterConfig {
-
-  @Override
-  public String getFilterName() {
-    return null;
-  }
-
-  @Override
-  public ServletContext getServletContext() {
-    return null;
-  }
-
-  @Override
-  public String getInitParameter( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getInitParameterNames() {
-    return null;
-  }
-
-}

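MockFilterConfig answers null for every FilterConfig method, which is enough to init() many servlet filters in isolation. Where a test needs one real init parameter, a small hypothetical refinement like the following will do:

// Illustrative subclass, not part of this commit: answer a single init
// parameter and fall back to the null-returning defaults for everything else.
public class SingleParamFilterConfig extends MockFilterConfig {
  private final String name;
  private final String value;

  public SingleParamFilterConfig( String name, String value ) {
    this.name = name;
    this.value = value;
  }

  @Override
  public String getInitParameter( String s ) {
    return name.equals( s ) ? value : null;
  }
}
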

[20/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
Merge branch 'master' into KNOX-998-Package_Restructuring

# Conflicts:
#	gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java
#	gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java
#	gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java
#	gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
#	gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
#	gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
#	gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
#	gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/c754cc06
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/c754cc06
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/c754cc06

Branch: refs/heads/master
Commit: c754cc06ac33c7cfff28c47ec562d888241c2641
Parents: 9577842 11ec78a
Author: Sandeep More <mo...@apache.org>
Authored: Wed Nov 1 17:10:14 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Wed Nov 1 17:10:14 2017 -0400

----------------------------------------------------------------------
 gateway-demo-ldap/pom.xml                       |   36 +-
 .../security/ldap/BaseDirectoryService.java     | 2323 ------------------
 .../ldap/BaseDirectoryServiceFactory.java       |  290 ---
 .../security/ldap/SimpleDirectoryService.java   |    6 +-
 .../ldap/SimpleDirectoryServiceFactory.java     |   34 -
 .../ldap/SimpleLdapDirectoryServer.java         |   38 +-
 .../ambari/AmbariServiceDiscovery.java          |    3 +-
 .../filter/RegexIdentityAssertionFilter.java    |    4 +-
 .../regex/filter/RegexTemplate.java             |   12 +-
 .../regex/filter/RegexTemplateTest.java         |   23 +-
 .../webappsec/filter/StrictTranportFilter.java  |  137 ++
 .../webappsec/deploy/WebAppSecContributor.java  |   11 +
 .../webappsec/StrictTranportFilterTest.java     |  164 ++
 .../home/conf/topologies/manager.xml            |    1 +
 gateway-release/home/templates/sandbox-apps.xml |    1 +
 .../org/apache/knox/gateway/GatewayFilter.java  |   65 +-
 .../apache/knox/gateway/GatewayMessages.java    |   34 +-
 .../gateway/config/impl/GatewayConfigImpl.java  |    3 +-
 .../topology/impl/DefaultTopologyService.java   |  221 +-
 .../builder/BeanPropertyTopologyBuilder.java    |   11 +
 .../xml/KnoxFormatXmlTopologyRules.java         |    2 +
 .../src/main/resources/conf/topology-v1.xsd     |    1 +
 .../apache/knox/gateway/GatewayFilterTest.java  |   49 +
 .../topology/DefaultTopologyServiceTest.java    |  404 ++-
 .../topology/file/provider-config-one.xml       |   74 +
 .../topology/file/simple-descriptor-five.json   |   14 +
 .../topology/file/simple-descriptor-six.json    |   18 +
 .../service/admin/HrefListingMarshaller.java    |   75 +
 .../service/admin/TopologiesResource.java       |  393 ++-
 .../service/admin/beans/BeanConverter.java      |    2 +
 .../gateway/service/admin/beans/Topology.java   |   11 +
 .../services/ambariui/2.2.1/rewrite.xml         |  104 +
 .../services/ambariui/2.2.1/service.xml         |   92 +
 .../knox/gateway/i18n/GatewaySpiMessages.java   |   10 +-
 .../services/topology/TopologyService.java      |   33 +-
 .../apache/knox/gateway/topology/Topology.java  |    9 +
 .../gateway/topology/topology_binding-xml.xml   |    5 +-
 gateway-test-release/pom.xml                    |   11 -
 gateway-test/pom.xml                            |   14 -
 .../gateway/GatewayAdminTopologyFuncTest.java   |  586 +++++
 pom.xml                                         |    8 +-
 41 files changed, 2495 insertions(+), 2837 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-demo-ldap/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java
----------------------------------------------------------------------
diff --cc gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java
index 53add76,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java
+++ b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryService.java

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java
----------------------------------------------------------------------
diff --cc gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java
index aed78bf,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java
+++ b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/BaseDirectoryServiceFactory.java

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryService.java
----------------------------------------------------------------------
diff --cc gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryService.java
index 69cdb3c,0000000..4e843a5
mode 100644,000000..100644
--- a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryService.java
+++ b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryService.java
@@@ -1,29 -1,0 +1,33 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.security.ldap;
 +
- public class SimpleDirectoryService extends BaseDirectoryService {
++import org.apache.directory.server.core.DefaultDirectoryService;
++
++public class SimpleDirectoryService extends DefaultDirectoryService {
 +
 +  public SimpleDirectoryService() throws Exception {
++    super();
 +  }
 +
++  @Override
 +  protected void showSecurityWarnings() throws Exception {
 +    // NoOp - This prevents confusing warnings from being output.
 +  }
 +
 +}

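With BaseDirectoryService gone, SimpleDirectoryService now rides on ApacheDS's stock DefaultDirectoryService and only overrides showSecurityWarnings() to keep test output quiet. A condensed sketch of the wiring a caller performs, mirroring SimpleLdapDirectoryServer below; error handling omitted:

import java.util.UUID;

import org.apache.directory.server.core.api.DirectoryService;
import org.apache.directory.server.core.factory.DefaultDirectoryServiceFactory;
import org.apache.directory.server.core.factory.DirectoryServiceFactory;
import org.apache.directory.server.core.factory.JdbmPartitionFactory;

public class QuietDirectoryServiceExample {
  public static void main( String[] args ) throws Exception {
    DirectoryService service = new SimpleDirectoryService();
    service.setShutdownHookEnabled( false ); // fewer stray threads in test JVMs

    // Hand the pre-configured instance to the stock factory for initialization.
    DirectoryServiceFactory factory =
        new DefaultDirectoryServiceFactory( service, new JdbmPartitionFactory() );
    factory.init( UUID.randomUUID().toString() );
    System.out.println( "started: " + factory.getDirectoryService().isStarted() );
  }
}
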
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java
----------------------------------------------------------------------
diff --cc gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java
index a25355b,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java
+++ b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleDirectoryServiceFactory.java

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleLdapDirectoryServer.java
----------------------------------------------------------------------
diff --cc gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleLdapDirectoryServer.java
index 9f59e9b,0000000..4809f19
mode 100644,000000..100644
--- a/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleLdapDirectoryServer.java
+++ b/gateway-demo-ldap/src/main/java/org/apache/knox/gateway/security/ldap/SimpleLdapDirectoryServer.java
@@@ -1,124 -1,0 +1,160 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.security.ldap;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.directory.api.ldap.model.entry.DefaultModification;
 +import org.apache.directory.api.ldap.model.entry.ModificationOperation;
 +import org.apache.directory.api.ldap.model.exception.LdapException;
 +import org.apache.directory.api.ldap.model.name.Dn;
 +import org.apache.directory.server.core.api.CoreSession;
 +import org.apache.directory.server.core.api.DirectoryService;
 +import org.apache.directory.server.core.api.partition.Partition;
++import org.apache.directory.server.core.factory.DefaultDirectoryServiceFactory;
 +import org.apache.directory.server.core.factory.DirectoryServiceFactory;
++import org.apache.directory.server.core.factory.JdbmPartitionFactory;
++import org.apache.directory.server.core.factory.PartitionFactory;
 +import org.apache.directory.server.ldap.LdapServer;
 +import org.apache.directory.server.protocol.shared.store.LdifFileLoader;
 +import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 +import org.apache.directory.server.protocol.shared.transport.Transport;
 +import org.apache.log4j.PropertyConfigurator;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
 +
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.net.ServerSocket;
 +import java.util.UUID;
 +
 +public class SimpleLdapDirectoryServer {
 +
++  private static final Logger LOG = LoggerFactory.getLogger(SimpleLdapDirectoryServer.class);
++
 +  private DirectoryServiceFactory factory;
 +
 +  private DirectoryService service;
 +
 +  private LdapServer server;
 +
 +  public SimpleLdapDirectoryServer( String rootDn, File usersLdif, Transport... transports ) throws Exception {
 +    if( !usersLdif.exists() ) {
 +      throw new FileNotFoundException( usersLdif.getAbsolutePath() );
 +    }
 +
-     factory = new SimpleDirectoryServiceFactory();
++    DirectoryService directoryService = null;
++    try {
++      // creating the instance here so that
++      // we can set some properties like access control and anonymous access
++      // before starting up the service
++      directoryService = new SimpleDirectoryService();
++
++      // no need to register a shutdown hook during tests because this
++      // starts a lot of threads and slows down test execution
++      directoryService.setShutdownHookEnabled( false );
++    } catch ( Exception e ) {
++      throw new RuntimeException( e );
++    }
++
++    PartitionFactory partitionFactory = null;
++    try {
++      String typeName = System.getProperty( "apacheds.partition.factory" );
++
++      if ( typeName != null ) {
++        Class<? extends PartitionFactory> type = ( Class<? extends PartitionFactory> ) Class.forName( typeName );
++        partitionFactory = type.newInstance();
++      } else {
++        partitionFactory = new JdbmPartitionFactory();
++      }
++    } catch ( Exception e ) {
++      LOG.error( "Error instantiating custom partiton factory", e );
++      throw new RuntimeException( e );
++    }
++
++    factory = new DefaultDirectoryServiceFactory( directoryService, partitionFactory );
 +    factory.init( UUID.randomUUID().toString() );
 +    service = factory.getDirectoryService();
 +
 +    enabledPosixSchema( service );
 +
 +    Partition partition = factory.getPartitionFactory().createPartition(
 +        service.getSchemaManager(), service.getDnFactory(), "users", rootDn, 500,
 +        service.getInstanceLayout().getInstanceDirectory() );
 +    service.addPartition( partition );
 +
 +    CoreSession session = service.getAdminSession();
 +    LdifFileLoader lfl = new LdifFileLoader( session, usersLdif, null );
 +    lfl.execute();
 +
 +    server = new LdapServer();
 +    server.setTransports( transports );
 +    server.setDirectoryService( service );
 +  }
 +
 +  private static void enabledPosixSchema( DirectoryService service ) throws LdapException {
 +    service.getSchemaManager().getLoadedSchema( "nis" ).enable();
 +    service.getAdminSession().modify(
 +        new Dn( "cn=nis,ou=schema" ),
 +        new DefaultModification( ModificationOperation.REPLACE_ATTRIBUTE, "m-disabled", "FALSE" ) );
 +  }
 +
 +  public void start() throws Exception {
 +    service.startup();
 +    server.start();
 +  }
 +
 +  public void stop( boolean clean ) throws Exception {
 +    server.stop();
 +    service.shutdown();
 +    if( clean ) {
 +      FileUtils.deleteDirectory( service.getInstanceLayout().getInstanceDirectory() );
 +    }
 +  }
 +
 +  public static void main( String[] args ) throws Exception {
 +    PropertyConfigurator.configure( System.getProperty( "log4j.configuration" ) );
 +
 +    SimpleLdapDirectoryServer ldap;
 +
 +    File file;
 +    if ( args.length < 1 ) {
 +      file = new File( "conf/users.ldif" );
 +    } else {
 +      File dir = new File( args[0] );
 +      if( !dir.exists() || !dir.isDirectory() ) {
 +        throw new FileNotFoundException( dir.getAbsolutePath() );
 +      }
 +      file = new File( dir, "users.ldif" );
 +    }
 +
 +    if( !file.exists() || !file.canRead() ) {
 +      throw new FileNotFoundException( file.getAbsolutePath() );
 +    }
 +
 +    int port = 33389;
 +
 +    // Make sure the port is free.
 +    ServerSocket socket = new ServerSocket( port );
 +    socket.close();
 +
 +    TcpTransport transport = new TcpTransport( port );
 +    ldap = new SimpleLdapDirectoryServer( "dc=hadoop,dc=apache,dc=org", file, transport );
 +    ldap.start();
 +  }
 +
 +}

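The constructor above now builds the DirectoryService itself (so properties like the shutdown hook can be set before startup) and chooses a PartitionFactory from the apacheds.partition.factory system property, defaulting to JDBM. A hedged sketch of embedding the finished server in a test fixture; the ldif path is illustrative:

import java.io.File;

import org.apache.directory.server.protocol.shared.transport.TcpTransport;

public class EmbeddedLdapExample {
  public static void main( String[] args ) throws Exception {
    SimpleLdapDirectoryServer ldap = new SimpleLdapDirectoryServer(
        "dc=hadoop,dc=apache,dc=org", new File( "conf/users.ldif" ),
        new TcpTransport( 33389 ) );
    ldap.start();
    // ... run LDAP-dependent tests against ldap://localhost:33389 ...
    ldap.stop( true ); // true also deletes the instance directory
  }
}
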
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
index 70af903,0000000..dbc783d
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
@@@ -1,305 -1,0 +1,306 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +
 +import net.minidev.json.JSONArray;
 +import net.minidev.json.JSONObject;
 +import net.minidev.json.JSONValue;
 +import org.apache.knox.gateway.config.ConfigurationException;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.AliasServiceException;
 +import org.apache.knox.gateway.topology.discovery.GatewayService;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
 +import org.apache.http.HttpEntity;
 +import org.apache.http.HttpStatus;
 +import org.apache.http.client.methods.CloseableHttpResponse;
 +import org.apache.http.client.methods.HttpGet;
 +import org.apache.http.impl.client.CloseableHttpClient;
 +import org.apache.http.message.BasicHeader;
 +import org.apache.http.util.EntityUtils;
 +
 +
 +class AmbariServiceDiscovery implements ServiceDiscovery {
 +
 +    static final String TYPE = "AMBARI";
 +
 +    static final String AMBARI_CLUSTERS_URI = "/api/v1/clusters";
 +
 +    static final String AMBARI_HOSTROLES_URI =
 +                                       AMBARI_CLUSTERS_URI + "/%s/services?fields=components/host_components/HostRoles";
 +
 +    static final String AMBARI_SERVICECONFIGS_URI =
 +            AMBARI_CLUSTERS_URI + "/%s/configurations/service_config_versions?is_current=true";
 +
 +    private static final String COMPONENT_CONFIG_MAPPING_FILE =
 +                                                        "ambari-service-discovery-component-config-mapping.properties";
 +
 +    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    // Map of component names to service configuration types
 +    private static Map<String, String> componentServiceConfigs = new HashMap<>();
 +    static {
 +        try {
 +            Properties configMapping = new Properties();
 +            configMapping.load(AmbariServiceDiscovery.class.getClassLoader().getResourceAsStream(COMPONENT_CONFIG_MAPPING_FILE));
 +            for (String componentName : configMapping.stringPropertyNames()) {
 +                componentServiceConfigs.put(componentName, configMapping.getProperty(componentName));
 +            }
 +        } catch (Exception e) {
 +            log.failedToLoadServiceDiscoveryConfiguration(COMPONENT_CONFIG_MAPPING_FILE, e);
 +        }
 +    }
 +
 +    private static final String DEFAULT_USER_ALIAS = "ambari.discovery.user";
 +    private static final String DEFAULT_PWD_ALIAS  = "ambari.discovery.password";
 +
 +    @GatewayService
 +    private AliasService aliasService;
 +
 +    private CloseableHttpClient httpClient = null;
 +
 +
 +    AmbariServiceDiscovery() {
 +        httpClient = org.apache.http.impl.client.HttpClients.createDefault();
 +    }
 +
 +
 +    @Override
 +    public String getType() {
 +        return TYPE;
 +    }
 +
 +
 +    @Override
 +    public Map<String, Cluster> discover(ServiceDiscoveryConfig config) {
 +        Map<String, Cluster> clusters = new HashMap<String, Cluster>();
 +
 +        String discoveryAddress = config.getAddress();
 +
 +        // Invoke Ambari REST API to discover the available clusters
 +        String clustersDiscoveryURL = String.format("%s" + AMBARI_CLUSTERS_URI, discoveryAddress);
 +
 +        JSONObject json = invokeREST(clustersDiscoveryURL, config.getUser(), config.getPasswordAlias());
 +
 +        // Parse the cluster names from the response, and perform the cluster discovery
 +        JSONArray clusterItems = (JSONArray) json.get("items");
 +        for (Object clusterItem : clusterItems) {
 +            String clusterName = (String) ((JSONObject)((JSONObject) clusterItem).get("Clusters")).get("cluster_name");
 +            try {
 +                Cluster c = discover(config, clusterName);
 +                clusters.put(clusterName, c);
 +            } catch (Exception e) {
 +                log.clusterDiscoveryError(clusterName, e);
 +            }
 +        }
 +
 +        return clusters;
 +    }
 +
 +
 +    @Override
 +    public Cluster discover(ServiceDiscoveryConfig config, String clusterName) {
 +        AmbariCluster cluster = new AmbariCluster(clusterName);
 +
 +        Map<String, String> serviceComponents = new HashMap<>();
 +
 +        String discoveryAddress = config.getAddress();
 +        String discoveryUser = config.getUser();
 +        String discoveryPwdAlias = config.getPasswordAlias();
 +
 +        Map<String, List<String>> componentHostNames = new HashMap<>();
 +        String hostRolesURL = String.format("%s" + AMBARI_HOSTROLES_URI, discoveryAddress, clusterName);
 +        JSONObject hostRolesJSON = invokeREST(hostRolesURL, discoveryUser, discoveryPwdAlias);
 +        if (hostRolesJSON != null) {
 +            // Process the host roles JSON
 +            JSONArray items = (JSONArray) hostRolesJSON.get("items");
 +            for (Object obj : items) {
 +                JSONArray components = (JSONArray) ((JSONObject) obj).get("components");
 +                for (Object component : components) {
 +                    JSONArray hostComponents = (JSONArray) ((JSONObject) component).get("host_components");
 +                    for (Object hostComponent : hostComponents) {
 +                        JSONObject hostRoles = (JSONObject) ((JSONObject) hostComponent).get("HostRoles");
 +                        String serviceName = (String) hostRoles.get("service_name");
 +                        String componentName = (String) hostRoles.get("component_name");
 +
 +                        serviceComponents.put(componentName, serviceName);
 +
 +                        // Assuming public host name is more applicable than host_name
 +                        String hostName = (String) hostRoles.get("public_host_name");
 +                        if (hostName == null) {
 +                            // Some (even slightly) older versions of Ambari/HDP do not return public_host_name,
 +                            // so fall back to host_name in those cases.
 +                            hostName = (String) hostRoles.get("host_name");
 +                        }
 +
 +                        if (hostName != null) {
 +                            log.discoveredServiceHost(serviceName, hostName);
 +                            if (!componentHostNames.containsKey(componentName)) {
 +                                componentHostNames.put(componentName, new ArrayList<String>());
 +                            }
 +                            componentHostNames.get(componentName).add(hostName);
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigurations =
 +                                                 new HashMap<String, Map<String, AmbariCluster.ServiceConfiguration>>();
 +        String serviceConfigsURL = String.format("%s" + AMBARI_SERVICECONFIGS_URI, discoveryAddress, clusterName);
 +        JSONObject serviceConfigsJSON = invokeREST(serviceConfigsURL, discoveryUser, discoveryPwdAlias);
 +        if (serviceConfigsJSON != null) {
 +            // Process the service configurations
 +            JSONArray serviceConfigs = (JSONArray) serviceConfigsJSON.get("items");
 +            for (Object serviceConfig : serviceConfigs) {
 +                String serviceName = (String) ((JSONObject) serviceConfig).get("service_name");
 +                JSONArray configurations = (JSONArray) ((JSONObject) serviceConfig).get("configurations");
 +                for (Object configuration : configurations) {
 +                    String configType = (String) ((JSONObject) configuration).get("type");
 +                    String configVersion = String.valueOf(((JSONObject) configuration).get("version"));
 +
 +                    Map<String, String> configProps = new HashMap<String, String>();
 +                    JSONObject configProperties = (JSONObject) ((JSONObject) configuration).get("properties");
 +                    for (String propertyName : configProperties.keySet()) {
 +                        configProps.put(propertyName, String.valueOf(((JSONObject) configProperties).get(propertyName)));
 +                    }
 +                    if (!serviceConfigurations.containsKey(serviceName)) {
 +                        serviceConfigurations.put(serviceName, new HashMap<String, AmbariCluster.ServiceConfiguration>());
 +                    }
 +                    serviceConfigurations.get(serviceName).put(configType, new AmbariCluster.ServiceConfiguration(configType, configVersion, configProps));
 +                    cluster.addServiceConfiguration(serviceName, configType, new AmbariCluster.ServiceConfiguration(configType, configVersion, configProps));
 +                }
 +            }
 +        }
 +
 +        // Construct the AmbariCluster model
 +        for (String componentName : serviceComponents.keySet()) {
 +            String serviceName = serviceComponents.get(componentName);
 +            List<String> hostNames = componentHostNames.get(componentName);
 +
 +            Map<String, AmbariCluster.ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +            String configType = componentServiceConfigs.get(componentName);
 +            if (configType != null) {
 +                AmbariCluster.ServiceConfiguration svcConfig = configs.get(configType);
 +                AmbariComponent c = new AmbariComponent(componentName,
 +                                                        svcConfig.getVersion(),
 +                                                        clusterName,
 +                                                        serviceName,
 +                                                        hostNames,
 +                                                        svcConfig.getProperties());
 +                cluster.addComponent(c);
 +            }
 +        }
 +
 +        return cluster;
 +    }
 +
 +
 +    protected JSONObject invokeREST(String url, String username, String passwordAlias) {
 +        JSONObject result = null;
 +
 +        CloseableHttpResponse response = null;
 +        try {
 +            HttpGet request = new HttpGet(url);
 +
 +            // If no configured username, then use default username alias
 +            String password = null;
 +            if (username == null) {
 +                if (aliasService != null) {
 +                    try {
 +                        char[] defaultUser = aliasService.getPasswordFromAliasForGateway(DEFAULT_USER_ALIAS);
 +                        if (defaultUser != null) {
 +                            username = new String(defaultUser);
 +                        }
 +                    } catch (AliasServiceException e) {
 +                        log.aliasServiceUserError(DEFAULT_USER_ALIAS, e.getLocalizedMessage());
 +                    }
 +                }
 +
 +                // If username is still null
 +                if (username == null) {
 +                    log.aliasServiceUserNotFound();
 +                    throw new ConfigurationException("No username is configured for Ambari service discovery.");
 +                }
 +            }
 +
 +            if (aliasService != null) {
-                 // If not password alias is configured, then try the default alias
++                // If no password alias is configured, then try the default alias
 +                if (passwordAlias == null) {
 +                    passwordAlias = DEFAULT_PWD_ALIAS;
 +                }
++
 +                try {
 +                    char[] pwd = aliasService.getPasswordFromAliasForGateway(passwordAlias);
 +                    if (pwd != null) {
 +                        password = new String(pwd);
 +                    }
 +
 +                } catch (AliasServiceException e) {
 +                    log.aliasServicePasswordError(passwordAlias, e.getLocalizedMessage());
 +                }
 +            }
 +
 +            // If the password could not be determined
 +            if (password == null) {
 +                log.aliasServicePasswordNotFound();
 +                throw new ConfigurationException("No password is configured for Ambari service discovery.");
 +            }
 +
 +            // Add an auth header if credentials are available
 +            String encodedCreds =
 +                    org.apache.commons.codec.binary.Base64.encodeBase64String((username + ":" + password).getBytes());
 +            request.addHeader(new BasicHeader("Authorization", "Basic " + encodedCreds));
 +
 +            response = httpClient.execute(request);
 +
 +            if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
 +                HttpEntity entity = response.getEntity();
 +                if (entity != null) {
 +                    result = (JSONObject) JSONValue.parse((EntityUtils.toString(entity)));
 +                    log.debugJSON(result.toJSONString());
 +                } else {
 +                    log.noJSON(url);
 +                }
 +            } else {
 +                log.unexpectedRestResponseStatusCode(url, response.getStatusLine().getStatusCode());
 +            }
 +
 +        } catch (IOException e) {
 +            log.restInvocationError(url, e);
 +        } finally {
 +            if(response != null) {
 +                try {
 +                    response.close();
 +                } catch (IOException e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +        return result;
 +    }
 +
 +
 +}

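discover() above walks the Ambari REST payloads with json-smart, pulling cluster_name out of each element of the top-level items array. A standalone sketch of that traversal over a fabricated payload:

import net.minidev.json.JSONArray;
import net.minidev.json.JSONObject;
import net.minidev.json.JSONValue;

public class ClustersPayloadExample {
  public static void main( String[] args ) {
    // Shape mirrors /api/v1/clusters; the cluster name is invented.
    String payload = "{\"items\":[{\"Clusters\":{\"cluster_name\":\"Sandbox\"}}]}";

    JSONObject json = (JSONObject) JSONValue.parse( payload );
    JSONArray items = (JSONArray) json.get( "items" );
    for ( Object item : items ) {
      JSONObject clusters = (JSONObject) ((JSONObject) item).get( "Clusters" );
      System.out.println( clusters.get( "cluster_name" ) ); // Sandbox
    }
  }
}
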
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexIdentityAssertionFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexIdentityAssertionFilter.java
index 4cc86ae,0000000..3c9cf11
mode 100644,000000..100644
--- a/gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexIdentityAssertionFilter.java
+++ b/gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexIdentityAssertionFilter.java
@@@ -1,88 -1,0 +1,90 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.identityasserter.regex.filter;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletException;
 +
 +import org.apache.knox.gateway.identityasserter.common.filter.CommonIdentityAssertionFilter;
 +import org.apache.knox.gateway.security.principal.PrincipalMappingException;
 +
 +import java.util.Map;
 +import java.util.StringTokenizer;
 +import java.util.TreeMap;
++import java.lang.Boolean;
 +
 +public class RegexIdentityAssertionFilter extends
 +    CommonIdentityAssertionFilter {
 +
 +  private String input = null;
 +  private String output = null;
 +  private Map<String,String> dict;
 +  RegexTemplate template;
 +  
 +  @Override
 +  public void init(FilterConfig filterConfig) throws ServletException {
 +    super.init(filterConfig);
 +    try {
 +      input = filterConfig.getInitParameter( "input" );
 +      if( input == null ) {
 +        input = "";
 +      }
 +      output = filterConfig.getInitParameter( "output" );
 +      if( output == null ) {
 +        output = "";
 +      }
 +      dict = loadDictionary( filterConfig.getInitParameter( "lookup" ) );
-       template = new RegexTemplate( input, output, dict );
++      boolean useOriginalOnLookupFailure = Boolean.parseBoolean(filterConfig.getInitParameter("use.original.on.lookup.failure"));
++      template = new RegexTemplate( input, output, dict, useOriginalOnLookupFailure);
 +    } catch ( PrincipalMappingException e ) {
 +      throw new ServletException( e );
 +    }
 +  }
 +
 +  public String[] mapGroupPrincipals(String mappedPrincipalName, Subject subject) {
 +    // Returning null will allow existing Subject group principals to remain the same
 +    return null;
 +  }
 +
 +  public String mapUserPrincipal(String principalName) {
 +    return template.apply( principalName );
 +  }
 +
 +  private Map<String, String> loadDictionary( String config ) throws PrincipalMappingException {
 +    Map<String,String> dict = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
 +    if( config != null && !config.isEmpty() ) {
 +      try {
 +        StringTokenizer t = new StringTokenizer( config, ";" );
 +        while( t.hasMoreTokens() ) {
 +          String nvp = t.nextToken();
 +          String[] a = nvp.split( "=" );
 +          dict.put( a[0].trim(), a[1].trim() );
 +        }
 +        return dict;
 +      } catch( Exception e ) {
 +        dict.clear();
 +        throw new PrincipalMappingException(
 +            "Unable to load lookup dictionary from provided configuration: " + config +
 +                ".  No principal mapping will be provided.", e );
 +      }
 +    }
 +    return dict;
 +  }
 +
 +}

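init() above parses the lookup init parameter into a case-insensitive dictionary of semicolon-separated name=value pairs. A small worked sketch of exactly that parsing, using an invented mapping:

import java.util.Map;
import java.util.StringTokenizer;
import java.util.TreeMap;

public class LookupDictionaryExample {
  public static void main( String[] args ) {
    String config = "us=USA;ca=CANADA";
    Map<String,String> dict = new TreeMap<>( String.CASE_INSENSITIVE_ORDER );
    StringTokenizer t = new StringTokenizer( config, ";" );
    while ( t.hasMoreTokens() ) {
      String[] a = t.nextToken().split( "=" );
      dict.put( a[0].trim(), a[1].trim() );
    }
    System.out.println( dict.get( "US" ) ); // USA, via the comparator
  }
}
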
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplate.java
----------------------------------------------------------------------
diff --cc gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplate.java
index e8f108e,0000000..659d3df
mode 100644,000000..100644
--- a/gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplate.java
+++ b/gateway-provider-identity-assertion-regex/src/main/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplate.java
@@@ -1,75 -1,0 +1,79 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p/>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p/>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.identityasserter.regex.filter;
 +
 +import java.util.Map;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
 +public class RegexTemplate {
 +
 +  private static Pattern directPattern = Pattern.compile( "\\{(\\[?\\d+?\\]?)\\}" );
 +  private static Pattern indirectPattern = Pattern.compile( "\\[(\\d+?)\\]" );
 +
 +  Pattern inputPattern;
 +  String outputTemplate;
 +  Map<String,String> lookupTable;
++  boolean useOriginalOnLookupFailure;
 +
 +  public RegexTemplate( String regex, String template ) {
-     this( regex, template, null );
++    this( regex, template, null, false );
 +  }
 +
-   public RegexTemplate( String regex, String template, Map<String,String> map ) {
++  public RegexTemplate( String regex, String template, Map<String,String> map, boolean useOriginalOnLookupFailure ) {
 +    this.inputPattern = Pattern.compile( regex );
 +    this.outputTemplate = template;
 +    this.lookupTable = map;
++    this.useOriginalOnLookupFailure = useOriginalOnLookupFailure;
 +  }
 +
 +  public String apply( String input ) {
 +    String output = outputTemplate;
 +    Matcher inputMatcher = inputPattern.matcher( input );
 +    if( inputMatcher.find() ) {
 +      output = expandTemplate( inputMatcher, output );
 +    }
 +    return output;
 +  }
 +
 +  private String expandTemplate( Matcher inputMatcher, String output ) {
 +    Matcher directMatcher = directPattern.matcher( output );
 +    while( directMatcher.find() ) {
++      String lookupKey = null;
 +      String lookupValue = null;
 +      String lookupStr = directMatcher.group( 1 );
 +      Matcher indirectMatcher = indirectPattern.matcher( lookupStr );
 +      if( indirectMatcher.find() ) {
 +        lookupStr = indirectMatcher.group( 1 );
 +        int lookupIndex = Integer.parseInt( lookupStr );
 +        if( lookupTable != null ) {
-           String lookupKey = inputMatcher.group( lookupIndex );
++          lookupKey = inputMatcher.group( lookupIndex );
 +          lookupValue = lookupTable.get( lookupKey );
 +        }
 +      } else {
 +        int lookupIndex = Integer.parseInt( lookupStr );
 +        lookupValue = inputMatcher.group( lookupIndex );
 +      }
-       output = directMatcher.replaceFirst( lookupValue == null ? "" : lookupValue );
++      String replaceWith = this.useOriginalOnLookupFailure ? lookupKey : "" ;
++      output = directMatcher.replaceFirst( lookupValue == null ? replaceWith : lookupValue );
 +      directMatcher = directPattern.matcher( output );
 +    }
 +    return output;
 +  }
 +
 +}

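The new useOriginalOnLookupFailure flag decides what an indirect {[n]} reference expands to when the dictionary has no entry: the empty string as before, or the matched group itself. A short worked example of the difference, with an invented mapping:

import java.util.Map;
import java.util.TreeMap;

public class LookupFailureExample {
  public static void main( String[] args ) {
    Map<String,String> map = new TreeMap<>( String.CASE_INSENSITIVE_ORDER );
    map.put( "us", "USA" );

    RegexTemplate drop = new RegexTemplate( "(.*)@(.*?)\\..*", "{1}:{[2]}", map, false );
    RegexTemplate keep = new RegexTemplate( "(.*)@(.*?)\\..*", "{1}:{[2]}", map, true );

    // "nj" has no dictionary entry, so the flag controls the expansion.
    System.out.println( drop.apply( "member@nj.apache.org" ) ); // member:
    System.out.println( keep.apply( "member@nj.apache.org" ) ); // member:nj
  }
}
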
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-provider-identity-assertion-regex/src/test/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplateTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-identity-assertion-regex/src/test/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplateTest.java
index 3c3b06f,0000000..49630be
mode 100644,000000..100644
--- a/gateway-provider-identity-assertion-regex/src/test/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplateTest.java
+++ b/gateway-provider-identity-assertion-regex/src/test/java/org/apache/knox/gateway/identityasserter/regex/filter/RegexTemplateTest.java
@@@ -1,72 -1,0 +1,93 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p/>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p/>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.identityasserter.regex.filter;
 +
 +import org.junit.Test;
 +
 +import java.util.Map;
 +import java.util.TreeMap;
 +
 +import static org.hamcrest.MatcherAssert.assertThat;
 +import static org.hamcrest.core.Is.is;
 +
 +public class RegexTemplateTest {
 +
 +  @Test
 +  public void testExtractUsernameFromEmailAddress() {
 +
 +    RegexTemplate template;
 +    String actual;
 +
 +    template = new RegexTemplate( "(.*)@.*", "prefix_{1}_suffix" );
 +    actual = template.apply( "member@apache.org" );
 +    assertThat( actual, is( "prefix_member_suffix" ) );
 +
 +    template = new RegexTemplate( "(.*)@.*", "prefix_{0}_suffix" );
 +    actual = template.apply( "member@apache.org" );
 +    assertThat( actual, is( "prefix_member@apache.org_suffix" ) );
 +
 +    template = new RegexTemplate( "(.*)@.*", "prefix_{1}_{a}_suffix" );
 +    actual = template.apply( "member@apache.org" );
 +    assertThat( actual, is( "prefix_member_{a}_suffix" ) );
 +
 +  }
 +
 +  @Test
 +  public void testExtractUsernameFromEmailAddressAndMapDomain() {
 +
 +    RegexTemplate template;
 +    Map<String,String> map = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
 +    map.put( "us", "USA" );
 +    map.put( "ca", "CANADA" );
 +
 +    String actual;
 +
-     template = new RegexTemplate( "(.*)@(.*?)\\..*", "prefix_{1}:{[2]}_suffix", map );
++    template = new RegexTemplate( "(.*)@(.*?)\\..*", "prefix_{1}:{[2]}_suffix", map, false );
 +    actual = template.apply( "member@us.apache.org" );
 +    assertThat( actual, is( "prefix_member:USA_suffix" ) );
 +
 +    actual = template.apply( "member@ca.apache.org" );
 +    assertThat( actual, is( "prefix_member:CANADA_suffix" ) );
 +
 +    actual = template.apply( "member@nj.apache.org" );
 +    assertThat( actual, is( "prefix_member:_suffix" ) );
 +
 +  }
 +
++  @Test
++  public void testLookupFailure() {
++
++    RegexTemplate template;
++    Map<String,String> map = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
++    map.put( "us", "USA" );
++    map.put( "ca", "CANADA" );
++
++    String actual;
++
++    template = new RegexTemplate( "(.*)@(.*?)\\..*", "prefix_{1}:{[2]}_suffix", map, true );
++    actual = template.apply( "member@us.apache.org" );
++    assertThat( actual, is( "prefix_member:USA_suffix" ) );
++
++    actual = template.apply( "member@ca.apache.org" );
++    assertThat( actual, is( "prefix_member:CANADA_suffix" ) );
++
++    actual = template.apply( "member@nj.apache.org" );
++    assertThat( actual, is( "prefix_member:nj_suffix" ) );
++
++  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
index a182b37,0000000..17fb8c2
mode 100644,000000..100644
--- a/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
+++ b/gateway-provider-security-webappsec/src/main/java/org/apache/knox/gateway/webappsec/deploy/WebAppSecContributor.java
@@@ -1,107 -1,0 +1,118 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.webappsec.deploy;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +
 +import org.apache.knox.gateway.deploy.DeploymentContext;
 +import org.apache.knox.gateway.deploy.ProviderDeploymentContributorBase;
 +import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
 +import org.apache.knox.gateway.descriptor.ResourceDescriptor;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +
 +public class WebAppSecContributor extends
 +    ProviderDeploymentContributorBase {
 +  private static final String ROLE = "webappsec";
 +  private static final String NAME = "WebAppSec";
 +  private static final String CSRF_SUFFIX = "_CSRF";
 +  private static final String CSRF_FILTER_CLASSNAME = "org.apache.knox.gateway.webappsec.filter.CSRFPreventionFilter";
 +  private static final String CSRF_ENABLED = "csrf.enabled";
 +  private static final String CORS_SUFFIX = "_CORS";
 +  private static final String CORS_FILTER_CLASSNAME = "com.thetransactioncompany.cors.CORSFilter";
 +  private static final String CORS_ENABLED = "cors.enabled";
 +  private static final String XFRAME_OPTIONS_SUFFIX = "_XFRAMEOPTIONS";
 +  private static final String XFRAME_OPTIONS_FILTER_CLASSNAME = "org.apache.knox.gateway.webappsec.filter.XFrameOptionsFilter";
 +  private static final String XFRAME_OPTIONS_ENABLED = "xframe.options.enabled";
++  private static final String STRICT_TRANSPORT_SUFFIX = "_STRICTTRANSPORT";
++  private static final String STRICT_TRANSPORT_FILTER_CLASSNAME = "org.apache.knox.gateway.webappsec.filter.StrictTransportFilter";
++  private static final String STRICT_TRANSPORT_ENABLED = "strict.transport.enabled";
 +
 +
 +  @Override
 +  public String getRole() {
 +    return ROLE;
 +  }
 +
 +  @Override
 +  public String getName() {
 +    return NAME;
 +  }
 +
 +  @Override
 +  public void initializeContribution(DeploymentContext context) {
 +    super.initializeContribution(context);
 +  }
 +
 +  @Override
 +  public void contributeFilter(DeploymentContext context, Provider provider, Service service, 
 +      ResourceDescriptor resource, List<FilterParamDescriptor> params) {
 +    
 +    Provider webappsec = context.getTopology().getProvider(ROLE, NAME);
 +    if (webappsec != null && webappsec.isEnabled()) {
 +      Map<String, String> providerParams = provider.getParams();
 +      if (params == null) {
 +        params = new ArrayList<FilterParamDescriptor>();
 +      }
 +
 +      // CORS support
 +      String corsEnabled = providerParams.get(CORS_ENABLED);
 +      if ("true".equals(corsEnabled)) {
 +        provisionConfig(resource, providerParams, params, "cors.");
 +        resource.addFilter().name( getName() + CORS_SUFFIX ).role( getRole() ).impl( CORS_FILTER_CLASSNAME ).params( params );
 +      }
 +
 +      // CSRF protection
 +      params = new ArrayList<FilterParamDescriptor>();
 +      String csrfEnabled = providerParams.get(CSRF_ENABLED);
 +      if ("true".equals(csrfEnabled)) {
 +        provisionConfig(resource, providerParams, params, "csrf.");
 +        resource.addFilter().name( getName() + CSRF_SUFFIX ).role( getRole() ).impl( CSRF_FILTER_CLASSNAME ).params( params );
 +      }
 +
 +      // X-Frame-Options - clickjacking protection
 +      params = new ArrayList<FilterParamDescriptor>();
 +      String xframeOptionsEnabled = providerParams.get(XFRAME_OPTIONS_ENABLED);
 +      if ("true".equals(xframeOptionsEnabled)) {
 +        provisionConfig(resource, providerParams, params, "xframe.");
 +        resource.addFilter().name( getName() + XFRAME_OPTIONS_SUFFIX ).role( getRole() ).impl( XFRAME_OPTIONS_FILTER_CLASSNAME ).params( params );
 +      }
++
++      // HTTP Strict-Transport-Security
++      params = new ArrayList<FilterParamDescriptor>();
++      String strictTransportEnabled = providerParams.get(STRICT_TRANSPORT_ENABLED);
++      if ("true".equals(strictTransportEnabled)) {
++        provisionConfig(resource, providerParams, params, "strict.");
++        resource.addFilter().name( getName() + STRICT_TRANSPORT_SUFFIX ).role( getRole() ).impl( STRICT_TRANSPORT_FILTER_CLASSNAME ).params( params );
++      }
 +    }
 +  }
 +
 +  private void provisionConfig(ResourceDescriptor resource, Map<String,String> providerParams,
 +      List<FilterParamDescriptor> params, String prefix) {
 +    for(Entry<String, String> entry : providerParams.entrySet()) {
 +      if (entry.getKey().startsWith(prefix)) {
 +        params.add( resource.createFilterParam().name( entry.getKey().toLowerCase() ).value( entry.getValue() ) );
 +      }
 +    }
 +  }
 +}
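
[Editor's note] provisionConfig() above copies every provider parameter whose key starts
with the given prefix into the filter's init params, lower-casing the key and passing the
value through unchanged. A minimal standalone sketch of that behavior (plain JDK Java;
the parameter names are illustrative, not taken from a real topology):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PrefixParamDemo {
      public static void main(String[] args) {
        Map<String, String> providerParams = new LinkedHashMap<>();
        providerParams.put("strict.transport.enabled", "true");
        providerParams.put("strict.Max-Age", "31536000");  // mixed case on purpose
        providerParams.put("csrf.enabled", "false");       // different prefix, ignored

        Map<String, String> filterParams = new LinkedHashMap<>();
        for (Map.Entry<String, String> e : providerParams.entrySet()) {
          if (e.getKey().startsWith("strict.")) {
            // keys are lower-cased, values pass through unchanged
            filterParams.put(e.getKey().toLowerCase(), e.getValue());
          }
        }
        System.out.println(filterParams);
        // {strict.transport.enabled=true, strict.max-age=31536000}
      }
    }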

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-release/home/conf/topologies/manager.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
index 5d7c5db,0000000..8dd29bf
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/GatewayFilter.java
@@@ -1,390 -1,0 +1,453 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditContext;
 +import org.apache.knox.gateway.audit.api.AuditService;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.CorrelationContext;
 +import org.apache.knox.gateway.audit.api.CorrelationServiceFactory;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.i18n.resources.ResourcesFactory;
++import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.util.urltemplate.Matcher;
 +import org.apache.knox.gateway.util.urltemplate.Parser;
 +import org.apache.knox.gateway.util.urltemplate.Template;
 +
 +import javax.servlet.Filter;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
++import javax.servlet.http.HttpServletRequestWrapper;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.Enumeration;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.UUID;
 +
 +/**
 + *
 + */
 +public class GatewayFilter implements Filter {
 +
 +  private static final FilterChain EMPTY_CHAIN = new FilterChain() {
 +    public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse ) throws IOException, ServletException {
 +    }
 +  };
 +  
 +  private static final GatewayMessages LOG = MessagesFactory.get( GatewayMessages.class );
 +  private static final GatewayResources RES = ResourcesFactory.get( GatewayResources.class );
 +  private static AuditService auditService = AuditServiceFactory.getAuditService();
 +  private static Auditor auditor = auditService.getAuditor(
 +      AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +      AuditConstants.KNOX_COMPONENT_NAME );
 +
 +  private Set<Holder> holders;
 +  private Matcher<Chain> chains;
 +  private FilterConfig config;
 +
 +  public GatewayFilter() {
 +    holders = new HashSet<>();
 +    chains = new Matcher<Chain>();
 +  }
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +    this.config = filterConfig;
 +  }
 +
 +  @Override
 +  public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain ) throws IOException, ServletException {
 +    doFilter( servletRequest, servletResponse );
 +    if( filterChain != null ) {
 +      filterChain.doFilter( servletRequest, servletResponse );
 +    }
 +  }
 +
 +  @SuppressWarnings("unchecked")
 +  public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse ) throws IOException, ServletException {
 +    HttpServletRequest httpRequest = (HttpServletRequest)servletRequest;
 +    HttpServletResponse httpResponse = (HttpServletResponse)servletResponse;
 +
 +    //TODO: The resulting pathInfo + query needs to be added to the servlet context somehow so that filters don't need to rebuild it.  This is done in HttpClientDispatch right now for example.
 +    String servlet = httpRequest.getServletPath();
 +    String path = httpRequest.getPathInfo();
 +    String query = httpRequest.getQueryString();
 +    String requestPath = ( servlet == null ? "" : servlet ) + ( path == null ? "" : path );
 +    String requestPathWithQuery = requestPath + ( query == null ? "" : "?" + query );
 +
 +    Template pathWithQueryTemplate;
 +    try {
 +      pathWithQueryTemplate = Parser.parseLiteral( requestPathWithQuery );
 +    } catch( URISyntaxException e ) {
 +      throw new ServletException( e );
 +    }
 +    String contextWithPathAndQuery = httpRequest.getContextPath() + requestPathWithQuery;
 +    LOG.receivedRequest( httpRequest.getMethod(), requestPath );
 +
 +    servletRequest.setAttribute(
 +        AbstractGatewayFilter.SOURCE_REQUEST_URL_ATTRIBUTE_NAME, pathWithQueryTemplate );
 +    servletRequest.setAttribute(
 +        AbstractGatewayFilter.SOURCE_REQUEST_CONTEXT_URL_ATTRIBUTE_NAME, contextWithPathAndQuery );
 +
 +    Matcher<Chain>.Match match = chains.match( pathWithQueryTemplate );
-     
++
++    // if there was no match then look for a default service for the topology
++    if (match == null) {
++      Topology topology = (Topology) servletRequest.getServletContext().getAttribute("org.apache.hadoop.gateway.topology");
++      if (topology != null) {
++        String defaultServicePath = topology.getDefaultServicePath();
++        if (defaultServicePath != null) {
++          try {
++            String newPathWithQuery = defaultServicePath + "/" + pathWithQueryTemplate;
++            match = chains.match(Parser.parseLiteral(newPathWithQuery));
++            String origUrl = ((HttpServletRequest) servletRequest).getRequestURL().toString();
++            String url = origUrl;
++            if (path.equals("/")) {
++              url += defaultServicePath;
++            }
++            else {
++              int index = origUrl.indexOf(path);
++              url = origUrl.substring(0, index) + "/" + defaultServicePath + path;
++            }
++            String contextPath = defaultServicePath;
++            servletRequest = new ForwardedRequest((HttpServletRequest) servletRequest,
++                contextPath,
++                url);
++          } catch (URISyntaxException e) {
++            throw new ServletException( e );
++          }
++        }
++      }
++    }
++
 +    assignCorrelationRequestId();
 +    // Populate Audit/correlation parameters
 +    AuditContext auditContext = auditService.getContext();
 +    auditContext.setTargetServiceName( match == null ? null : match.getValue().getResourceRole() );
 +    auditContext.setRemoteIp( getRemoteAddress(servletRequest) );
 +    auditContext.setRemoteHostname( servletRequest.getRemoteHost() );
 +    auditor.audit(
 +        Action.ACCESS, contextWithPathAndQuery, ResourceType.URI,
 +        ActionOutcome.UNAVAILABLE, RES.requestMethod(((HttpServletRequest)servletRequest).getMethod()));
 +    
 +    if( match != null ) {
 +      Chain chain = match.getValue();
 +      servletRequest.setAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE, chain.getResourceRole() );
 +      try {
 +        chain.doFilter( servletRequest, servletResponse );
 +      } catch( IOException e ) {
 +        LOG.failedToExecuteFilter( e );
 +        auditor.audit( Action.ACCESS, contextWithPathAndQuery, ResourceType.URI, ActionOutcome.FAILURE );
 +        throw e;
 +      } catch( ServletException e ) {
 +        LOG.failedToExecuteFilter( e );
 +        auditor.audit( Action.ACCESS, contextWithPathAndQuery, ResourceType.URI, ActionOutcome.FAILURE );
 +        throw e;
 +      } catch( RuntimeException e ) {
 +        LOG.failedToExecuteFilter( e );
 +        auditor.audit( Action.ACCESS, contextWithPathAndQuery, ResourceType.URI, ActionOutcome.FAILURE );
 +        throw e;
 +      } catch( ThreadDeath e ) {
 +        LOG.failedToExecuteFilter( e );
 +        auditor.audit( Action.ACCESS, contextWithPathAndQuery, ResourceType.URI, ActionOutcome.FAILURE );
 +        throw e;
 +      } catch( Throwable e ) {
 +        LOG.failedToExecuteFilter( e );
 +        auditor.audit( Action.ACCESS, contextWithPathAndQuery, ResourceType.URI, ActionOutcome.FAILURE );
 +        throw new ServletException( e );
 +      }
 +    } else {
 +      LOG.failedToMatchPath( requestPath );
 +      httpResponse.setStatus( HttpServletResponse.SC_NOT_FOUND );
 +    }
 +    //KAM[ Don't do this or the Jetty default servlet will overwrite any response setup by the filter.
 +    // filterChain.doFilter( servletRequest, servletResponse );
 +    //]
 +  }
 +
 +  private String getRemoteAddress(ServletRequest servletRequest) {
 +    GatewayConfig gatewayConfig =
 +        (GatewayConfig) servletRequest.getServletContext().
 +        getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    String addrHeaderName = gatewayConfig.getHeaderNameForRemoteAddress();
 +    String addr = ((HttpServletRequest)servletRequest).getHeader(addrHeaderName);
 +    if (addr == null || addr.trim().isEmpty()) {
 +      addr = servletRequest.getRemoteAddr();
 +    }
 +    return addr;
 +  }
 +
 +  @Override
 +  public void destroy() {
 +    for( Holder holder : holders ) {
 +      holder.destroy();
 +    }
 +  }
 +
 +  private void addHolder( Holder holder ) {
 +    holders.add( holder );
 +    Chain chain = chains.get( holder.template );
 +    if( chain == null ) {
 +      chain = new Chain();
 +      chain.setResourceRole( holder.getResourceRole() );
 +      chains.add( holder.template, chain );
 +    }
 +    chain.chain.add( holder );
 +  }
 +
 +  public void addFilter( String path, String name, Filter filter, Map<String,String> params, String resourceRole ) throws URISyntaxException {
 +    Holder holder = new Holder( path, name, filter, params, resourceRole );
 +    addHolder( holder );
 +  }
 +
 +//  public void addFilter( String path, String name, Class<RegexDirFilter> clazz, Map<String,String> params ) throws URISyntaxException {
 +//    Holder holder = new Holder( path, name, clazz, params );
 +//    addHolder( holder );
 +//  }
 +
 +  public void addFilter( String path, String name, String clazz, Map<String,String> params, String resourceRole ) throws URISyntaxException {
 +    Holder holder = new Holder( path, name, clazz, params, resourceRole );
 +    addHolder( holder );
 +  }
 +
 +  // Now creating the correlation context only if required since it may be created upstream in the CorrelationHandler.
 +  private void assignCorrelationRequestId() {
 +    CorrelationContext correlationContext = CorrelationServiceFactory.getCorrelationService().getContext();
 +    if( correlationContext == null ) {
 +      correlationContext = CorrelationServiceFactory.getCorrelationService().createContext();
 +    }
 +    String requestId = correlationContext.getRequestId();
 +    if( requestId == null ) {
 +      correlationContext.setRequestId( UUID.randomUUID().toString() );
 +    }
 +  }
 +
 +  private class Chain implements FilterChain {
 +
 +    private List<Holder> chain;
 +    private String resourceRole; 
 +
 +    private Chain() {
 +      this.chain = new ArrayList<Holder>();
 +    }
 +
 +    private Chain( List<Holder> chain ) {
 +      this.chain = chain;
 +    }
 +
 +    public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse ) throws IOException, ServletException {
 +      if( chain != null && !chain.isEmpty() ) {
 +        final Filter filter = chain.get( 0 );
 +        final FilterChain nextChain = subChain();
 +        filter.doFilter( servletRequest, servletResponse, nextChain );
 +      }
 +    }
 +
 +    private FilterChain subChain() {
 +      if( chain != null && chain.size() > 1 ) {
 +        return new Chain( chain.subList( 1, chain.size() ) );
 +      } else {
 +        return EMPTY_CHAIN;
 +      }
 +    }
 +
 +    private String getResourceRole() {
 +      return resourceRole;
 +    }
 +
 +    private void setResourceRole( String resourceRole ) {
 +      this.resourceRole = resourceRole;
 +    }
 +
 +  }
 +
 +  private class Holder implements Filter, FilterConfig {
 +//    private String path;
 +    private Template template;
 +    private String name;
 +    private Map<String,String> params;
 +    private Filter instance;
 +    private Class<? extends Filter> clazz;
 +    private String type;
 +    private String resourceRole;
 +
 +    private Holder( String path, String name, Filter filter, Map<String,String> params, String resourceRole ) throws URISyntaxException {
 +//      this.path = path;
 +      this.template = Parser.parseTemplate( path );
 +      this.name = name;
 +      this.params = params;
 +      this.instance = filter;
 +      this.clazz = filter.getClass();
 +      this.type = clazz.getCanonicalName();
 +      this.resourceRole = resourceRole;
 +    }
 +
 +//    private Holder( String path, String name, Class<RegexDirFilter> clazz, Map<String,String> params ) throws URISyntaxException {
 +//      this.path = path;
 +//      this.template = Parser.parse( path );
 +//      this.name = name;
 +//      this.params = params;
 +//      this.instance = null;
 +//      this.clazz = clazz;
 +//      this.type = clazz.getCanonicalName();
 +//    }
 +
 +    private Holder( String path, String name, String clazz, Map<String,String> params, String resourceRole ) throws URISyntaxException {
 +//      this.path = path;
 +      this.template = Parser.parseTemplate( path );
 +      this.name = name;
 +      this.params = params;
 +      this.instance = null;
 +      this.clazz = null;
 +      this.type = clazz;
 +      this.resourceRole = resourceRole;
 +    }
 +
 +    @Override
 +    public String getFilterName() {
 +      return name;
 +    }
 +
 +    @Override
 +    public ServletContext getServletContext() {
 +      return GatewayFilter.this.config.getServletContext();
 +    }
 +
 +    @Override
 +    public String getInitParameter( String name ) {
 +      String value = null;
 +      if( params != null ) {
 +        value = params.get( name );
 +      }
 +      return value;
 +    }
 +
 +    @Override
 +    public Enumeration<String> getInitParameterNames() {
 +      Enumeration<String> names = null;
 +      if( params != null ) {
 +        names = Collections.enumeration( params.keySet() );
 +      }
 +      return names;
 +    }
 +
 +    @Override
 +    public void init( FilterConfig filterConfig ) throws ServletException {
 +      getInstance().init( filterConfig );
 +    }
 +
 +    @Override
 +    public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain ) throws IOException, ServletException {
 +      final Filter filter = getInstance();
 +      filter.doFilter( servletRequest, servletResponse, filterChain );
 +    }
 +
 +    @Override
 +    public void destroy() {
 +      if( instance != null ) {
 +        instance.destroy();
 +        instance = null;
 +      }
 +    }
 +
 +    @SuppressWarnings("unchecked")
 +    private Class<? extends Filter> getClazz() throws ClassNotFoundException {
 +      if( clazz == null ) {
 +        ClassLoader loader = Thread.currentThread().getContextClassLoader();
 +        if( loader == null ) {
 +          loader = this.getClass().getClassLoader();
 +        }
 +        clazz = (Class)loader.loadClass( type );
 +      }
 +      return clazz;
 +    }
 +
 +    private Filter getInstance() throws ServletException {
 +      if( instance == null ) {
 +        try {
 +          if( clazz == null ) {
 +            clazz = getClazz();
 +          }
 +          instance = clazz.newInstance();
 +          instance.init( this );
 +        } catch( Exception e ) {
 +          throw new ServletException( e );
 +        }
 +      }
 +      return instance;
 +    }
 +    
 +    private String getResourceRole() {
 +      return resourceRole;
 +    }
 +
 +  }
 +
++  /**
++   * A request wrapper that reports the rewritten request URL and appends the
++   * default service path to the context path.
++   */
++  static class ForwardedRequest extends HttpServletRequestWrapper {
++
++    private String newURL;
++    private String contextPath;
++
++    public ForwardedRequest(final HttpServletRequest request,
++        final String contextPath, final String newURL) {
++      super(request);
++      this.newURL = newURL;
++      this.contextPath = contextPath;
++    }
++    }
++
++    @Override
++    public StringBuffer getRequestURL() {
++      return new StringBuffer(newURL);
++    }
++
++    @Override
++    public String getRequestURI() {
++      return newURL;
++    }
++
++    @Override
++    public String getContextPath() {
++      return super.getContextPath() + "/" + this.contextpath;
++    }
++
++  }
 +}
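
[Editor's note] The default-service-path fallback added above rewrites the request URL
before re-matching it against the filter chains. A toy trace of the URL arithmetic (plain
strings, no servlet API; the host, topology, and service values are invented):

    public class DefaultServicePathDemo {
      public static void main(String[] args) {
        String defaultServicePath = "webhdfs";  // assumed topology default service path
        String path = "/v1/tmp";                // pathInfo of the incoming request
        String origUrl = "https://host:8443/gateway/sandbox/v1/tmp";

        // mirrors the rewrite in GatewayFilter.doFilter
        String url;
        if (path == null || "/".equals(path)) {
          url = origUrl + defaultServicePath;
        } else {
          int index = origUrl.indexOf(path);
          url = origUrl.substring(0, index) + "/" + defaultServicePath + path;
        }
        System.out.println(url);
        // https://host:8443/gateway/sandbox/webhdfs/v1/tmp
      }
    }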


[04/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
index 802019b,0000000..077fa05
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
+++ b/gateway-provider-security-jwt/src/main/java/org/apache/knox/gateway/provider/federation/jwt/filter/AbstractJWTFilter.java
@@@ -1,278 -1,0 +1,278 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation.jwt.filter;
 +
 +import java.io.IOException;
 +import java.security.Principal;
 +import java.security.PrivilegedActionException;
 +import java.security.PrivilegedExceptionAction;
 +import java.security.interfaces.RSAPublicKey;
 +import java.util.ArrayList;
 +import java.util.Date;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Set;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.Filter;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditContext;
 +import org.apache.knox.gateway.audit.api.AuditService;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.provider.federation.jwt.JWTMessages;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +
 +/**
 + *
 + */
 +public abstract class AbstractJWTFilter implements Filter {
 +  /**
 +   * If specified, this configuration property refers to a value which the issuer of a received
 +   * token must match. Otherwise, the default value "KNOXSSO" is used.
 +   */
 +  public static final String JWT_EXPECTED_ISSUER = "jwt.expected.issuer";
 +  public static final String JWT_DEFAULT_ISSUER = "KNOXSSO";
 +
 +  static JWTMessages log = MessagesFactory.get( JWTMessages.class );
 +  private static AuditService auditService = AuditServiceFactory.getAuditService();
 +  private static Auditor auditor = auditService.getAuditor(
 +      AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +      AuditConstants.KNOX_COMPONENT_NAME );
 +
 +  protected List<String> audiences;
 +  protected JWTokenAuthority authority;
 +  protected RSAPublicKey publicKey = null;
 +  private String expectedIssuer;
 +
 +  public abstract void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
 +      throws IOException, ServletException;
 +
 +  /**
 +   *
 +   */
 +  public AbstractJWTFilter() {
 +    super();
 +  }
 +
 +  @Override
 +  public void init( FilterConfig filterConfig ) throws ServletException {
 +    ServletContext context = filterConfig.getServletContext();
 +    if (context != null) {
 +      GatewayServices services = (GatewayServices) context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +      if (services != null) {
 +        authority = (JWTokenAuthority) services.getService(GatewayServices.TOKEN_SERVICE);
 +      }
 +    }
 +  }
 +
 +  protected void configureExpectedIssuer(FilterConfig filterConfig) {
 +    expectedIssuer = filterConfig.getInitParameter(JWT_EXPECTED_ISSUER);
 +    if (expectedIssuer == null) {
 +      expectedIssuer = JWT_DEFAULT_ISSUER;
 +    }
 +  }
 +
 +  /**
 +   * @param expectedAudiences comma-separated list of audience values to accept
 +   * @return the parsed audiences as a list, or null if none were configured
 +   */
 +  protected List<String> parseExpectedAudiences(String expectedAudiences) {
 +    ArrayList<String> audList = null;
 +    // setup the list of valid audiences for token validation
 +    if (expectedAudiences != null) {
 +      // parse into the list
 +      String[] audArray = expectedAudiences.split(",");
 +      audList = new ArrayList<String>();
 +      for (String a : audArray) {
-         audList.add(a);
++        audList.add(a.trim());
 +      }
 +    }
 +    return audList;
 +  }
 +
 +  protected boolean tokenIsStillValid(JWTToken jwtToken) {
 +    // if there is no expiration date then the lifecycle is tied entirely to
 +    // the cookie validity - otherwise ensure that the current time is before
 +    // the designated expiration time
 +    Date expires = jwtToken.getExpiresDate();
 +    return (expires == null || new Date().before(expires));
 +  }
 +
 +  /**
 +   * Validate whether any of the accepted audience claims is present in the
 +   * issued token claims list for audience. Override this method in subclasses
 +   * in order to customize the audience validation behavior.
 +   *
 +   * @param jwtToken
 +   *          the JWT token where the allowed audiences will be found
 +   * @return true if an expected audience is present, otherwise false
 +   */
 +  protected boolean validateAudiences(JWTToken jwtToken) {
 +    boolean valid = false;
 +
 +    String[] tokenAudienceList = jwtToken.getAudienceClaims();
 +    // if there were no expected audiences configured then just
 +    // consider any audience acceptable
 +    if (audiences == null) {
 +      valid = true;
 +    } else {
 +      // if any of the configured audiences is found then consider it
 +      // acceptable
 +      if (tokenAudienceList != null) {
 +        for (String aud : tokenAudienceList) {
 +          if (audiences.contains(aud)) {
 +            log.jwtAudienceValidated();
 +            valid = true;
 +            break;
 +          }
 +        }
 +      }
 +    }
 +    return valid;
 +  }
 +
 +  protected void continueWithEstablishedSecurityContext(Subject subject, final HttpServletRequest request, final HttpServletResponse response, final FilterChain chain) throws IOException, ServletException {
 +    Principal principal = (Principal) subject.getPrincipals(PrimaryPrincipal.class).toArray()[0];
 +    AuditContext context = auditService.getContext();
 +    if (context != null) {
 +      context.setUsername( principal.getName() );
 +      String sourceUri = (String)request.getAttribute( AbstractGatewayFilter.SOURCE_REQUEST_CONTEXT_URL_ATTRIBUTE_NAME );
 +      if (sourceUri != null) {
 +        auditor.audit( Action.AUTHENTICATION , sourceUri, ResourceType.URI, ActionOutcome.SUCCESS );
 +      }
 +    }
 +
 +    try {
 +      Subject.doAs(
 +        subject,
 +        new PrivilegedExceptionAction<Object>() {
 +          @Override
 +          public Object run() throws Exception {
 +            chain.doFilter(request, response);
 +            return null;
 +          }
 +        }
 +        );
 +    }
 +    catch (PrivilegedActionException e) {
 +      Throwable t = e.getCause();
 +      if (t instanceof IOException) {
 +        throw (IOException) t;
 +      }
 +      else if (t instanceof ServletException) {
 +        throw (ServletException) t;
 +      }
 +      else {
 +        throw new ServletException(t);
 +      }
 +    }
 +  }
 +
 +  protected Subject createSubjectFromToken(JWTToken token) {
 +    final String principal = token.getSubject();
 +
 +    @SuppressWarnings("rawtypes")
 +    HashSet emptySet = new HashSet();
 +    Set<Principal> principals = new HashSet<>();
 +    Principal p = new PrimaryPrincipal(principal);
 +    principals.add(p);
 +
 +    // The newly constructed Sets check whether this Subject has been set read-only
 +    // before permitting subsequent modifications. The newly created Sets also prevent
 +    // illegal modifications by ensuring that callers have sufficient permissions.
 +    //
 +    // To modify the Principals Set, the caller must have AuthPermission("modifyPrincipals").
 +    // To modify the public credential Set, the caller must have AuthPermission("modifyPublicCredentials").
 +    // To modify the private credential Set, the caller must have AuthPermission("modifyPrivateCredentials").
 +    return new Subject(true, principals, emptySet, emptySet);
 +  }
 +
 +  protected boolean validateToken(HttpServletRequest request, HttpServletResponse response,
 +      FilterChain chain, JWTToken token)
 +      throws IOException, ServletException {
 +    boolean verified = false;
 +    try {
 +      if (publicKey == null) {
 +        verified = authority.verifyToken(token);
 +      }
 +      else {
 +        verified = authority.verifyToken(token, publicKey);
 +      }
 +    } catch (TokenServiceException e) {
 +      log.unableToVerifyToken(e);
 +    }
 +
 +    if (verified) {
 +      // confirm that the issuer matches the intended target
 +      if (expectedIssuer.equals(token.getIssuer())) {
 +        // if there is no expiration data then the lifecycle is tied entirely to
 +        // the cookie validity - otherwise ensure that the current time is before
 +        // the designated expiration time
 +        if (tokenIsStillValid(token)) {
 +          boolean audValid = validateAudiences(token);
 +          if (audValid) {
 +            return true;
 +          }
 +          else {
 +            log.failedToValidateAudience();
 +            handleValidationError(request, response, HttpServletResponse.SC_BAD_REQUEST,
 +                                  "Bad request: missing required token audience");
 +          }
 +        }
 +        else {
 +          log.tokenHasExpired();
 +          handleValidationError(request, response, HttpServletResponse.SC_BAD_REQUEST,
 +                                "Bad request: token has expired");
 +        }
 +      }
 +      else {
 +        handleValidationError(request, response, HttpServletResponse.SC_UNAUTHORIZED, null);
 +      }
 +    }
 +    else {
 +      log.failedToVerifyTokenSignature();
 +      handleValidationError(request, response, HttpServletResponse.SC_UNAUTHORIZED, null);
 +    }
 +
 +    return false;
 +  }
 +
 +  protected abstract void handleValidationError(HttpServletRequest request, HttpServletResponse response, int status,
 +                                                String error) throws IOException;
 +
 +}
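
[Editor's note] The functional change highlighted in this hunk is the a.trim() added in
parseExpectedAudiences(). A quick illustration of why it matters (String.split is
standard JDK behavior; the property value mirrors the whitespace test below):

    import java.util.ArrayList;
    import java.util.List;

    public class AudienceParseDemo {
      public static void main(String[] args) {
        String expectedAudiences = " foo, bar ";  // as a user might write it
        List<String> audList = new ArrayList<>();
        for (String a : expectedAudiences.split(",")) {
          audList.add(a.trim());  // without trim(): " foo" and " bar "
        }
        System.out.println(audList);                  // [foo, bar]
        System.out.println(audList.contains("bar"));  // true; false without trim()
      }
    }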

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
index 361a1ff,0000000..9888eab
mode 100644,000000..100644
--- a/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
+++ b/gateway-provider-security-jwt/src/test/java/org/apache/knox/gateway/provider/federation/AbstractJWTFilterTest.java
@@@ -1,636 -1,0 +1,667 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.provider.federation;
 +
 +import static org.junit.Assert.fail;
 +
 +import java.io.IOException;
 +import java.net.InetAddress;
 +import java.security.AccessController;
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.NoSuchAlgorithmException;
 +import java.security.Principal;
 +import java.security.PublicKey;
 +import java.security.cert.Certificate;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.text.MessageFormat;
 +import java.util.Enumeration;
 +import java.util.List;
 +import java.util.ArrayList;
 +import java.util.Properties;
 +import java.util.Date;
 +import java.util.Set;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.FilterChain;
 +import javax.servlet.FilterConfig;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletException;
 +import javax.servlet.ServletRequest;
 +import javax.servlet.ServletResponse;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.knox.gateway.provider.federation.jwt.filter.AbstractJWTFilter;
 +import org.apache.knox.gateway.provider.federation.jwt.filter.SSOCookieFederationFilter;
 +import org.apache.knox.gateway.security.PrimaryPrincipal;
 +import org.apache.knox.gateway.services.security.impl.X509CertificateUtil;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.*;
 +import com.nimbusds.jwt.JWTClaimsSet;
 +import com.nimbusds.jwt.SignedJWT;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +public abstract class AbstractJWTFilterTest  {
 +  private static final String SERVICE_URL = "https://localhost:8888/resource";
 +  private static final String dnTemplate = "CN={0},OU=Test,O=Hadoop,L=Test,ST=Test,C=US";
 +
 +  protected AbstractJWTFilter handler = null;
 +  protected static RSAPublicKey publicKey = null;
 +  protected static RSAPrivateKey privateKey = null;
 +  protected static String pem = null;
 +
 +  protected abstract void setTokenOnRequest(HttpServletRequest request, SignedJWT jwt);
 +  protected abstract void setGarbledTokenOnRequest(HttpServletRequest request, SignedJWT jwt);
 +  protected abstract String getAudienceProperty();
 +  protected abstract String getVerificationPemProperty();
 +
 +  private static String buildDistinguishedName(String hostname) {
 +    MessageFormat headerFormatter = new MessageFormat(dnTemplate);
 +    String[] paramArray = new String[1];
 +    paramArray[0] = hostname;
 +    String dn = headerFormatter.format(paramArray);
 +    return dn;
 +  }
 +
 +  @BeforeClass
 +  public static void generateKeys() throws Exception {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(2048);
 +    KeyPair keyPair = kpg.generateKeyPair();
 +    String dn = buildDistinguishedName(InetAddress.getLocalHost().getHostName());
 +    Certificate cert = X509CertificateUtil.generateCertificate(dn, keyPair, 365, "SHA1withRSA");
 +    byte[] data = cert.getEncoded();
 +    Base64 encoder = new Base64( 76, "\n".getBytes( "ASCII" ) );
 +    pem = encoder.encodeToString( data ).trim();
 +
 +    publicKey = (RSAPublicKey) keyPair.getPublic();
 +    privateKey = (RSAPrivateKey) keyPair.getPrivate();
 +  }
 +
 +  @After
 +  public void teardown() throws Exception {
 +    handler.destroy();
 +  }
 +
 +  @Test
 +  public void testValidJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidAudienceJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.put(getAudienceProperty(), "bar");
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testInvalidAudienceJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.put(getAudienceProperty(), "foo");
 +      props.put("sso.authentication.provider.url", "https://localhost:8443/gateway/knoxsso/api/v1/websso");
 +
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
++  public void testValidAudienceJWTWhitespace() throws Exception {
++    try {
++      Properties props = getProperties();
++      props.put(getAudienceProperty(), " foo, bar ");
++      handler.init(new TestFilterConfig(props));
++
++      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000), privateKey, props);
++
++      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++      setTokenOnRequest(request, jwt);
++
++      EasyMock.expect(request.getRequestURL()).andReturn(
++          new StringBuffer(SERVICE_URL)).anyTimes();
++      EasyMock.expect(request.getQueryString()).andReturn(null);
++      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
++          SERVICE_URL);
++      EasyMock.replay(request);
++
++      TestFilterChain chain = new TestFilterChain();
++      handler.doFilter(request, response, chain);
++      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
++      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
++      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
++      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
++    } catch (ServletException se) {
++      fail("Should NOT have thrown a ServletException.");
++    }
++  }
++
++  @Test
 +  public void testValidVerificationPEM() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +
 +//      System.out.println("+" + pem + "+");
 +
 +      props.put(getAudienceProperty(), "bar");
 +      props.put("sso.authentication.provider.url", "https://localhost:8443/gateway/knoxsso/api/v1/websso");
 +      props.put(getVerificationPemProperty(), pem);
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 50000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testExpiredJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() - 1000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidJWTNoExpiration() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", null, privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL).anyTimes();
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled );
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", !principals.isEmpty());
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testUnableToParseJWT() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setGarbledTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL).anyTimes();
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testFailedSignatureValidationJWT() throws Exception {
 +    try {
 +      // Create a private key to sign the token
 +      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +      kpg.initialize(1024);
 +
 +      KeyPair kp = kpg.genKeyPair();
 +
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
 +                             (RSAPrivateKey)kp.getPrivate(), props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL).anyTimes();
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testInvalidVerificationPEM() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +
 +      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +      kpg.initialize(1024);
 +
 +      KeyPair keyPair = kpg.generateKeyPair();
 +      String dn = buildDistinguishedName(InetAddress.getLocalHost().getHostName());
 +      Certificate cert = X509CertificateUtil.generateCertificate(dn, keyPair, 365, "SHA1withRSA");
 +      byte[] data = cert.getEncoded();
 +      Base64 encoder = new Base64( 76, "\n".getBytes( "ASCII" ) );
 +      String failingPem = encoder.encodeToString( data ).trim();
 +
 +      props.put(getAudienceProperty(), "bar");
 +      props.put(getVerificationPemProperty(), failingPem);
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 50000), privateKey, props);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", chain.doFilterCalled == false);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testInvalidIssuer() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("new-issuer", "alice", new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +         new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be true.", !chain.doFilterCalled);
 +      Assert.assertTrue("No Subject should be returned.", chain.subject == null);
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  @Test
 +  public void testValidIssuerViaConfig() throws Exception {
 +    try {
 +      Properties props = getProperties();
 +      props.setProperty(AbstractJWTFilter.JWT_EXPECTED_ISSUER, "new-issuer");
 +      handler.init(new TestFilterConfig(props));
 +
 +      SignedJWT jwt = getJWT("new-issuer", "alice", new Date(new Date().getTime() + 5000), privateKey);
 +
 +      HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +      setTokenOnRequest(request, jwt);
 +
 +      EasyMock.expect(request.getRequestURL()).andReturn(
 +          new StringBuffer(SERVICE_URL)).anyTimes();
 +      EasyMock.expect(request.getQueryString()).andReturn(null);
 +      HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +      EasyMock.expect(response.encodeRedirectURL(SERVICE_URL)).andReturn(
 +          SERVICE_URL);
 +      EasyMock.replay(request);
 +
 +      TestFilterChain chain = new TestFilterChain();
 +      handler.doFilter(request, response, chain);
 +      Assert.assertTrue("doFilterCalled should not be false.", chain.doFilterCalled);
 +      Set<PrimaryPrincipal> principals = chain.subject.getPrincipals(PrimaryPrincipal.class);
 +      Assert.assertTrue("No PrimaryPrincipal", principals.size() > 0);
 +      Assert.assertEquals("Not the expected principal", "alice", ((Principal)principals.toArray()[0]).getName());
 +    } catch (ServletException se) {
 +      fail("Should NOT have thrown a ServletException.");
 +    }
 +  }
 +
 +  protected Properties getProperties() {
 +    Properties props = new Properties();
 +    props.setProperty(
 +        SSOCookieFederationFilter.SSO_AUTHENTICATION_PROVIDER_URL,
 +        "https://localhost:8443/authserver");
 +    return props;
 +  }
 +
 +  protected SignedJWT getJWT(String sub, Date expires, RSAPrivateKey privateKey,
 +      Properties props) throws Exception {
 +    return getJWT(AbstractJWTFilter.JWT_DEFAULT_ISSUER, sub, expires, privateKey);
 +  }
 +
 +  protected SignedJWT getJWT(String issuer, String sub, Date expires, RSAPrivateKey privateKey)
 +      throws Exception {
 +    List<String> aud = new ArrayList<String>();
 +    aud.add("bar");
 +
 +    JWTClaimsSet claims = new JWTClaimsSet.Builder()
 +    .issuer(issuer)
 +    .subject(sub)
 +    .audience(aud)
 +    .expirationTime(expires)
 +    .claim("scope", "openid")
 +    .build();
 +
 +    JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.RS256).build();
 +
 +    SignedJWT signedJWT = new SignedJWT(header, claims);
 +    JWSSigner signer = new RSASSASigner(privateKey);
 +
 +    signedJWT.sign(signer);
 +
 +    return signedJWT;
 +  }
 +
 +  protected static class TestFilterConfig implements FilterConfig {
 +    Properties props = null;
 +
 +    public TestFilterConfig(Properties props) {
 +      this.props = props;
 +    }
 +
 +    @Override
 +    public String getFilterName() {
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterConfig#getServletContext()
 +     */
 +    @Override
 +    public ServletContext getServletContext() {
 +//      JWTokenAuthority authority = EasyMock.createNiceMock(JWTokenAuthority.class);
 +//      GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +//      EasyMock.expect(services.getService("TokenService").andReturn(authority));
 +//      ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +//      EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE).andReturn(new DefaultGatewayServices()));
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterConfig#getInitParameter(java.lang.String)
 +     */
 +    @Override
 +    public String getInitParameter(String name) {
 +      return props.getProperty(name, null);
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterConfig#getInitParameterNames()
 +     */
 +    @Override
 +    public Enumeration<String> getInitParameterNames() {
 +      return null;
 +    }
 +
 +  }
 +
 +  protected static class TestJWTokenAuthority implements JWTokenAuthority {
 +
 +    private PublicKey verifyingKey;
 +
 +    public TestJWTokenAuthority(PublicKey verifyingKey) {
 +      this.verifyingKey = verifyingKey;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(javax.security.auth.Subject, java.lang.String)
 +     */
 +    @Override
 +    public JWT issueToken(Subject subject, String algorithm)
 +        throws TokenServiceException {
 +      // TODO Auto-generated method stub
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm)
 +        throws TokenServiceException {
 +      // TODO Auto-generated method stub
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, java.lang.String)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm)
 +        throws TokenServiceException {
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see org.apache.knox.gateway.services.security.token.JWTokenAuthority#verifyToken(org.apache.knox.gateway.services.security.token.impl.JWT)
 +     */
 +    @Override
 +    public boolean verifyToken(JWT token) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier((RSAPublicKey) verifyingKey);
 +      return token.verify(verifier);
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, java.lang.String, long)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm,
 +        long expires) throws TokenServiceException {
 +      return null;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, List<String> audiences, String algorithm,
 +        long expires) throws TokenServiceException {
 +      return null;
 +    }
 +
 +    /* (non-Javadoc)
 +     * @see JWTokenAuthority#issueToken(java.security.Principal, java.lang.String, long)
 +     */
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm, long expires)
 +        throws TokenServiceException {
 +      // TODO Auto-generated method stub
 +      return null;
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token, RSAPublicKey publicKey) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +  }
 +
 +  protected static class TestFilterChain implements FilterChain {
 +    boolean doFilterCalled = false;
 +    Subject subject = null;
 +
 +    /* (non-Javadoc)
 +     * @see javax.servlet.FilterChain#doFilter(javax.servlet.ServletRequest, javax.servlet.ServletResponse)
 +     */
 +    @Override
 +    public void doFilter(ServletRequest request, ServletResponse response)
 +        throws IOException, ServletException {
 +      doFilterCalled = true;
 +
 +      subject = Subject.getSubject( AccessController.getContext() );
 +    }
 +
 +  }
 +}
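
For reference, a minimal standalone sketch of the sign/verify round trip these
tests exercise, using the same Nimbus JOSE+JWT classes. The throwaway key pair
and the "KNOXSSO" issuer string are placeholders (the tests use a fixture key
and AbstractJWTFilter.JWT_DEFAULT_ISSUER):

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.interfaces.RSAPrivateKey;
    import java.security.interfaces.RSAPublicKey;
    import java.util.Date;

    import com.nimbusds.jose.JWSAlgorithm;
    import com.nimbusds.jose.JWSHeader;
    import com.nimbusds.jose.crypto.RSASSASigner;
    import com.nimbusds.jose.crypto.RSASSAVerifier;
    import com.nimbusds.jwt.JWTClaimsSet;
    import com.nimbusds.jwt.SignedJWT;

    public class JwtRoundTrip {
      public static void main(String[] args) throws Exception {
        // Generate a throwaway RSA key pair for the example.
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair kp = kpg.generateKeyPair();

        // Build and sign a token, mirroring getJWT() above.
        JWTClaimsSet claims = new JWTClaimsSet.Builder()
            .issuer("KNOXSSO") // placeholder for AbstractJWTFilter.JWT_DEFAULT_ISSUER
            .subject("alice")
            .expirationTime(new Date(System.currentTimeMillis() + 5000))
            .build();
        SignedJWT jwt = new SignedJWT(
            new JWSHeader.Builder(JWSAlgorithm.RS256).build(), claims);
        jwt.sign(new RSASSASigner((RSAPrivateKey) kp.getPrivate()));

        // Verify with the public half, as TestJWTokenAuthority.verifyToken() does.
        boolean ok = jwt.verify(new RSASSAVerifier((RSAPublicKey) kp.getPublic()));
        System.out.println("verified: " + ok);
      }
    }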

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java
index 86f2854,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java
+++ b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java
index 5b3b6e0,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java
+++ b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java
index d13bdaa,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java
+++ b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java
index b062013,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java
+++ b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java
index e3811b4,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java
+++ b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java
index a0cd7be,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java
+++ b/gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-release/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/main/java/org/apache/knox/gateway/services/registry/impl/DefaultServiceRegistryService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/registry/impl/DefaultServiceRegistryService.java
index 84330c7,0000000..075eda1
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/registry/impl/DefaultServiceRegistryService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/registry/impl/DefaultServiceRegistryService.java
@@@ -1,207 -1,0 +1,207 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.registry.impl;
 +
 +import com.fasterxml.jackson.core.JsonFactory;
 +import com.fasterxml.jackson.core.JsonParseException;
 +import com.fasterxml.jackson.core.JsonProcessingException;
 +import com.fasterxml.jackson.core.type.TypeReference;
 +import com.fasterxml.jackson.databind.JsonMappingException;
 +import com.fasterxml.jackson.databind.ObjectMapper;
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.registry.ServiceRegistry;
 +import org.apache.knox.gateway.services.security.CryptoService;
 +
 +import java.io.File;
 +import java.io.IOException;
++import java.security.SecureRandom;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
- import java.util.Random;
 +
 +public class DefaultServiceRegistryService implements ServiceRegistry, Service {
 +  private static final GatewayMessages LOG = MessagesFactory.get( GatewayMessages.class );
-   
++
 +  protected char[] chars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
 +  'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
 +  'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
 +  'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
 +  '2', '3', '4', '5', '6', '7', '8', '9',};
 +
 +  private CryptoService crypto;
 +  private Registry registry = new Registry();
 +
 +  private String registryFileName;
-   
++
 +  public DefaultServiceRegistryService() {
 +  }
-   
++
 +  public void setCryptoService(CryptoService crypto) {
 +    this.crypto = crypto;
 +  }
-   
++
 +  public String getRegistrationCode(String clusterName) {
 +    String code = generateRegCode(16);
 +    byte[] signature = crypto.sign("SHA256withRSA","gateway-identity",code);
 +    String encodedSig = Base64.encodeBase64URLSafeString(signature);
-     
++
 +    return code + "::" + encodedSig;
 +  }
-   
++
 +  private String generateRegCode(int length) {
-     StringBuffer sb = new StringBuffer();
-     Random r = new Random();
++    StringBuilder sb = new StringBuilder();
++    SecureRandom r = new SecureRandom();
 +    for (int i = 0; i < length; i++) {
 +      sb.append(chars[r.nextInt(chars.length)]);
 +    }
 +    return sb.toString();
 +  }
-   
++
 +  public void removeClusterServices(String clusterName) {
 +    registry.remove(clusterName);
 +  }
 +
 +  public boolean registerService(String regCode, String clusterName, String serviceName, List<String> urls) {
 +    boolean rc = false;
 +    // verify the signature of the regCode
 +    if (regCode == null) {
 +      throw new IllegalArgumentException("Registration Code must not be null.");
 +    }
 +    String[] parts = regCode.split("::");
-     
++
 +    // part one is the code and part two is the signature
 +    boolean verified = crypto.verify("SHA256withRSA", "gateway-identity", parts[0], Base64.decodeBase64(parts[1]));
 +    if (verified) {
 +      HashMap<String,RegEntry> clusterServices = registry.get(clusterName);
 +      if (clusterServices == null) {
 +        synchronized(this) {
 +          clusterServices = new HashMap<>();
 +          registry.put(clusterName, clusterServices);
 +        }
 +      }
 +      RegEntry regEntry = new RegEntry();
 +      regEntry.setClusterName(clusterName);
 +      regEntry.setServiceName(serviceName);
 +      regEntry.setUrls(urls);
 +      clusterServices.put(serviceName , regEntry);
 +      String json = renderAsJsonString(registry);
 +      try {
 +        FileUtils.write(new File(registryFileName), json);
 +        rc = true;
 +      } catch (IOException e) {
 +        // log appropriately
 +        e.printStackTrace(); //TODO: I18N
 +      }
 +    }
-     
++
 +    return rc;
 +  }
-   
++
 +  private String renderAsJsonString(HashMap<String,HashMap<String,RegEntry>> registry) {
 +    String json = null;
 +    ObjectMapper mapper = new ObjectMapper();
-     
++
 +    try {
 +      // write JSON to a file
 +      json = mapper.writeValueAsString((Object)registry);
-     
++
 +    } catch ( JsonProcessingException e ) {
 +      e.printStackTrace(); //TODO: I18N
 +    }
 +    return json;
 +  }
-   
++
 +  @Override
 +  public String lookupServiceURL(String clusterName, String serviceName) {
 +    List<String> urls = lookupServiceURLs( clusterName, serviceName );
 +    if ( urls != null && !urls.isEmpty() ) {
 +      return urls.get( 0 );
 +    }
 +    return null;
 +  }
 +
 +  @Override
 +  public List<String> lookupServiceURLs( String clusterName, String serviceName ) {
 +    RegEntry entry = null;
-     HashMap clusterServices = registry.get(clusterName);
++    HashMap<String, RegEntry> clusterServices = registry.get(clusterName);
 +    if (clusterServices != null) {
-       entry = (RegEntry) clusterServices.get(serviceName);
++      entry = clusterServices.get(serviceName);
 +      if( entry != null ) {
 +        return entry.getUrls();
 +      }
 +    }
 +    return null;
 +  }
-   
++
 +  private HashMap<String, HashMap<String,RegEntry>> getMapFromJsonString(String json) {
 +    Registry map = null;
-     JsonFactory factory = new JsonFactory(); 
-     ObjectMapper mapper = new ObjectMapper(factory); 
-     TypeReference<Registry> typeRef 
-           = new TypeReference<Registry>() {}; 
++    JsonFactory factory = new JsonFactory();
++    ObjectMapper mapper = new ObjectMapper(factory);
++    TypeReference<Registry> typeRef
++          = new TypeReference<Registry>() {};
 +    try {
 +      map = mapper.readValue(json, typeRef);
 +    } catch (JsonParseException e) {
 +      LOG.failedToGetMapFromJsonString( json, e );
 +    } catch (JsonMappingException e) {
 +      LOG.failedToGetMapFromJsonString( json, e );
 +    } catch (IOException e) {
 +      LOG.failedToGetMapFromJsonString( json, e );
-     } 
++    }
 +    return map;
-   }   
++  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options)
 +      throws ServiceLifecycleException {
 +    String securityDir = config.getGatewaySecurityDir();
 +    String filename = "registry";
 +    setupRegistryFile(securityDir, filename);
 +  }
 +
 +  protected void setupRegistryFile(String securityDir, String filename) throws ServiceLifecycleException {
 +    File registryFile = new File(securityDir, filename);
 +    if (registryFile.exists()) {
 +      try {
 +        String json = FileUtils.readFileToString(registryFile);
 +        Registry reg = (Registry) getMapFromJsonString(json);
 +        if (reg != null) {
 +          registry = reg;
 +        }
 +      } catch (Exception e) {
 +        throw new ServiceLifecycleException("Unable to load the persisted registry.", e);
 +      }
 +    }
 +    registryFileName = registryFile.getAbsolutePath();
 +  }
 +
 +  @Override
 +  public void start() throws ServiceLifecycleException {
 +  }
 +
 +  @Override
 +  public void stop() throws ServiceLifecycleException {
 +  }
 +
 +}
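
The Random -> SecureRandom change above matters because registration codes act
as bearer secrets, and java.util.Random is seedable and predictable. A minimal
sketch of the same pattern (alphabet and length mirror the class above):

    import java.security.SecureRandom;

    public class RegCodes {
      // Same alphabet as DefaultServiceRegistryService: ambiguous
      // characters (0, 1, i, l, o, I, L, O) are omitted.
      private static final char[] CHARS =
          "abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ23456789".toCharArray();

      static String generate(int length) {
        SecureRandom r = new SecureRandom(); // CSPRNG, unlike java.util.Random
        StringBuilder sb = new StringBuilder(length);
        for (int i = 0; i < length; i++) {
          sb.append(CHARS[r.nextInt(CHARS.length)]);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(generate(16)); // e.g. a 16-char registration code
      }
    }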

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/main/java/org/apache/knox/gateway/services/security/impl/DefaultAliasService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/security/impl/DefaultAliasService.java
index f52a7b3,0000000..b5e62ab
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/security/impl/DefaultAliasService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/security/impl/DefaultAliasService.java
@@@ -1,217 -1,0 +1,217 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.security.impl;
 +
 +import java.security.KeyStore;
 +import java.security.KeyStoreException;
++import java.security.SecureRandom;
 +import java.security.cert.Certificate;
 +import java.util.ArrayList;
 +import java.util.Enumeration;
 +import java.util.List;
 +import java.util.Map;
- import java.util.Random;
 +
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.AliasServiceException;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +import org.apache.knox.gateway.services.security.MasterService;
 +
 +public class DefaultAliasService implements AliasService {
 +  private static final GatewayMessages LOG = MessagesFactory.get( GatewayMessages.class );
 +
-   private static final String GATEWAY_IDENTITY_PASSPHRASE = "gateway-identity-passphrase"; 
++  private static final String GATEWAY_IDENTITY_PASSPHRASE = "gateway-identity-passphrase";
 +
 +  protected char[] chars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
 +  'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
 +  'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
 +  'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
 +  '2', '3', '4', '5', '6', '7', '8', '9',};
 +
 +  private KeystoreService keystoreService;
 +  private MasterService masterService;
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options)
 +      throws ServiceLifecycleException {
 +  }
 +
 +  @Override
 +  public void start() throws ServiceLifecycleException {
 +  }
 +
 +  @Override
 +  public void stop() throws ServiceLifecycleException {
 +  }
 +
 +  @Override
 +  public char[] getGatewayIdentityPassphrase() throws AliasServiceException {
 +    char[] passphrase = getPasswordFromAliasForGateway(GATEWAY_IDENTITY_PASSPHRASE);
 +    if (passphrase == null) {
 +      passphrase = masterService.getMasterSecret();
 +    }
 +    return passphrase;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.services.security.impl.AliasService#getAliasForCluster(java.lang.String, java.lang.String)
 +   */
 +  @Override
 +  public char[] getPasswordFromAliasForCluster(String clusterName, String alias)
 +      throws AliasServiceException {
 +    return getPasswordFromAliasForCluster(clusterName, alias, false);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.services.security.impl.AliasService#getAliasForCluster(java.lang.String, java.lang.String, boolean)
 +   */
 +  @Override
 +  public char[] getPasswordFromAliasForCluster(String clusterName, String alias, boolean generate)
 +      throws AliasServiceException {
 +    char[] credential = null;
 +    try {
 +      credential = keystoreService.getCredentialForCluster(clusterName, alias);
 +      if (credential == null) {
 +        if (generate) {
 +          generateAliasForCluster(clusterName, alias);
 +          credential = keystoreService.getCredentialForCluster(clusterName, alias);
 +        }
 +      }
 +    } catch (KeystoreServiceException e) {
 +      LOG.failedToGetCredentialForCluster(clusterName, e);
 +      throw new AliasServiceException(e);
 +    }
 +    return credential;
 +  }
 +
 +  private String generatePassword(int length) {
-     StringBuffer sb = new StringBuffer();
-     Random r = new Random();
++    StringBuilder sb = new StringBuilder();
++    SecureRandom r = new SecureRandom();
 +    for (int i = 0; i < length; i++) {
 +      sb.append(chars[r.nextInt(chars.length)]);
 +    }
 +    return sb.toString();
 +  }
-   
++
 +  public void setKeystoreService(KeystoreService ks) {
 +    this.keystoreService = ks;
 +  }
 +
 +  public void setMasterService(MasterService ms) {
 +    this.masterService = ms;
-     
++
 +  }
 +
 +  @Override
 +  public void generateAliasForCluster(String clusterName, String alias)
 +      throws AliasServiceException {
 +    try {
 +      keystoreService.getCredentialStoreForCluster(clusterName);
 +    } catch (KeystoreServiceException e) {
 +      LOG.failedToGenerateAliasForCluster(clusterName, e);
 +      throw new AliasServiceException(e);
 +    }
 +    String passwordString = generatePassword(16);
 +    addAliasForCluster(clusterName, alias, passwordString);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.services.security.impl.AliasService#addAliasForCluster(java.lang.String, java.lang.String, java.lang.String)
 +   */
 +  @Override
 +  public void addAliasForCluster(String clusterName, String alias, String value) {
 +    try {
 +      keystoreService.addCredentialForCluster(clusterName, alias, value);
 +    } catch (KeystoreServiceException e) {
 +      LOG.failedToAddCredentialForCluster(clusterName, e);
 +    }
 +  }
 +
 +  @Override
 +  public void removeAliasForCluster(String clusterName, String alias)
 +      throws AliasServiceException {
 +    try {
 +      keystoreService.removeCredentialForCluster(clusterName, alias);
 +    } catch (KeystoreServiceException e) {
 +      throw new AliasServiceException(e);
 +    }
 +  }
 +
 +  @Override
 +  public char[] getPasswordFromAliasForGateway(String alias)
 +      throws AliasServiceException {
 +    return getPasswordFromAliasForCluster("__gateway", alias);
 +  }
 +
 +  @Override
 +  public void generateAliasForGateway(String alias)
 +      throws AliasServiceException {
 +    generateAliasForCluster("__gateway", alias);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see AliasService#getCertificateForGateway(java.lang.String)
 +   */
 +  @Override
 +  public Certificate getCertificateForGateway(String alias) {
 +    Certificate cert = null;
 +    try {
 +      cert = this.keystoreService.getKeystoreForGateway().getCertificate(alias);
 +    } catch (KeyStoreException e) {
 +      LOG.unableToRetrieveCertificateForGateway(e);
 +      // should we throw an exception?
 +    } catch (KeystoreServiceException e) {
 +      LOG.unableToRetrieveCertificateForGateway(e);
 +    }
 +    return cert;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see AliasService#getAliasesForCluster(java.lang.String)
 +   */
 +  @Override
 +  public List<String> getAliasesForCluster(String clusterName) {
 +    ArrayList<String> list = new ArrayList<String>();
 +    KeyStore keyStore;
 +    try {
 +      keyStore = keystoreService.getCredentialStoreForCluster(clusterName);
 +      if (keyStore != null) {
 +        String alias = null;
 +        try {
 +          Enumeration<String> e = keyStore.aliases();
 +          while (e.hasMoreElements()) {
 +             alias = e.nextElement();
 +             // only include the metadata key names in the list of names
 +             if (!alias.contains("@")) {
 +                 list.add(alias);
 +             }
 +          }
 +        } catch (KeyStoreException e) {
 +          LOG.failedToGetCredentialForCluster(clusterName, e);
 +        }
 +      }
 +    } catch (KeystoreServiceException kse) {
 +      LOG.failedToGetCredentialForCluster(clusterName, kse);
 +    }
 +    return list;
 +  }
 +}
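
getAliasesForCluster() above skips any alias containing "@", which the inline
comment indicates marks metadata entries in the credential store. A hedged
sketch of the same enumeration against a plain JCEKS store (the path and
password are placeholders, not Knox configuration):

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import java.util.ArrayList;
    import java.util.Enumeration;
    import java.util.List;

    public class ListAliases {
      public static void main(String[] args) throws Exception {
        KeyStore ks = KeyStore.getInstance("JCEKS");
        try (FileInputStream in = new FileInputStream("/path/to/credentials.jceks")) {
          ks.load(in, "storePassword".toCharArray()); // placeholder password
        }
        List<String> names = new ArrayList<>();
        Enumeration<String> aliases = ks.aliases();
        while (aliases.hasMoreElements()) {
          String alias = aliases.nextElement();
          if (!alias.contains("@")) { // skip metadata entries, as the service does
            names.add(alias);
          }
        }
        System.out.println(names);
      }
    }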

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
index c4a3914,0000000..16d5b81
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
@@@ -1,187 -1,0 +1,234 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.topology.discovery.DefaultServiceDiscoveryConfig;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
++import java.io.BufferedWriter;
++import java.io.File;
++import java.io.FileInputStream;
++import java.io.FileWriter;
++import java.io.InputStreamReader;
++import java.io.IOException;
++
++import java.net.URI;
++import java.net.URISyntaxException;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
 +
- import java.io.*;
- import java.util.*;
 +
 +
 +/**
 + * Processes simple topology descriptors, producing full topology files, which can subsequently be deployed to the
 + * gateway.
 + */
 +public class SimpleDescriptorHandler {
 +
 +    private static final Service[] NO_GATEWAY_SERVICES = new Service[]{};
 +
 +    private static final SimpleDescriptorMessages log = MessagesFactory.get(SimpleDescriptorMessages.class);
 +
 +    public static Map<String, File> handle(File desc) throws IOException {
 +        return handle(desc, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, Service...gatewayServices) throws IOException {
 +        return handle(desc, desc.getParentFile(), gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory) throws IOException {
 +        return handle(desc, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory, Service...gatewayServices) throws IOException {
 +        return handle(SimpleDescriptorFactory.parse(desc.getAbsolutePath()), desc.getParentFile(), destDirectory, gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory) {
 +        return handle(desc, srcDirectory, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory, Service...gatewayServices) {
 +        Map<String, File> result = new HashMap<>();
 +
 +        File topologyDescriptor;
 +
 +        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
 +        sdc.setUser(desc.getDiscoveryUser());
 +        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
-         ServiceDiscovery sd = ServiceDiscoveryFactory
-             .get(desc.getDiscoveryType(), gatewayServices);
++        ServiceDiscovery sd = ServiceDiscoveryFactory.get(desc.getDiscoveryType(), gatewayServices);
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, desc.getClusterName());
 +
 +        Map<String, List<String>> serviceURLs = new HashMap<>();
 +
 +        if (cluster != null) {
 +            for (SimpleDescriptor.Service descService : desc.getServices()) {
 +                String serviceName = descService.getName();
 +
 +                List<String> descServiceURLs = descService.getURLs();
 +                if (descServiceURLs == null || descServiceURLs.isEmpty()) {
 +                    descServiceURLs = cluster.getServiceURLs(serviceName);
 +                }
 +
-                 // If there is at least one URL associated with the service, then add it to the map
++                // Validate the discovered service URLs
++                List<String> validURLs = new ArrayList<>();
 +                if (descServiceURLs != null && !descServiceURLs.isEmpty()) {
-                     serviceURLs.put(serviceName, descServiceURLs);
++                    // Validate the URL(s)
++                    for (String descServiceURL : descServiceURLs) {
++                        if (validateURL(serviceName, descServiceURL)) {
++                            validURLs.add(descServiceURL);
++                        }
++                    }
++                }
++
++                // If there is at least one valid URL associated with the service, then add it to the map
++                if (!validURLs.isEmpty()) {
++                    serviceURLs.put(serviceName, validURLs);
 +                } else {
 +                    log.failedToDiscoverClusterServiceURLs(serviceName, cluster.getName());
-                     throw new IllegalStateException("ServiceDiscovery failed to resolve any URLs for " + serviceName +
-                                                     ". Topology update aborted!");
 +                }
 +            }
 +        } else {
 +            log.failedToDiscoverClusterServices(desc.getClusterName());
 +        }
 +
++        BufferedWriter fw = null;
 +        topologyDescriptor = null;
 +        File providerConfig = null;
 +        try {
 +            // Verify that the referenced provider configuration exists before attempting to read it
 +            providerConfig = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
 +            if (providerConfig == null) {
 +                log.failedToResolveProviderConfigRef(desc.getProviderConfig());
 +                throw new IllegalArgumentException("Unresolved provider configuration reference: " +
 +                                                   desc.getProviderConfig() + " ; Topology update aborted!");
 +            }
 +            result.put("reference", providerConfig);
 +
 +            // TODO: Should the contents of the provider config be validated before incorporating it into the topology?
 +
 +            String topologyFilename = desc.getName();
 +            if (topologyFilename == null) {
 +                topologyFilename = desc.getClusterName();
 +            }
 +            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
-             FileWriter fw = new FileWriter(topologyDescriptor);
++            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
 +
 +            fw.write("<topology>\n");
 +
 +            // Copy the externalized provider configuration content into the topology descriptor in-line
 +            InputStreamReader policyReader = new InputStreamReader(new FileInputStream(providerConfig));
 +            char[] buffer = new char[1024];
 +            int count;
 +            while ((count = policyReader.read(buffer)) > 0) {
 +                fw.write(buffer, 0, count);
 +            }
 +            policyReader.close();
 +
++            // Sort the service names to write the services alphabetically
++            List<String> serviceNames = new ArrayList<>(serviceURLs.keySet());
++            Collections.sort(serviceNames);
++
 +            // Write the service declarations
-             for (String serviceName : serviceURLs.keySet()) {
++            for (String serviceName : serviceNames) {
 +                fw.write("    <service>\n");
 +                fw.write("        <role>" + serviceName + "</role>\n");
 +                for (String url : serviceURLs.get(serviceName)) {
 +                    fw.write("        <url>" + url + "</url>\n");
 +                }
 +                fw.write("    </service>\n");
 +            }
 +
 +            fw.write("</topology>\n");
 +
 +            fw.flush();
-             fw.close();
 +        } catch (IOException e) {
 +            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
 +            topologyDescriptor.delete();
++        } finally {
++            if (fw != null) {
++                try {
++                    fw.close();
++                } catch (IOException e) {
++                    // ignore
++                }
++            }
 +        }
 +
 +        result.put("topology", topologyDescriptor);
 +        return result;
 +    }
 +
++    private static boolean validateURL(String serviceName, String url) {
++        boolean result = false;
++
++        if (url != null && !url.isEmpty()) {
++            try {
++                new URI(url);
++                result = true;
++            } catch (URISyntaxException e) {
++                log.serviceURLValidationFailed(serviceName, url, e);
++            }
++        }
++
++        return result;
++    }
 +
 +    private static File resolveProviderConfigurationReference(String reference, File srcDirectory) {
 +        File providerConfig;
 +
 +        // If the reference includes a path
 +        if (reference.contains(File.separator)) {
 +            // Check if it's an absolute path
 +            providerConfig = new File(reference);
 +            if (!providerConfig.exists()) {
 +                // If it's not an absolute path, try treating it as a relative path
 +                providerConfig = new File(srcDirectory, reference);
 +                if (!providerConfig.exists()) {
 +                    providerConfig = null;
 +                }
 +            }
 +        } else { // No file path, just a name
 +            // Check if it's co-located with the referencing descriptor
 +            providerConfig = new File(srcDirectory, reference);
 +            if (!providerConfig.exists()) {
 +                // Check the shared-providers config location
 +                File sharedProvidersDir = new File(srcDirectory, "../shared-providers");
 +                if (sharedProvidersDir.exists()) {
 +                    providerConfig = new File(sharedProvidersDir, reference);
 +                    if (!providerConfig.exists()) {
 +                        // Check if it's a valid name without the extension
 +                        providerConfig = new File(sharedProvidersDir, reference + ".xml");
 +                        if (!providerConfig.exists()) {
 +                            providerConfig = null;
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        return providerConfig;
 +    }
 +
 +}
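
The validateURL() check added above leans entirely on java.net.URI's syntax
parsing; it rejects malformed strings but does not resolve hosts or open
connections. A quick standalone illustration:

    import java.net.URI;
    import java.net.URISyntaxException;

    public class UrlCheck {
      static boolean isValid(String url) {
        if (url == null || url.isEmpty()) {
          return false;
        }
        try {
          new URI(url); // throws only on malformed syntax
          return true;
        } catch (URISyntaxException e) {
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(isValid("http://host-1:50070/webhdfs")); // true
        System.out.println(isValid("http://host :50070"));          // false (space)
      }
    }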

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
index eb9d887,0000000..07c4350
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
@@@ -1,44 -1,0 +1,50 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.gateway.topology.simple")
 +public interface SimpleDescriptorMessages {
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "Service discovery for cluster {0} failed.")
 +    void failedToDiscoverClusterServices(final String cluster);
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "No URLs were discovered for {0} in the {1} cluster.")
++            text = "No valid URLs were discovered for {0} in the {1} cluster.")
 +    void failedToDiscoverClusterServiceURLs(final String serviceName, final String clusterName);
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "Failed to resolve the referenced provider configuration {0}.")
 +    void failedToResolveProviderConfigRef(final String providerConfigRef);
 +
 +    @Message(level = MessageLevel.ERROR,
++            text = "URL validation failed for {0} URL {1} : {2}")
++    void serviceURLValidationFailed(final String serviceName,
++                                    final String url,
++                                    @StackTrace( level = MessageLevel.DEBUG ) Exception e );
++
++    @Message(level = MessageLevel.ERROR,
 +            text = "Error generating topology {0} from simple descriptor: {1}")
 +    void failedToGenerateTopologyFromSimpleDescriptor(final String topologyFile,
 +                                                      @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/main/java/org/apache/knox/gateway/websockets/GatewayWebsocketHandler.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/websockets/GatewayWebsocketHandler.java
index 3ddd311,0000000..69634a7
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/websockets/GatewayWebsocketHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/websockets/GatewayWebsocketHandler.java
@@@ -1,241 -1,0 +1,266 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.websockets;
 +
 +import java.io.File;
 +import java.net.MalformedURLException;
 +import java.net.URI;
 +import java.net.URL;
++import java.util.List;
++import java.util.Map;
 +import java.util.Set;
 +import java.util.concurrent.ExecutorService;
 +import java.util.concurrent.Executors;
 +
 +import org.apache.commons.lang3.StringUtils;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.registry.ServiceDefEntry;
 +import org.apache.knox.gateway.services.registry.ServiceDefinitionRegistry;
 +import org.apache.knox.gateway.services.registry.ServiceRegistry;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.eclipse.jetty.websocket.server.WebSocketHandler;
 +import org.eclipse.jetty.websocket.servlet.ServletUpgradeRequest;
 +import org.eclipse.jetty.websocket.servlet.ServletUpgradeResponse;
 +import org.eclipse.jetty.websocket.servlet.WebSocketCreator;
 +import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
 +
++import javax.websocket.ClientEndpointConfig;
++
 +/**
 + * Websocket handler for websocket connection requests. This class
 + * is responsible for creating a proxy socket for inbound and outbound
 + * connections. This is also where the http to websocket handoff happens.
-  * 
++ *
 + * @since 0.10
 + */
 +public class GatewayWebsocketHandler extends WebSocketHandler
 +    implements WebSocketCreator {
 +
 +  private static final WebsocketLogMessages LOG = MessagesFactory
 +      .get(WebsocketLogMessages.class);
 +
 +  public static final String WEBSOCKET_PROTOCOL_STRING = "ws://";
 +
 +  public static final String SECURE_WEBSOCKET_PROTOCOL_STRING = "wss://";
 +
 +  static final String REGEX_SPLIT_CONTEXT = "^((?:[^/]*/){2}[^/]*)";
 +
 +  final static String REGEX_SPLIT_SERVICE_PATH = "^((?:[^/]*/){3}[^/]*)";
 +
 +  private static final int POOL_SIZE = 10;
 +
 +  /**
 +   * Manage the threads that are spawned
 +   * @since 0.13
 +   */
 +  private final ExecutorService pool;
 +
 +  final GatewayConfig config;
 +  final GatewayServices services;
 +
 +  /**
 +   * Create an instance
-    * 
++   *
 +   * @param config
 +   * @param services
 +   */
 +  public GatewayWebsocketHandler(final GatewayConfig config,
 +      final GatewayServices services) {
 +    super();
 +
 +    this.config = config;
 +    this.services = services;
 +    pool = Executors.newFixedThreadPool(POOL_SIZE);
 +
 +  }
 +
 +  /*
 +   * (non-Javadoc)
-    * 
++   *
 +   * @see
 +   * org.eclipse.jetty.websocket.server.WebSocketHandler#configure(org.eclipse.
 +   * jetty.websocket.servlet.WebSocketServletFactory)
 +   */
 +  @Override
 +  public void configure(final WebSocketServletFactory factory) {
 +    factory.setCreator(this);
 +    factory.getPolicy()
 +        .setMaxTextMessageSize(config.getWebsocketMaxTextMessageSize());
 +    factory.getPolicy()
 +        .setMaxBinaryMessageSize(config.getWebsocketMaxBinaryMessageSize());
 +
 +    factory.getPolicy().setMaxBinaryMessageBufferSize(
 +        config.getWebsocketMaxBinaryMessageBufferSize());
 +    factory.getPolicy().setMaxTextMessageBufferSize(
 +        config.getWebsocketMaxTextMessageBufferSize());
 +
 +    factory.getPolicy()
 +        .setInputBufferSize(config.getWebsocketInputBufferSize());
 +
 +    factory.getPolicy()
 +        .setAsyncWriteTimeout(config.getWebsocketAsyncWriteTimeout());
 +    factory.getPolicy().setIdleTimeout(config.getWebsocketIdleTimeout());
 +
 +  }
 +
 +  /*
 +   * (non-Javadoc)
-    * 
++   *
 +   * @see
 +   * org.eclipse.jetty.websocket.servlet.WebSocketCreator#createWebSocket(org.
 +   * eclipse.jetty.websocket.servlet.ServletUpgradeRequest,
 +   * org.eclipse.jetty.websocket.servlet.ServletUpgradeResponse)
 +   */
 +  @Override
 +  public Object createWebSocket(ServletUpgradeRequest req,
 +      ServletUpgradeResponse resp) {
 +
 +    try {
 +      final URI requestURI = req.getRequestURI();
 +      final String path = requestURI.getPath();
 +
 +      /* URL used to connect to websocket backend */
 +      final String backendURL = getMatchedBackendURL(path);
 +
 +      /* Upgrade happens here */
-       return new ProxyWebSocketAdapter(URI.create(backendURL), pool);
++      return new ProxyWebSocketAdapter(URI.create(backendURL), pool, getClientEndpointConfig(req));
 +    } catch (final Exception e) {
 +      LOG.failedCreatingWebSocket(e);
 +      throw e;
 +    }
 +  }
 +
 +  /**
++   * Returns a {@link ClientEndpointConfig} that carries the headers
++   * to be passed to the backend.
++   * @since 0.14.0
++   * @param req the inbound upgrade request whose headers are forwarded
++   * @return the configured {@link ClientEndpointConfig}
++   */
++  private ClientEndpointConfig getClientEndpointConfig(final ServletUpgradeRequest req) {
++
++    return ClientEndpointConfig.Builder.create().configurator( new ClientEndpointConfig.Configurator() {
++
++       @Override
++       public void beforeRequest(final Map<String, List<String>> headers) {
++
++         /* Add request headers */
++         req.getHeaders().forEach(headers::putIfAbsent);
++
++       }
++    }).build();
++  }
++
++  /**
 +   * This method looks at the context path and returns the backend websocket
 +   * url. If a websocket url is configured it is used as is; otherwise we
 +   * default to ws://{host}:{port} derived from the http url, which might or
 +   * might not be right.
-    * 
-    * @param  The context path
++   *
++   * @param path The context path
 +   * @return Websocket backend url
 +   */
 +  private synchronized String getMatchedBackendURL(final String path) {
 +
 +    final ServiceRegistry serviceRegistryService = services
 +        .getService(GatewayServices.SERVICE_REGISTRY_SERVICE);
 +
 +    final ServiceDefinitionRegistry serviceDefinitionService = services
 +        .getService(GatewayServices.SERVICE_DEFINITION_REGISTRY);
 +
 +    /* Filter out the /cluster/topology to get the context we want */
 +    String[] pathInfo = path.split(REGEX_SPLIT_CONTEXT);
 +
 +    final ServiceDefEntry entry = serviceDefinitionService
 +        .getMatchingService(pathInfo[1]);
 +
 +    if (entry == null) {
 +      throw new RuntimeException(
 +          String.format("Cannot find service for the given path: %s", path));
 +    }
 +
 +    /* Filter out /cluster/topology/service to get endpoint */
 +    String[] pathService = path.split(REGEX_SPLIT_SERVICE_PATH);
 +
 +    final File servicesDir = new File(config.getGatewayServicesDir());
 +
 +    final Set<ServiceDefinition> serviceDefs = ServiceDefinitionsLoader
 +        .getServiceDefinitions(servicesDir);
 +
 +    /* URL used to connect to websocket backend */
 +    String backendURL = urlFromServiceDefinition(serviceDefs,
 +        serviceRegistryService, entry, path);
 +
 +    StringBuilder backend = new StringBuilder();
 +    try {
 +
 +      /* if we do not find websocket URL we default to HTTP */
 +      if (!StringUtils.containsAny(backendURL, WEBSOCKET_PROTOCOL_STRING, SECURE_WEBSOCKET_PROTOCOL_STRING)) {
 +        URL serviceUrl = new URL(backendURL);
 +
 +        /* Use http host:port if ws url not configured */
 +        final String protocol = ("ws".equals(serviceUrl.getProtocol())
 +                || "wss".equals(serviceUrl.getProtocol())) ? serviceUrl.getProtocol()
 +                : "ws";
 +        backend.append(protocol).append("://");
 +        backend.append(serviceUrl.getHost()).append(":");
 +        backend.append(serviceUrl.getPort()).append("/");
 +        backend.append(serviceUrl.getPath());
 +      }
 +      else {
 +        URI serviceUri = new URI(backendURL);
 +        backend.append(serviceUri);
 +        /* Avoid Zeppelin Regression - as this would require ambari changes and break current knox websocket use case*/
-         if (!StringUtils.endsWith(backend.toString(), "/ws") && pathService[1] != null) {
++        if (!StringUtils.endsWith(backend.toString(), "/ws") && pathService.length > 1 && pathService[1] != null) {
 +          backend.append(pathService[1]);
 +        }
 +      }
 +      backendURL = backend.toString();
 +
 +    } catch (MalformedURLException e){
 +        LOG.badUrlError(e);
 +        throw new RuntimeException(e.toString());
 +    } catch (Exception  e1) {
 +        LOG.failedCreatingWebSocket(e1);
 +        throw new RuntimeException(e1.toString());
 +    }
 +
 +    return backendURL;
 +  }
 +
 +  private static String urlFromServiceDefinition(
 +      final Set<ServiceDefinition> serviceDefs,
 +      final ServiceRegistry serviceRegistry, final ServiceDefEntry entry,
 +      final String path) {
 +
 +    final String[] contexts = path.split("/");
 +
 +    final String serviceURL = serviceRegistry.lookupServiceURL(contexts[2],
 +        entry.getName().toUpperCase());
 +
 +    /*
 +     * we have a match, if ws:// is present it is returned else http:// is
 +     * returned
 +     */
 +    return serviceURL;
 +
 +  }
 +
 +}
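
The header forwarding introduced in getClientEndpointConfig() uses the
standard javax.websocket configurator hook: beforeRequest() is invoked just
before the outbound upgrade request is sent. A minimal sketch, under the
assumption that the inbound headers have already been captured into a map:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import javax.websocket.ClientEndpointConfig;

    public class HeaderForwarding {
      public static void main(String[] args) {
        // Stand-in for headers captured from the inbound ServletUpgradeRequest.
        Map<String, List<String>> inbound = new HashMap<>();
        inbound.put("Authorization", Arrays.asList("Bearer placeholder-token"));

        ClientEndpointConfig cec = ClientEndpointConfig.Builder.create()
            .configurator(new ClientEndpointConfig.Configurator() {
              @Override
              public void beforeRequest(Map<String, List<String>> headers) {
                // Copy inbound headers onto the outbound upgrade request
                // without clobbering anything the container already set.
                inbound.forEach(headers::putIfAbsent);
              }
            }).build();

        // cec would then be handed to WebSocketContainer.connectToServer(...).
        System.out.println("configurator installed: " + (cec.getConfigurator() != null));
      }
    }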


[41/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-test/src/test/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorTest.java b/gateway-test/src/test/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorTest.java
deleted file mode 100644
index dd75028..0000000
--- a/gateway-test/src/test/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorTest.java
+++ /dev/null
@@ -1,603 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.InstanceSpec;
-import org.apache.curator.test.TestingCluster;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.service.config.remote.zk.ZooKeeperClientService;
-import org.apache.hadoop.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-import org.easymock.EasyMock;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test the RemoteConfigurationMonitor functionality with SASL configured, and znode ACLs applied.
- *
- * The expected implementation is org.apache.hadoop.gateway.topology.monitor.zk.ZooKeeperConfigMonitor
- *
- * Digest-based SASL is used for this test, but since that is dictated solely by the JAAS config, Kerberos-based SASL
- * should work in exactly the same way, simply by modifying the SASL config.
- */
-public class RemoteConfigurationMonitorTest {
-
-    private static final String PATH_KNOX = "/knox";
-    private static final String PATH_KNOX_CONFIG = PATH_KNOX + "/config";
-    private static final String PATH_KNOX_PROVIDERS = PATH_KNOX_CONFIG + "/shared-providers";
-    private static final String PATH_KNOX_DESCRIPTORS = PATH_KNOX_CONFIG + "/descriptors";
-
-    private static final String PATH_AUTH_TEST = "/auth_test/child_node";
-
-
-    private static final String ALT_USERNAME = "notyou";
-    private static final String ZK_USERNAME = "testsasluser";
-    private static final String ZK_PASSWORD = "testsaslpwd";
-
-    private static final ACL ANY_AUTHENTICATED_USER_ALL = new ACL(ZooDefs.Perms.ALL, new Id("auth", ""));
-    private static final ACL SASL_TESTUSER_ALL = new ACL(ZooDefs.Perms.ALL, new Id("sasl", ZK_USERNAME));
-
-    private static File testTmp;
-    private static File providersDir;
-    private static File descriptorsDir;
-
-    private static TestingCluster zkCluster;
-
-    private static CuratorFramework client;
-
-    @BeforeClass
-    public static void setupSuite() throws Exception {
-        testTmp = TestUtils.createTempDir(RemoteConfigurationMonitorTest.class.getName());
-        File confDir = TestUtils.createTempDir(testTmp + "/conf");
-        providersDir = TestUtils.createTempDir(confDir + "/shared-providers");
-        descriptorsDir = TestUtils.createTempDir(confDir + "/descriptors");
-    }
-
-    @AfterClass
-    public static void tearDownSuite() throws Exception {
-        // Delete the working dir
-        testTmp.delete();
-    }
-
-    @Before
-    public void setupTest() throws Exception {
-        configureAndStartZKCluster();
-    }
-
-    @After
-    public void tearDownTest() throws Exception {
-        // Clean up the ZK nodes, and close the client
-        if (client != null) {
-            if (client.checkExists().forPath(PATH_KNOX) != null) {
-                client.delete().deletingChildrenIfNeeded().forPath(PATH_KNOX);
-            }
-            client.close();
-        }
-
-        // Shutdown the ZK cluster
-        zkCluster.close();
-    }
-
-    /**
-     * Create and persist a JAAS configuration file, defining the SASL config for both the ZooKeeper cluster instances
-     * and ZooKeeper clients.
-     *
-     * @param username The digest username
-     * @param password The digest password
-     *
-     * @return The JAAS configuration file
-     */
-    private static File setupDigestSaslConfig(String username, String password) throws Exception {
-        File saslConfigFile = new File(testTmp, "server-jaas.conf");
-        FileWriter fw = new FileWriter(saslConfigFile);
-        fw.write("Server {\n" +
-                "    org.apache.zookeeper.server.auth.DigestLoginModule required\n" +
-                "    user_" + username + " =\"" + password + "\";\n" +
-                "};\n" +
-                "Client {\n" +
-                "    org.apache.zookeeper.server.auth.DigestLoginModule required\n" +
-                "    username=\"" + username + "\"\n" +
-                "    password=\"" + password + "\";\n" +
-                "};\n");
-        fw.close();
-        return saslConfigFile;
-    }
-
-    /**
-     * Configure and start the ZooKeeper test cluster, and create the znodes monitored by the RemoteConfigurationMonitor.
-     */
-    private static void configureAndStartZKCluster() throws Exception {
-        // Configure security for the ZK cluster instances
-        Map<String, Object> customInstanceSpecProps = new HashMap<>();
-        customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
-        customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
-
-        // Define the test cluster
-        List<InstanceSpec> instanceSpecs = new ArrayList<>();
-        for (int i = 0 ; i < 3 ; i++) {
-            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
-            instanceSpecs.add(is);
-        }
-        zkCluster = new TestingCluster(instanceSpecs);
-
-        // Configure auth for the ZooKeeper servers and the clients
-        File saslConfigFile = setupDigestSaslConfig(ZK_USERNAME, ZK_PASSWORD);
-
-        // This system property is used by the ZooKeeper cluster instances, the test driver client, and the
-        // RemoteConfigurationMonitor implementation for SASL authentication/authorization
-        System.setProperty("java.security.auth.login.config", saslConfigFile.getAbsolutePath());
-
-        // Start the cluster
-        zkCluster.start();
-
-        // Create the client for the test cluster
-        client = CuratorFrameworkFactory.builder()
-                                        .connectString(zkCluster.getConnectString())
-                                        .retryPolicy(new ExponentialBackoffRetry(100, 3))
-                                        .build();
-        assertNotNull(client);
-        client.start();
-
-        // Create test config nodes with an ACL for a sasl user that is NOT configured for the test client
-        List<ACL> acls = Arrays.asList(new ACL(ZooDefs.Perms.ALL, new Id("sasl", ALT_USERNAME)),
-                                       new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE));
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_AUTH_TEST);
-        assertNotNull("Failed to create node:" + PATH_AUTH_TEST,
-                      client.checkExists().forPath(PATH_AUTH_TEST));
-    }
-
-
-    private static void validateKnoxConfigNodeACLs(List<ACL> expectedACLS, List<ACL> actualACLs) throws Exception {
-        assertEquals(expectedACLS.size(), actualACLs.size());
-        int matchedCount = 0;
-        for (ACL expected : expectedACLS) {
-            for (ACL actual : actualACLs) {
-                Id expectedId = expected.getId();
-                Id actualId = actual.getId();
-                if (actualId.getScheme().equals(expectedId.getScheme()) && actualId.getId().equals(expectedId.getId())) {
-                    matchedCount++;
-                    assertEquals(expected.getPerms(), actual.getPerms());
-                    break;
-                }
-            }
-        }
-        assertEquals("ACL mismatch despite being same quantity.", expectedACLS.size(), matchedCount);
-    }
-
-
-    @Test
-    public void testZooKeeperConfigMonitorSASLNodesExistWithUnacceptableACL() throws Exception {
-        final String configMonitorName = "zkConfigClient";
-        final String alias = "zkPass";
-
-        // Setup the base GatewayConfig mock
-        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
-        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
-                .andReturn(Collections.singletonList(configMonitorName))
-                .anyTimes();
-        final String registryConfig =
-                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + ZK_USERNAME + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=Digest;" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + alias;
-        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
-                .andReturn(registryConfig).anyTimes();
-        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
-        EasyMock.replay(gc);
-
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(alias))
-                .andReturn(ZK_PASSWORD.toCharArray())
-                .anyTimes();
-        EasyMock.replay(aliasService);
-
-        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
-        clientService.setAliasService(aliasService);
-        clientService.init(gc, Collections.emptyMap());
-        clientService.start();
-
-        RemoteConfigurationMonitorFactory.setClientService(clientService);
-
-        RemoteConfigurationMonitor cm = RemoteConfigurationMonitorFactory.get(gc);
-        assertNotNull("Failed to load RemoteConfigurationMonitor", cm);
-
-        final ACL ANY_AUTHENTICATED_USER_ALL = new ACL(ZooDefs.Perms.ALL, new Id("auth", ""));
-        List<ACL> acls = Arrays.asList(ANY_AUTHENTICATED_USER_ALL, new ACL(ZooDefs.Perms.WRITE, ZooDefs.Ids.ANYONE_ID_UNSAFE));
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_CONFIG);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
-
-        // Make sure both ACLs were applied
-        List<ACL> preACLs = client.getACL().forPath(PATH_KNOX);
-        assertEquals(2, preACLs.size());
-
-        // Check that the config nodes really do exist (the monitor will NOT create them if they're present)
-        assertNotNull(client.checkExists().forPath(PATH_KNOX));
-        assertNotNull(client.checkExists().forPath(PATH_KNOX_CONFIG));
-        assertNotNull(client.checkExists().forPath(PATH_KNOX_PROVIDERS));
-        assertNotNull(client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
-
-        try {
-            cm.start();
-        } catch (Exception e) {
-            fail("Failed to start monitor: " + e.getMessage());
-        }
-
-        // Validate the expected ACLs on the Knox config znodes (make sure the monitor removed the world:anyone ACL)
-        List<ACL> expectedACLs = Collections.singletonList(SASL_TESTUSER_ALL);
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_CONFIG));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_PROVIDERS));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_DESCRIPTORS));
-    }
-
-
-    @Test
-    public void testZooKeeperConfigMonitorSASLNodesExistWithAcceptableACL() throws Exception {
-        final String configMonitorName = "zkConfigClient";
-        final String alias = "zkPass";
-
-        // Setup the base GatewayConfig mock
-        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
-        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
-                .andReturn(Collections.singletonList(configMonitorName))
-                .anyTimes();
-        final String registryConfig =
-                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + ZK_USERNAME + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=Digest;" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + alias;
-        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
-                .andReturn(registryConfig).anyTimes();
-        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
-        EasyMock.replay(gc);
-
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(alias))
-                .andReturn(ZK_PASSWORD.toCharArray())
-                .anyTimes();
-        EasyMock.replay(aliasService);
-
-        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
-        clientService.setAliasService(aliasService);
-        clientService.init(gc, Collections.emptyMap());
-        clientService.start();
-
-        RemoteConfigurationMonitorFactory.setClientService(clientService);
-
-        RemoteConfigurationMonitor cm = RemoteConfigurationMonitorFactory.get(gc);
-        assertNotNull("Failed to load RemoteConfigurationMonitor", cm);
-
-        List<ACL> acls = Arrays.asList(ANY_AUTHENTICATED_USER_ALL);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_CONFIG);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
-
-        // Check that the config nodes really do exist (the monitor will NOT create them if they're present)
-        assertNotNull(client.checkExists().forPath(PATH_KNOX));
-        assertNotNull(client.checkExists().forPath(PATH_KNOX_CONFIG));
-        assertNotNull(client.checkExists().forPath(PATH_KNOX_PROVIDERS));
-        assertNotNull(client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
-
-        try {
-            cm.start();
-        } catch (Exception e) {
-            fail("Failed to start monitor: " + e.getMessage());
-        }
-
-        // Test auth violation
-        clientService.get(configMonitorName).createEntry("/auth_test/child_node/test1");
-        assertNull("Creation should have been prevented since write access is not granted to the test client.",
-                client.checkExists().forPath("/auth_test/child_node/test1"));
-        assertTrue("Creation should have been prevented since write access is not granted to the test client.",
-                client.getChildren().forPath("/auth_test/child_node").isEmpty());
-
-        // Validate the expected ACLs on the Knox config znodes (make sure the monitor didn't change them)
-        List<ACL> expectedACLs = Collections.singletonList(SASL_TESTUSER_ALL);
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_CONFIG));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_PROVIDERS));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_DESCRIPTORS));
-    }
-
-
-    @Test
-    public void testZooKeeperConfigMonitorSASLCreateNodes() throws Exception {
-        final String configMonitorName = "zkConfigClient";
-        final String alias = "zkPass";
-
-        // Setup the base GatewayConfig mock
-        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
-        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
-                .andReturn(Collections.singletonList(configMonitorName))
-                .anyTimes();
-        final String registryConfig =
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + ZK_USERNAME + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=Digest;" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + alias;
-        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
-                .andReturn(registryConfig).anyTimes();
-        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
-        EasyMock.replay(gc);
-
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(alias))
-                .andReturn(ZK_PASSWORD.toCharArray())
-                .anyTimes();
-        EasyMock.replay(aliasService);
-
-        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
-        clientService.setAliasService(aliasService);
-        clientService.init(gc, Collections.emptyMap());
-        clientService.start();
-
-        RemoteConfigurationMonitorFactory.setClientService(clientService);
-
-        RemoteConfigurationMonitor cm = RemoteConfigurationMonitorFactory.get(gc);
-        assertNotNull("Failed to load RemoteConfigurationMonitor", cm);
-
-        // Check that the config nodes really don't yet exist (the monitor will create them if they're not present)
-        assertNull(client.checkExists().forPath(PATH_KNOX));
-        assertNull(client.checkExists().forPath(PATH_KNOX_CONFIG));
-        assertNull(client.checkExists().forPath(PATH_KNOX_PROVIDERS));
-        assertNull(client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
-
-        try {
-            cm.start();
-        } catch (Exception e) {
-            fail("Failed to start monitor: " + e.getMessage());
-        }
-
-        // Test auth violation
-        clientService.get(configMonitorName).createEntry("/auth_test/child_node/test1");
-        assertNull("Creation should have been prevented since write access is not granted to the test client.",
-                   client.checkExists().forPath("/auth_test/child_node/test1"));
-        assertTrue("Creation should have been prevented since write access is not granted to the test client.",
-                   client.getChildren().forPath("/auth_test/child_node").isEmpty());
-
-        // Validate the expected ACLs on the Knox config znodes (make sure the monitor created them correctly)
-        List<ACL> expectedACLs = Collections.singletonList(SASL_TESTUSER_ALL);
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_CONFIG));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_PROVIDERS));
-        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_DESCRIPTORS));
-
-        // Test the Knox config nodes, for which authentication should be sufficient for access
-        try {
-            final String pc_one_znode = getProviderPath("providers-config1.xml");
-            final File pc_one         = new File(providersDir, "providers-config1.xml");
-            final String pc_two_znode = getProviderPath("providers-config2.xml");
-            final File pc_two         = new File(providersDir, "providers-config2.xml");
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(pc_one_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(pc_one.exists());
-            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_one));
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(getProviderPath("providers-config2.xml"), TEST_PROVIDERS_CONFIG_2.getBytes());
-            Thread.sleep(100);
-            assertTrue(pc_two.exists());
-            assertEquals(TEST_PROVIDERS_CONFIG_2, FileUtils.readFileToString(pc_two));
-
-            client.setData().forPath(pc_two_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(pc_two.exists());
-            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_two));
-
-            client.delete().forPath(pc_two_znode);
-            Thread.sleep(100);
-            assertFalse(pc_two.exists());
-
-            client.delete().forPath(pc_one_znode);
-            Thread.sleep(100);
-            assertFalse(pc_one.exists());
-
-            final String desc_one_znode   = getDescriptorPath("test1.json");
-            final String desc_two_znode   = getDescriptorPath("test2.json");
-            final String desc_three_znode = getDescriptorPath("test3.json");
-            final File desc_one           = new File(descriptorsDir, "test1.json");
-            final File desc_two           = new File(descriptorsDir, "test2.json");
-            final File desc_three         = new File(descriptorsDir, "test3.json");
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_one_znode, TEST_DESCRIPTOR_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_one.exists());
-            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_one));
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_two_znode, TEST_DESCRIPTOR_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_two.exists());
-            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_two));
-
-            client.setData().forPath(desc_two_znode, TEST_DESCRIPTOR_2.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_two.exists());
-            assertEquals(TEST_DESCRIPTOR_2, FileUtils.readFileToString(desc_two));
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_three_znode, TEST_DESCRIPTOR_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_three.exists());
-            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_three));
-
-            client.delete().forPath(desc_two_znode);
-            Thread.sleep(100);
-            assertFalse("Expected test2.json to have been deleted.", desc_two.exists());
-
-            client.delete().forPath(desc_three_znode);
-            Thread.sleep(100);
-            assertFalse(desc_three.exists());
-
-            client.delete().forPath(desc_one_znode);
-            Thread.sleep(100);
-            assertFalse(desc_one.exists());
-        } finally {
-            cm.stop();
-        }
-    }
-
-    private static String getDescriptorPath(String descriptorName) {
-        return PATH_KNOX_DESCRIPTORS + "/" + descriptorName;
-    }
-
-    private static String getProviderPath(String providerConfigName) {
-        return PATH_KNOX_PROVIDERS + "/" + providerConfigName;
-    }
-
-
-    private static final String TEST_PROVIDERS_CONFIG_1 =
-                    "<gateway>\n" +
-                    "    <provider>\n" +
-                    "        <role>identity-assertion</role>\n" +
-                    "        <name>Default</name>\n" +
-                    "        <enabled>true</enabled>\n" +
-                    "    </provider>\n" +
-                    "    <provider>\n" +
-                    "        <role>hostmap</role>\n" +
-                    "        <name>static</name>\n" +
-                    "        <enabled>true</enabled>\n" +
-                    "        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
-                    "    </provider>\n" +
-                    "</gateway>\n";
-
-    private static final String TEST_PROVIDERS_CONFIG_2 =
-                    "<gateway>\n" +
-                    "    <provider>\n" +
-                    "        <role>authentication</role>\n" +
-                    "        <name>ShiroProvider</name>\n" +
-                    "        <enabled>true</enabled>\n" +
-                    "        <param>\n" +
-                    "            <name>sessionTimeout</name>\n" +
-                    "            <value>30</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>main.ldapRealm</name>\n" +
-                    "            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>main.ldapContextFactory</name>\n" +
-                    "            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>main.ldapRealm.contextFactory</name>\n" +
-                    "            <value>$ldapContextFactory</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>main.ldapRealm.userDnTemplate</name>\n" +
-                    "            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>main.ldapRealm.contextFactory.url</name>\n" +
-                    "            <value>ldap://localhost:33389</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
-                    "            <value>simple</value>\n" +
-                    "        </param>\n" +
-                    "        <param>\n" +
-                    "            <name>urls./**</name>\n" +
-                    "            <value>authcBasic</value>\n" +
-                    "        </param>\n" +
-                    "    </provider>\n" +
-                    "</gateway>\n";
-
-    private static final String TEST_DESCRIPTOR_1 =
-                    "{\n" +
-                    "  \"discovery-type\":\"AMBARI\",\n" +
-                    "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
-                    "  \"discovery-user\":\"maria_dev\",\n" +
-                    "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
-                    "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
-                    "  \"cluster\":\"Sandbox\",\n" +
-                    "  \"services\":[\n" +
-                    "    {\"name\":\"NODEUI\"},\n" +
-                    "    {\"name\":\"YARNUI\"},\n" +
-                    "    {\"name\":\"HDFSUI\"},\n" +
-                    "    {\"name\":\"OOZIEUI\"},\n" +
-                    "    {\"name\":\"HBASEUI\"},\n" +
-                    "    {\"name\":\"NAMENODE\"},\n" +
-                    "    {\"name\":\"JOBTRACKER\"},\n" +
-                    "    {\"name\":\"WEBHDFS\"},\n" +
-                    "    {\"name\":\"WEBHCAT\"},\n" +
-                    "    {\"name\":\"OOZIE\"},\n" +
-                    "    {\"name\":\"WEBHBASE\"},\n" +
-                    "    {\"name\":\"RESOURCEMANAGER\"},\n" +
-                    "    {\"name\":\"AMBARI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]},\n" +
-                    "    {\"name\":\"AMBARIUI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]}\n" +
-                    "  ]\n" +
-                    "}\n";
-
-    private static final String TEST_DESCRIPTOR_2 =
-                    "{\n" +
-                    "  \"discovery-type\":\"AMBARI\",\n" +
-                    "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
-                    "  \"discovery-user\":\"maria_dev\",\n" +
-                    "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
-                    "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
-                    "  \"cluster\":\"Sandbox\",\n" +
-                    "  \"services\":[\n" +
-                    "    {\"name\":\"NAMENODE\"},\n" +
-                    "    {\"name\":\"JOBTRACKER\"},\n" +
-                    "    {\"name\":\"WEBHDFS\"},\n" +
-                    "    {\"name\":\"WEBHCAT\"},\n" +
-                    "    {\"name\":\"OOZIE\"},\n" +
-                    "    {\"name\":\"WEBHBASE\"},\n" +
-                    "    {\"name\":\"RESOURCEMANAGER\"}\n" +
-                    "  ]\n" +
-                    "}\n";
-
-}

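A minimal sketch of the znode ACL lockdown these RemoteConfigurationMonitorTest cases expect the monitor to perform, assuming Curator's setACL builder; the helper class and method names below are illustrative only, while the "sasl" Id scheme and the single-entry ACL list mirror the SASL_TESTUSER_ALL expectation asserted via validateKnoxConfigNodeACLs():

import java.util.Collections;
import java.util.List;

import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;

// Illustrative helper, not part of this commit: reduce a Knox config znode's
// ACL list to a single sasl:<user> ALL entry, which is the end state the
// tests assert on /knox and its child config nodes.
class KnoxZNodeAclSketch {
    static void lockDownToSaslUser(CuratorFramework client, String znodePath, String saslUser) throws Exception {
        ACL saslUserAll = new ACL(ZooDefs.Perms.ALL, new Id("sasl", saslUser));
        List<ACL> desired = Collections.singletonList(saslUserAll);
        // setACL() overwrites the node's existing ACL list, which is how a
        // world:anyone WRITE entry like the one created in the test setup
        // would be removed.
        client.setACL().withACL(desired).forPath(znodePath);
    }
}
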
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-test/src/test/java/org/apache/knox/gateway/SimpleDescriptorHandlerFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/SimpleDescriptorHandlerFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/SimpleDescriptorHandlerFuncTest.java
new file mode 100644
index 0000000..5b29e19
--- /dev/null
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/SimpleDescriptorHandlerFuncTest.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.GatewayServices;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.services.security.KeystoreService;
+import org.apache.knox.gateway.services.security.MasterService;
+import org.apache.knox.gateway.services.topology.TopologyService;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType;
+import org.apache.knox.gateway.topology.simple.SimpleDescriptor;
+import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
+import org.apache.knox.test.TestUtils;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.security.KeyStore;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class SimpleDescriptorHandlerFuncTest {
+
+
+  private static final String TEST_PROVIDER_CONFIG =
+      "    <gateway>\n" +
+          "        <provider>\n" +
+          "            <role>authentication</role>\n" +
+          "            <name>ShiroProvider</name>\n" +
+          "            <enabled>true</enabled>\n" +
+          "            <param>\n" +
+          "                <name>sessionTimeout</name>\n" +
+          "                <value>30</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>main.ldapRealm</name>\n" +
+          "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>main.ldapContextFactory</name>\n" +
+          "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>main.ldapRealm.contextFactory</name>\n" +
+          "                <value>$ldapContextFactory</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>main.ldapRealm.userDnTemplate</name>\n" +
+          "                <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>main.ldapRealm.contextFactory.url</name>\n" +
+          "                <value>ldap://localhost:33389</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
+          "                <value>simple</value>\n" +
+          "            </param>\n" +
+          "            <param>\n" +
+          "                <name>urls./**</name>\n" +
+          "                <value>authcBasic</value>\n" +
+          "            </param>\n" +
+          "        </provider>\n" +
+          "\n" +
+          "        <provider>\n" +
+          "            <role>identity-assertion</role>\n" +
+          "            <name>Default</name>\n" +
+          "            <enabled>true</enabled>\n" +
+          "        </provider>\n" +
+          "\n" +
+          "        <provider>\n" +
+          "            <role>hostmap</role>\n" +
+          "            <name>static</name>\n" +
+          "            <enabled>true</enabled>\n" +
+          "            <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
+          "        </provider>\n" +
+          "    </gateway>\n";
+
+
+  /**
+   * KNOX-1136
+   * <p>
+   * Test that a credential store is created and an encryptQueryString alias is defined, with a password that is not
+   * random (but is derived from the master secret and the topology name).
+   * <p>
+   * N.B. This test depends on the NoOpServiceDiscovery extension being configured in META-INF/services
+   */
+  @Test
+  public void testSimpleDescriptorHandlerQueryStringCredentialAliasCreation() throws Exception {
+
+    final String testMasterSecret = "mysecret";
+    final String discoveryType = "NO_OP";
+    final String clusterName = "dummy";
+
+    final Map<String, List<String>> serviceURLs = new HashMap<>();
+    serviceURLs.put("RESOURCEMANAGER", Collections.singletonList("http://myhost:1234/resource"));
+
+    File testRootDir = TestUtils.createTempDir(getClass().getSimpleName());
+    File testConfDir = new File(testRootDir, "conf");
+    File testProvDir = new File(testConfDir, "shared-providers");
+    File testTopoDir = new File(testConfDir, "topologies");
+    File testDeployDir = new File(testConfDir, "deployments");
+
+    // Write the externalized provider config to a temp file
+    File providerConfig = new File(testProvDir, "ambari-cluster-policy.xml");
+    FileUtils.write(providerConfig, TEST_PROVIDER_CONFIG);
+
+    File topologyFile = null;
+    try {
+      File destDir = new File(System.getProperty("java.io.tmpdir")).getCanonicalFile();
+
+      // Mock out the simple descriptor
+      SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
+      EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
+      EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(null).anyTimes();
+      EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(discoveryType).anyTimes();
+      EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
+      EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
+      EasyMock.expect(testDescriptor.getClusterName()).andReturn(clusterName).anyTimes();
+      List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
+      for (String serviceName : serviceURLs.keySet()) {
+        SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
+        EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
+        EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
+        EasyMock.expect(svc.getParams()).andReturn(Collections.emptyMap()).anyTimes();
+        EasyMock.replay(svc);
+        serviceMocks.add(svc);
+      }
+      EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
+      EasyMock.replay(testDescriptor);
+
+      // Try setting up enough of the GatewayServer to support the test...
+      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+      InetSocketAddress gatewayAddress = new InetSocketAddress(0);
+      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(testTopoDir.getAbsolutePath()).anyTimes();
+      EasyMock.expect(config.getGatewayDeploymentDir()).andReturn(testDeployDir.getAbsolutePath()).anyTimes();
+      EasyMock.expect(config.getGatewayAddress()).andReturn(gatewayAddress).anyTimes();
+      EasyMock.expect(config.getGatewayPortMappings()).andReturn(Collections.emptyMap()).anyTimes();
+      EasyMock.replay(config);
+
+      // Set up the Gateway Services
+      GatewayServices gatewayServices = EasyMock.createNiceMock(GatewayServices.class);
+
+      // Master Service
+      MasterService ms = EasyMock.createNiceMock(MasterService.class);
+      EasyMock.expect(ms.getMasterSecret()).andReturn(testMasterSecret.toCharArray()).anyTimes();
+      EasyMock.replay(ms);
+      EasyMock.expect(gatewayServices.getService("MasterService")).andReturn(ms).anyTimes();
+
+      // Keystore Service
+      KeystoreService ks = EasyMock.createNiceMock(KeystoreService.class);
+      EasyMock.expect(ks.isCredentialStoreForClusterAvailable(testDescriptor.getName())).andReturn(false).once();
+      ks.createCredentialStoreForCluster(testDescriptor.getName());
+      EasyMock.expectLastCall().once();
+      KeyStore credStore = EasyMock.createNiceMock(KeyStore.class);
+      EasyMock.expect(ks.getCredentialStoreForCluster(testDescriptor.getName())).andReturn(credStore).anyTimes();
+      EasyMock.replay(ks);
+      EasyMock.expect(gatewayServices.getService(GatewayServices.KEYSTORE_SERVICE)).andReturn(ks).anyTimes();
+
+      // Alias Service
+      AliasService as = EasyMock.createNiceMock(AliasService.class);
+      // Captures for validating the alias creation for a generated topology
+      Capture<String> capturedCluster = EasyMock.newCapture();
+      Capture<String> capturedAlias = EasyMock.newCapture();
+      Capture<String> capturedPwd = EasyMock.newCapture();
+      as.addAliasForCluster(capture(capturedCluster), capture(capturedAlias), capture(capturedPwd));
+      EasyMock.expectLastCall().anyTimes();
+      EasyMock.replay(as);
+      EasyMock.expect(gatewayServices.getService(GatewayServices.ALIAS_SERVICE)).andReturn(as).anyTimes();
+
+      // Topology Service
+      TopologyService ts = EasyMock.createNiceMock(TopologyService.class);
+      ts.addTopologyChangeListener(anyObject());
+      EasyMock.expectLastCall().anyTimes();
+      ts.reloadTopologies();
+      EasyMock.expectLastCall().anyTimes();
+      EasyMock.expect(ts.getTopologies()).andReturn(Collections.emptyList()).anyTimes();
+      EasyMock.replay(ts);
+      EasyMock.expect(gatewayServices.getService(GatewayServices.TOPOLOGY_SERVICE)).andReturn(ts).anyTimes();
+
+      EasyMock.replay(gatewayServices);
+
+      // Start a GatewayServer with the GatewayServices mock
+      GatewayServer server = GatewayServer.startGateway(config, gatewayServices);
+
+      // Invoke the simple descriptor handler, which will also create the credential store
+      // (because it doesn't exist) and the encryptQueryString alias
+      Map<String, File> files = SimpleDescriptorHandler.handle(testDescriptor,
+                                                               providerConfig.getParentFile(),
+                                                               destDir);
+      topologyFile = files.get("topology");
+
+      // Validate the AliasService interaction
+      assertEquals("Unexpected cluster name for the alias (should be the topology name).",
+                   testDescriptor.getName(), capturedCluster.getValue());
+      assertEquals("Unexpected alias name.", "encryptQueryString", capturedAlias.getValue());
+      assertEquals("Unexpected alias value (should be master secret + topology name).",
+                   testMasterSecret + testDescriptor.getName(), capturedPwd.getValue());
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      FileUtils.forceDelete(testRootDir);
+      if (topologyFile != null) {
+        topologyFile.delete();
+      }
+    }
+  }
+
+
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Test classes for effectively "skipping" service discovery for this test.
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////
+
+  public static final class NoOpServiceDiscoveryType implements ServiceDiscoveryType {
+    @Override
+    public String getType() {
+      return NoOpServiceDiscovery.TYPE;
+    }
+
+    @Override
+    public ServiceDiscovery newInstance() {
+      return new NoOpServiceDiscovery();
+    }
+  }
+
+  private static final class NoOpServiceDiscovery implements ServiceDiscovery {
+    static final String TYPE = "NO_OP";
+
+    @Override
+    public String getType() {
+      return TYPE;
+    }
+
+    @Override
+    public Map<String, Cluster> discover(ServiceDiscoveryConfig config) {
+      return Collections.emptyMap();
+    }
+
+    @Override
+    public Cluster discover(ServiceDiscoveryConfig config, String clusterName) {
+      return null;
+    }
+  }
+
+}

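A minimal sketch of the alias derivation the KNOX-1136 test above asserts, assuming only the AliasService and MasterService interfaces shown in the mocks; the class and method names are illustrative, and this captures the expectation being verified rather than the actual SimpleDescriptorHandler code path:

import org.apache.knox.gateway.services.security.AliasService;
import org.apache.knox.gateway.services.security.MasterService;

// Illustrative only: the test captures the addAliasForCluster() arguments and
// asserts that the encryptQueryString password is derived deterministically
// as master secret + topology name, not generated randomly.
class EncryptQueryStringAliasSketch {
    static void registerAlias(MasterService master, AliasService aliases, String topologyName) throws Exception {
        String derived = new String(master.getMasterSecret()) + topologyName;
        aliases.addAliasForCluster(topologyName, "encryptQueryString", derived);
    }
}
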
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-test/src/test/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorTest.java
new file mode 100644
index 0000000..37668a8
--- /dev/null
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorTest.java
@@ -0,0 +1,603 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.InstanceSpec;
+import org.apache.curator.test.TestingCluster;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientService;
+import org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.test.TestUtils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.easymock.EasyMock;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test the RemoteConfigurationMonitor functionality with SASL configured and znode ACLs applied.
+ *
+ * The expected implementation is org.apache.knox.gateway.topology.monitor.zk.ZooKeeperConfigMonitor
+ *
+ * Digest-based SASL is used for this test, but since that is dictated solely by the JAAS config, Kerberos-based SASL
+ * should work in exactly the same way, simply by modifying the SASL config.
+ */
+public class RemoteConfigurationMonitorTest {
+
+    private static final String PATH_KNOX = "/knox";
+    private static final String PATH_KNOX_CONFIG = PATH_KNOX + "/config";
+    private static final String PATH_KNOX_PROVIDERS = PATH_KNOX_CONFIG + "/shared-providers";
+    private static final String PATH_KNOX_DESCRIPTORS = PATH_KNOX_CONFIG + "/descriptors";
+
+    private static final String PATH_AUTH_TEST = "/auth_test/child_node";
+
+
+    private static final String ALT_USERNAME = "notyou";
+    private static final String ZK_USERNAME = "testsasluser";
+    private static final String ZK_PASSWORD = "testsaslpwd";
+
+    private static final ACL ANY_AUTHENTICATED_USER_ALL = new ACL(ZooDefs.Perms.ALL, new Id("auth", ""));
+    private static final ACL SASL_TESTUSER_ALL = new ACL(ZooDefs.Perms.ALL, new Id("sasl", ZK_USERNAME));
+
+    private static File testTmp;
+    private static File providersDir;
+    private static File descriptorsDir;
+
+    private static TestingCluster zkCluster;
+
+    private static CuratorFramework client;
+
+    @BeforeClass
+    public static void setupSuite() throws Exception {
+        testTmp = TestUtils.createTempDir(RemoteConfigurationMonitorTest.class.getName());
+        File confDir = TestUtils.createTempDir(testTmp + "/conf");
+        providersDir = TestUtils.createTempDir(confDir + "/shared-providers");
+        descriptorsDir = TestUtils.createTempDir(confDir + "/descriptors");
+    }
+
+    @AfterClass
+    public static void tearDownSuite() throws Exception {
+        // Delete the working dir
+        testTmp.delete();
+    }
+
+    @Before
+    public void setupTest() throws Exception {
+        configureAndStartZKCluster();
+    }
+
+    @After
+    public void tearDownTest() throws Exception {
+        // Clean up the ZK nodes, and close the client
+        if (client != null) {
+            if (client.checkExists().forPath(PATH_KNOX) != null) {
+                client.delete().deletingChildrenIfNeeded().forPath(PATH_KNOX);
+            }
+            client.close();
+        }
+
+        // Shutdown the ZK cluster
+        zkCluster.close();
+    }
+
+    /**
+     * Create and persist a JAAS configuration file, defining the SASL config for both the ZooKeeper cluster instances
+     * and ZooKeeper clients.
+     *
+     * @param username The digest username
+     * @param password The digest password
+     *
+     * @return The JAAS configuration file
+     */
+    private static File setupDigestSaslConfig(String username, String password) throws Exception {
+        File saslConfigFile = new File(testTmp, "server-jaas.conf");
+        FileWriter fw = new FileWriter(saslConfigFile);
+        fw.write("Server {\n" +
+                "    org.apache.zookeeper.server.auth.DigestLoginModule required\n" +
+                "    user_" + username + " =\"" + password + "\";\n" +
+                "};\n" +
+                "Client {\n" +
+                "    org.apache.zookeeper.server.auth.DigestLoginModule required\n" +
+                "    username=\"" + username + "\"\n" +
+                "    password=\"" + password + "\";\n" +
+                "};\n");
+        fw.close();
+        return saslConfigFile;
+    }
+
+    /**
+     * Configure and start the ZooKeeper test cluster, and create the znodes monitored by the RemoteConfigurationMonitor.
+     */
+    private static void configureAndStartZKCluster() throws Exception {
+        // Configure security for the ZK cluster instances
+        Map<String, Object> customInstanceSpecProps = new HashMap<>();
+        customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
+        customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
+
+        // Define the test cluster
+        List<InstanceSpec> instanceSpecs = new ArrayList<>();
+        for (int i = 0 ; i < 3 ; i++) {
+            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
+            instanceSpecs.add(is);
+        }
+        zkCluster = new TestingCluster(instanceSpecs);
+
+        // Configure auth for the ZooKeeper servers and the clients
+        File saslConfigFile = setupDigestSaslConfig(ZK_USERNAME, ZK_PASSWORD);
+
+        // This system property is used by the ZooKeeper cluster instances, the test driver client, and the
+        // RemoteConfigurationMonitor implementation for SASL authentication/authorization
+        System.setProperty("java.security.auth.login.config", saslConfigFile.getAbsolutePath());
+
+        // Start the cluster
+        zkCluster.start();
+
+        // Create the client for the test cluster
+        client = CuratorFrameworkFactory.builder()
+                                        .connectString(zkCluster.getConnectString())
+                                        .retryPolicy(new ExponentialBackoffRetry(100, 3))
+                                        .build();
+        assertNotNull(client);
+        client.start();
+
+        // Create test config nodes with an ACL for a sasl user that is NOT configured for the test client
+        List<ACL> acls = Arrays.asList(new ACL(ZooDefs.Perms.ALL, new Id("sasl", ALT_USERNAME)),
+                                       new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE));
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_AUTH_TEST);
+        assertNotNull("Failed to create node: " + PATH_AUTH_TEST,
+                      client.checkExists().forPath(PATH_AUTH_TEST));
+    }
+
+
+    private static void validateKnoxConfigNodeACLs(List<ACL> expectedACLS, List<ACL> actualACLs) throws Exception {
+        assertEquals(expectedACLS.size(), actualACLs.size());
+        int matchedCount = 0;
+        for (ACL expected : expectedACLS) {
+            for (ACL actual : actualACLs) {
+                Id expectedId = expected.getId();
+                Id actualId = actual.getId();
+                if (actualId.getScheme().equals(expectedId.getScheme()) && actualId.getId().equals(expectedId.getId())) {
+                    matchedCount++;
+                    assertEquals(expected.getPerms(), actual.getPerms());
+                    break;
+                }
+            }
+        }
+        assertEquals("ACL mismatch despite the expected and actual ACL lists being the same size.", expectedACLS.size(), matchedCount);
+    }
+
+
+    @Test
+    public void testZooKeeperConfigMonitorSASLNodesExistWithUnacceptableACL() throws Exception {
+        final String configMonitorName = "zkConfigClient";
+        final String alias = "zkPass";
+
+        // Set up the base GatewayConfig mock
+        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
+        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
+                .andReturn(Collections.singletonList(configMonitorName))
+                .anyTimes();
+        final String registryConfig =
+                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + ZK_USERNAME + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=Digest;" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + alias;
+        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
+                .andReturn(registryConfig).anyTimes();
+        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
+        EasyMock.replay(gc);
+
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(alias))
+                .andReturn(ZK_PASSWORD.toCharArray())
+                .anyTimes();
+        EasyMock.replay(aliasService);
+
+        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
+        clientService.setAliasService(aliasService);
+        clientService.init(gc, Collections.emptyMap());
+        clientService.start();
+
+        RemoteConfigurationMonitorFactory.setClientService(clientService);
+
+        RemoteConfigurationMonitor cm = RemoteConfigurationMonitorFactory.get(gc);
+        assertNotNull("Failed to load RemoteConfigurationMonitor", cm);
+
+        List<ACL> acls = Arrays.asList(ANY_AUTHENTICATED_USER_ALL, new ACL(ZooDefs.Perms.WRITE, ZooDefs.Ids.ANYONE_ID_UNSAFE));
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_CONFIG);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
+
+        // Make sure both ACLs were applied
+        List<ACL> preACLs = client.getACL().forPath(PATH_KNOX);
+        assertEquals(2, preACLs.size());
+
+        // Check that the config nodes really do exist (the monitor will NOT create them if they're present)
+        assertNotNull(client.checkExists().forPath(PATH_KNOX));
+        assertNotNull(client.checkExists().forPath(PATH_KNOX_CONFIG));
+        assertNotNull(client.checkExists().forPath(PATH_KNOX_PROVIDERS));
+        assertNotNull(client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
+
+        try {
+            cm.start();
+        } catch (Exception e) {
+            fail("Failed to start monitor: " + e.getMessage());
+        }
+
+        // Validate the expected ACLs on the Knox config znodes (make sure the monitor removed the world:anyone ACL)
+        List<ACL> expectedACLs = Collections.singletonList(SASL_TESTUSER_ALL);
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_CONFIG));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_PROVIDERS));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_DESCRIPTORS));
+    }
+
+
+    @Test
+    public void testZooKeeperConfigMonitorSASLNodesExistWithAcceptableACL() throws Exception {
+        final String configMonitorName = "zkConfigClient";
+        final String alias = "zkPass";
+
+        // Set up the base GatewayConfig mock
+        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
+        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
+                .andReturn(Collections.singletonList(configMonitorName))
+                .anyTimes();
+        final String registryConfig =
+                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + ZK_USERNAME + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=Digest;" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + alias;
+        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
+                .andReturn(registryConfig).anyTimes();
+        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
+        EasyMock.replay(gc);
+
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(alias))
+                .andReturn(ZK_PASSWORD.toCharArray())
+                .anyTimes();
+        EasyMock.replay(aliasService);
+
+        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
+        clientService.setAliasService(aliasService);
+        clientService.init(gc, Collections.emptyMap());
+        clientService.start();
+
+        RemoteConfigurationMonitorFactory.setClientService(clientService);
+
+        RemoteConfigurationMonitor cm = RemoteConfigurationMonitorFactory.get(gc);
+        assertNotNull("Failed to load RemoteConfigurationMonitor", cm);
+
+        List<ACL> acls = Arrays.asList(ANY_AUTHENTICATED_USER_ALL);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_CONFIG);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
+
+        // Check that the config nodes really do exist (the monitor will NOT create them if they're present)
+        assertNotNull(client.checkExists().forPath(PATH_KNOX));
+        assertNotNull(client.checkExists().forPath(PATH_KNOX_CONFIG));
+        assertNotNull(client.checkExists().forPath(PATH_KNOX_PROVIDERS));
+        assertNotNull(client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
+
+        try {
+            cm.start();
+        } catch (Exception e) {
+            fail("Failed to start monitor: " + e.getMessage());
+        }
+
+        // Test auth violation
+        clientService.get(configMonitorName).createEntry("/auth_test/child_node/test1");
+        assertNull("Creation should have been prevented since write access is not granted to the test client.",
+                client.checkExists().forPath("/auth_test/child_node/test1"));
+        assertTrue("Creation should have been prevented since write access is not granted to the test client.",
+                client.getChildren().forPath("/auth_test/child_node").isEmpty());
+
+        // Validate the expected ACLs on the Knox config znodes (make sure the monitor didn't change them)
+        List<ACL> expectedACLs = Collections.singletonList(SASL_TESTUSER_ALL);
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_CONFIG));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_PROVIDERS));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_DESCRIPTORS));
+    }
+
+
+    @Test
+    public void testZooKeeperConfigMonitorSASLCreateNodes() throws Exception {
+        final String configMonitorName = "zkConfigClient";
+        final String alias = "zkPass";
+
+        // Set up the base GatewayConfig mock
+        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
+        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
+                .andReturn(Collections.singletonList(configMonitorName))
+                .anyTimes();
+        final String registryConfig =
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + ZK_USERNAME + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=Digest;" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + alias;
+        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
+                .andReturn(registryConfig).anyTimes();
+        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
+        EasyMock.replay(gc);
+
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(alias))
+                .andReturn(ZK_PASSWORD.toCharArray())
+                .anyTimes();
+        EasyMock.replay(aliasService);
+
+        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
+        clientService.setAliasService(aliasService);
+        clientService.init(gc, Collections.emptyMap());
+        clientService.start();
+
+        RemoteConfigurationMonitorFactory.setClientService(clientService);
+
+        RemoteConfigurationMonitor cm = RemoteConfigurationMonitorFactory.get(gc);
+        assertNotNull("Failed to load RemoteConfigurationMonitor", cm);
+
+        // Check that the config nodes really don't yet exist (the monitor will create them if they're not present)
+        assertNull(client.checkExists().forPath(PATH_KNOX));
+        assertNull(client.checkExists().forPath(PATH_KNOX_CONFIG));
+        assertNull(client.checkExists().forPath(PATH_KNOX_PROVIDERS));
+        assertNull(client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
+
+        try {
+            cm.start();
+        } catch (Exception e) {
+            fail("Failed to start monitor: " + e.getMessage());
+        }
+
+        // Test auth violation
+        clientService.get(configMonitorName).createEntry("/auth_test/child_node/test1");
+        assertNull("Creation should have been prevented since write access is not granted to the test client.",
+                   client.checkExists().forPath("/auth_test/child_node/test1"));
+        assertTrue("Creation should have been prevented since write access is not granted to the test client.",
+                   client.getChildren().forPath("/auth_test/child_node").isEmpty());
+
+        // Validate the expected ACLs on the Knox config znodes (make sure the monitor created them correctly)
+        List<ACL> expectedACLs = Collections.singletonList(SASL_TESTUSER_ALL);
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_CONFIG));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_PROVIDERS));
+        validateKnoxConfigNodeACLs(expectedACLs, client.getACL().forPath(PATH_KNOX_DESCRIPTORS));
+
+        // Test the Knox config nodes, for which authentication should be sufficient for access
+        try {
+            final String pc_one_znode = getProviderPath("providers-config1.xml");
+            final File pc_one         = new File(providersDir, "providers-config1.xml");
+            final String pc_two_znode = getProviderPath("providers-config2.xml");
+            final File pc_two         = new File(providersDir, "providers-config2.xml");
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(pc_one_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(pc_one.exists());
+            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_one));
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(getProviderPath("providers-config2.xml"), TEST_PROVIDERS_CONFIG_2.getBytes());
+            Thread.sleep(100);
+            assertTrue(pc_two.exists());
+            assertEquals(TEST_PROVIDERS_CONFIG_2, FileUtils.readFileToString(pc_two));
+
+            client.setData().forPath(pc_two_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(pc_two.exists());
+            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_two));
+
+            client.delete().forPath(pc_two_znode);
+            Thread.sleep(100);
+            assertFalse(pc_two.exists());
+
+            client.delete().forPath(pc_one_znode);
+            Thread.sleep(100);
+            assertFalse(pc_one.exists());
+
+            final String desc_one_znode   = getDescriptorPath("test1.json");
+            final String desc_two_znode   = getDescriptorPath("test2.json");
+            final String desc_three_znode = getDescriptorPath("test3.json");
+            final File desc_one           = new File(descriptorsDir, "test1.json");
+            final File desc_two           = new File(descriptorsDir, "test2.json");
+            final File desc_three         = new File(descriptorsDir, "test3.json");
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_one_znode, TEST_DESCRIPTOR_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_one.exists());
+            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_one));
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_two_znode, TEST_DESCRIPTOR_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_two.exists());
+            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_two));
+
+            client.setData().forPath(desc_two_znode, TEST_DESCRIPTOR_2.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_two.exists());
+            assertEquals(TEST_DESCRIPTOR_2, FileUtils.readFileToString(desc_two));
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_three_znode, TEST_DESCRIPTOR_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_three.exists());
+            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_three));
+
+            client.delete().forPath(desc_two_znode);
+            Thread.sleep(100);
+            assertFalse("Expected test2.json to have been deleted.", desc_two.exists());
+
+            client.delete().forPath(desc_three_znode);
+            Thread.sleep(100);
+            assertFalse(desc_three.exists());
+
+            client.delete().forPath(desc_one_znode);
+            Thread.sleep(100);
+            assertFalse(desc_one.exists());
+        } finally {
+            cm.stop();
+        }
+    }
+
+    private static String getDescriptorPath(String descriptorName) {
+        return PATH_KNOX_DESCRIPTORS + "/" + descriptorName;
+    }
+
+    private static String getProviderPath(String providerConfigName) {
+        return PATH_KNOX_PROVIDERS + "/" + providerConfigName;
+    }
+
+
+    private static final String TEST_PROVIDERS_CONFIG_1 =
+                    "<gateway>\n" +
+                    "    <provider>\n" +
+                    "        <role>identity-assertion</role>\n" +
+                    "        <name>Default</name>\n" +
+                    "        <enabled>true</enabled>\n" +
+                    "    </provider>\n" +
+                    "    <provider>\n" +
+                    "        <role>hostmap</role>\n" +
+                    "        <name>static</name>\n" +
+                    "        <enabled>true</enabled>\n" +
+                    "        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
+                    "    </provider>\n" +
+                    "</gateway>\n";
+
+    private static final String TEST_PROVIDERS_CONFIG_2 =
+                    "<gateway>\n" +
+                    "    <provider>\n" +
+                    "        <role>authentication</role>\n" +
+                    "        <name>ShiroProvider</name>\n" +
+                    "        <enabled>true</enabled>\n" +
+                    "        <param>\n" +
+                    "            <name>sessionTimeout</name>\n" +
+                    "            <value>30</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>main.ldapRealm</name>\n" +
+                    "            <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>main.ldapContextFactory</name>\n" +
+                    "            <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>main.ldapRealm.contextFactory</name>\n" +
+                    "            <value>$ldapContextFactory</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>main.ldapRealm.userDnTemplate</name>\n" +
+                    "            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>main.ldapRealm.contextFactory.url</name>\n" +
+                    "            <value>ldap://localhost:33389</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
+                    "            <value>simple</value>\n" +
+                    "        </param>\n" +
+                    "        <param>\n" +
+                    "            <name>urls./**</name>\n" +
+                    "            <value>authcBasic</value>\n" +
+                    "        </param>\n" +
+                    "    </provider>\n" +
+                    "</gateway>\n";
+
+    private static final String TEST_DESCRIPTOR_1 =
+                    "{\n" +
+                    "  \"discovery-type\":\"AMBARI\",\n" +
+                    "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
+                    "  \"discovery-user\":\"maria_dev\",\n" +
+                    "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
+                    "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
+                    "  \"cluster\":\"Sandbox\",\n" +
+                    "  \"services\":[\n" +
+                    "    {\"name\":\"NODEUI\"},\n" +
+                    "    {\"name\":\"YARNUI\"},\n" +
+                    "    {\"name\":\"HDFSUI\"},\n" +
+                    "    {\"name\":\"OOZIEUI\"},\n" +
+                    "    {\"name\":\"HBASEUI\"},\n" +
+                    "    {\"name\":\"NAMENODE\"},\n" +
+                    "    {\"name\":\"JOBTRACKER\"},\n" +
+                    "    {\"name\":\"WEBHDFS\"},\n" +
+                    "    {\"name\":\"WEBHCAT\"},\n" +
+                    "    {\"name\":\"OOZIE\"},\n" +
+                    "    {\"name\":\"WEBHBASE\"},\n" +
+                    "    {\"name\":\"RESOURCEMANAGER\"},\n" +
+                    "    {\"name\":\"AMBARI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]},\n" +
+                    "    {\"name\":\"AMBARIUI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]}\n" +
+                    "  ]\n" +
+                    "}\n";
+
+    private static final String TEST_DESCRIPTOR_2 =
+                    "{\n" +
+                    "  \"discovery-type\":\"AMBARI\",\n" +
+                    "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
+                    "  \"discovery-user\":\"maria_dev\",\n" +
+                    "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
+                    "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
+                    "  \"cluster\":\"Sandbox\",\n" +
+                    "  \"services\":[\n" +
+                    "    {\"name\":\"NAMENODE\"},\n" +
+                    "    {\"name\":\"JOBTRACKER\"},\n" +
+                    "    {\"name\":\"WEBHDFS\"},\n" +
+                    "    {\"name\":\"WEBHCAT\"},\n" +
+                    "    {\"name\":\"OOZIE\"},\n" +
+                    "    {\"name\":\"WEBHBASE\"},\n" +
+                    "    {\"name\":\"RESOURCEMANAGER\"}\n" +
+                    "  ]\n" +
+                    "}\n";
+
+}
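
The tests above pre-create the Knox znodes with a world:anyone entry and then
assert that, once started, the monitor restricts them to the authenticated
user. As a rough, hypothetical sketch of that correction step (the helper
name, connect string, and principal are illustrative; the real logic lives in
DefaultRemoteConfigurationMonitor):

    import java.util.Collections;
    import java.util.List;
    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Id;

    public class AclCorrectionSketch {
        // Replace any world-scheme ACL entry on the znode with a single
        // all-permissions entry for the authenticated SASL principal,
        // matching the SASL_TESTUSER_ALL expectation in the tests.
        static void correctACLs(CuratorFramework client, String path, String zkUser) throws Exception {
            List<ACL> current = client.getACL().forPath(path);
            boolean hasWorldEntry = current.stream()
                .anyMatch(acl -> "world".equals(acl.getId().getScheme()));
            if (hasWorldEntry) {
                List<ACL> restricted = Collections.singletonList(
                    new ACL(ZooDefs.Perms.ALL, new Id("sasl", zkUser)));
                client.setACL().withACL(restricted).forPath(path);
            }
        }

        public static void main(String[] args) throws Exception {
            // In the real tests the client authenticates via SASL, configured
            // through a JAAS login context rather than through the builder.
            CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("localhost:2181")   // illustrative
                .retryPolicy(new ExponentialBackoffRetry(1000, 3))
                .build();
            client.start();
            correctACLs(client, "/knox/config", "knoxui");   // "knoxui" is an illustrative principal
            client.close();
        }
    }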

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-test/src/test/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryType
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryType b/gateway-test/src/test/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryType
deleted file mode 100644
index 0c5fe09..0000000
--- a/gateway-test/src/test/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryType
+++ /dev/null
@@ -1,19 +0,0 @@
-##########################################################################
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-
-org.apache.hadoop.gateway.SimpleDescriptorHandlerFuncTest$NoOpServiceDiscoveryType

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-test/src/test/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType b/gateway-test/src/test/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType
new file mode 100644
index 0000000..8d72813
--- /dev/null
+++ b/gateway-test/src/test/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType
@@ -0,0 +1,19 @@
+##########################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+org.apache.knox.gateway.SimpleDescriptorHandlerFuncTest$NoOpServiceDiscoveryType
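
The rename above is load-bearing: java.util.ServiceLoader locates providers
through the resource path META-INF/services/<fully-qualified-interface-name>,
so after the package restructuring the file must carry the org.apache.knox
name for the relocated interface to find its test implementation. A
simplified illustration of that lookup:

    import java.util.ServiceLoader;
    import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType;

    public class DiscoveryTypeLookupSketch {
        public static void main(String[] args) {
            // Iterates every implementation declared in
            // META-INF/services/org.apache.knox.gateway.topology.discovery.ServiceDiscoveryType
            // on the classpath; with the old org.apache.hadoop resource name,
            // no providers would be found for the relocated interface.
            for (ServiceDiscoveryType type : ServiceLoader.load(ServiceDiscoveryType.class)) {
                System.out.println("Discovered provider: " + type.getClass().getName());
            }
        }
    }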


[40/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
Merge branch 'master' into KNOX-998-Package_Restructuring

# Conflicts:
#	gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
#	gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryTest.java
#	gateway-provider-security-pac4j/src/main/java/org/apache/knox/gateway/pac4j/filter/Pac4jIdentityAdapter.java
#	gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
#	gateway-server/src/main/java/org/apache/knox/gateway/services/DefaultGatewayServices.java
#	gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
#	gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
#	gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
#	gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
#	gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/22a7304a
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/22a7304a
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/22a7304a

Branch: refs/heads/master
Commit: 22a7304a602105ed01ed37e19cba086f3296e4af
Parents: e70904b 370c861
Author: Sandeep More <mo...@apache.org>
Authored: Thu Dec 14 10:38:39 2017 -0500
Committer: Sandeep More <mo...@apache.org>
Committed: Thu Dec 14 10:38:39 2017 -0500

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 CHANGES                                         |   22 +-
 gateway-admin-ui/README.md                      |   34 +-
 gateway-admin-ui/angular-cli.json               |   22 +-
 gateway-admin-ui/package.json                   |   60 +-
 gateway-admin-ui/src/app/app.module.ts          |    6 +-
 .../src/app/topology-detail.component.ts        |   55 +-
 gateway-admin-ui/src/app/topology.component.ts  |    2 +-
 .../src/assets/knox-logo-transparent.gif        |  Bin 0 -> 19703 bytes
 gateway-admin-ui/src/index.html                 |    4 +-
 gateway-admin-ui/src/tsconfig.json              |    4 +-
 .../app/assets/knox-logo-transparent.gif        |  Bin 0 -> 19703 bytes
 .../applications/admin-ui/app/index.html        |   62 +-
 .../app/inline.b47d11937c275f76ce02.bundle.js   |    1 +
 .../app/inline.d41d8cd98f00b204e980.bundle.js   |    2 -
 .../app/inline.d41d8cd98f00b204e980.bundle.map  |    1 -
 .../app/main.806d67070af66e18c2fc.bundle.js     |    2 -
 .../app/main.806d67070af66e18c2fc.bundle.js.gz  |  Bin 3657 -> 0 bytes
 .../app/main.806d67070af66e18c2fc.bundle.map    |    1 -
 .../app/main.a69408978854e3a77fb2.bundle.js     |    1 +
 .../app/scripts.2c89ed78f648df44c10f.bundle.js  |   12 +
 .../app/styles.b2328beb0372c051d06d.bundle.js   |    2 -
 .../app/styles.b2328beb0372c051d06d.bundle.map  |    1 -
 .../app/styles.d41d8cd98f00b204e980.bundle.css  |    0
 ....d41d8cd98f00b204e9800998ecf8427e.bundle.css |    2 -
 .../app/vendor.48771018d3da89d3269f.bundle.js   | 2035 ------------------
 .../vendor.48771018d3da89d3269f.bundle.js.gz    |  Bin 459997 -> 0 bytes
 .../app/vendor.48771018d3da89d3269f.bundle.map  |    1 -
 .../discovery/ambari/AmbariClientCommon.java    |  102 +
 ...bariClusterConfigurationMonitorProvider.java |   35 +
 .../ambari/AmbariConfigurationMonitor.java      |  525 +++++
 .../topology/discovery/ambari/RESTInvoker.java  |  136 ++
 .../discovery/ambari/AmbariCluster.java         |    5 +
 .../ambari/AmbariServiceDiscovery.java          |  228 +-
 .../ambari/AmbariServiceDiscoveryMessages.java  |   51 +-
 .../ambari/ServiceURLPropertyConfig.java        |    2 +-
 ...iscovery.ClusterConfigurationMonitorProvider |   19 +
 .../ambari/AmbariConfigurationMonitorTest.java  |  319 +++
 .../ambari/AmbariServiceDiscoveryTest.java      |   34 +-
 .../pac4j/filter/Pac4jDispatcherFilter.java     |   11 +-
 .../pac4j/filter/Pac4jIdentityAdapter.java      |   33 +-
 .../gateway/pac4j/MockHttpServletRequest.java   |    8 +-
 .../knox/gateway/pac4j/Pac4jProviderTest.java   |  187 +-
 gateway-release/home/conf/gateway-site.xml      |   12 +
 gateway-release/home/conf/topologies/admin.xml  |   21 +-
 .../home/conf/topologies/knoxsso.xml            |    5 +-
 .../home/conf/topologies/manager.xml            |   21 +-
 .../home/conf/topologies/sandbox.xml            |   21 +-
 gateway-server/pom.xml                          |    9 +
 ...faultClusterConfigurationMonitorService.java |   81 +
 .../DefaultConfigurationMonitorProvider.java    |   31 +
 .../DefaultRemoteConfigurationMonitor.java      |  228 ++
 .../RemoteConfigurationMonitorFactory.java      |   74 +
 .../apache/knox/gateway/GatewayMessages.java    |   64 +-
 .../gateway/config/impl/GatewayConfigImpl.java  |   67 +-
 .../gateway/services/CLIGatewayServices.java    |   10 +
 .../services/DefaultGatewayServices.java        |   24 +-
 .../topology/impl/DefaultTopologyService.java   |   99 +-
 .../simple/SimpleDescriptorFactory.java         |    2 +-
 .../simple/SimpleDescriptorHandler.java         |   78 +-
 .../simple/SimpleDescriptorMessages.java        |    9 +
 .../org/apache/knox/gateway/util/KnoxCLI.java   |  411 +++-
 ...y.monitor.RemoteConfigurationMonitorProvider |   19 +
 ...emoteConfigurationRegistryClientService.java |  263 +++
 ...figurationRegistryClientServiceProvider.java |   32 +
 .../ZooKeeperConfigurationMonitorTest.java      |  355 +++
 .../config/impl/GatewayConfigImplTest.java      |   43 +
 .../topology/DefaultTopologyServiceTest.java    |   10 +-
 .../simple/SimpleDescriptorFactoryTest.java     |   13 +-
 .../apache/knox/gateway/util/KnoxCLITest.java   |  385 +++-
 .../knox/gateway/websockets/BadUrlTest.java     |   11 +
 .../gateway/websockets/WebsocketEchoTest.java   |   11 +
 .../WebsocketMultipleConnectionTest.java        |   11 +
 ...teConfigurationRegistryClientServiceProvider |   19 +
 .../services/ambariui/2.2.1/rewrite.xml         |  104 -
 .../services/ambariui/2.2.1/service.xml         |   92 -
 gateway-service-remoteconfig/pom.xml            |   89 +
 .../remote/RemoteConfigurationMessages.java     |   49 +
 ...nfigurationRegistryClientServiceFactory.java |   41 +
 ...figurationRegistryClientServiceProvider.java |   27 +
 .../RemoteConfigurationRegistryConfig.java      |   43 +
 .../DefaultRemoteConfigurationRegistries.java   |  104 +
 .../config/RemoteConfigurationRegistries.java   |   33 +
 .../RemoteConfigurationRegistriesAccessor.java  |   60 +
 .../RemoteConfigurationRegistriesParser.java    |   48 +
 .../config/RemoteConfigurationRegistry.java     |  139 ++
 .../config/remote/zk/CuratorClientService.java  |  464 ++++
 .../RemoteConfigurationRegistryJAASConfig.java  |  179 ++
 .../remote/zk/ZooKeeperClientService.java       |   25 +
 .../zk/ZooKeeperClientServiceProvider.java      |   34 +
 ...teConfigurationRegistryClientServiceProvider |   19 +
 ...efaultRemoteConfigurationRegistriesTest.java |  184 ++
 ...teConfigurationRegistryConfigParserTest.java |  108 +
 .../util/RemoteRegistryConfigTestUtils.java     |  117 +
 ...eConfigurationRegistryClientServiceTest.java |  424 ++++
 ...moteConfigurationRegistryJAASConfigTest.java |  255 +++
 .../RemoteConfigurationRegistryClient.java      |   80 +
 ...emoteConfigurationRegistryClientService.java |   28 +
 .../ClusterConfigurationMonitorService.java     |   43 +
 .../discovery/ClusterConfigurationMonitor.java  |   48 +
 .../ClusterConfigurationMonitorProvider.java    |   27 +
 .../monitor/RemoteConfigurationMonitor.java     |   24 +
 .../RemoteConfigurationMonitorProvider.java     |   34 +
 .../knox/gateway/config/GatewayConfig.java      |   50 +
 .../knox/gateway/services/GatewayServices.java  |    4 +
 .../apache/knox/gateway/GatewayTestConfig.java  |   38 +-
 .../java/org/apache/knox/test/TestUtils.java    |    2 +-
 gateway-test/pom.xml                            |    6 +
 .../SimpleDescriptorHandlerFuncTest.java        |  275 +++
 .../monitor/RemoteConfigurationMonitorTest.java |  603 ++++++
 .../knox/gateway/GatewayBasicFuncTest.java      |    2 +-
 ...eway.topology.discovery.ServiceDiscoveryType |   19 +
 pom.xml                                         |   18 +-
 113 files changed, 7743 insertions(+), 2663 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/.gitignore
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
index d71e079,0000000..bcf3adc
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
@@@ -1,115 -1,0 +1,120 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +class AmbariCluster implements ServiceDiscovery.Cluster {
 +
 +    private String name = null;
 +
 +    private AmbariDynamicServiceURLCreator urlCreator;
 +
 +    private Map<String, Map<String, ServiceConfiguration>> serviceConfigurations = new HashMap<>();
 +
 +    private Map<String, AmbariComponent> components = null;
 +
 +
 +    AmbariCluster(String name) {
 +        this.name = name;
 +        components = new HashMap<>();
 +        urlCreator = new AmbariDynamicServiceURLCreator(this);
 +    }
 +
 +    void addServiceConfiguration(String serviceName, String configurationType, ServiceConfiguration serviceConfig) {
 +        if (!serviceConfigurations.keySet().contains(serviceName)) {
 +            serviceConfigurations.put(serviceName, new HashMap<>());
 +        }
 +        serviceConfigurations.get(serviceName).put(configurationType, serviceConfig);
 +    }
 +
 +
 +    void addComponent(AmbariComponent component) {
 +        components.put(component.getName(), component);
 +    }
 +
 +
 +    ServiceConfiguration getServiceConfiguration(String serviceName, String configurationType) {
 +        ServiceConfiguration sc = null;
 +        Map<String, ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +        if (configs != null) {
 +            sc = configs.get(configurationType);
 +        }
 +        return sc;
 +    }
 +
 +
++    Map<String, Map<String, ServiceConfiguration>> getServiceConfigurations() {
++        return serviceConfigurations;
++    }
++
++
 +    Map<String, AmbariComponent> getComponents() {
 +        return components;
 +    }
 +
 +
 +    AmbariComponent getComponent(String name) {
 +        return components.get(name);
 +    }
 +
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +
 +    @Override
 +    public List<String> getServiceURLs(String serviceName) {
 +        List<String> urls = new ArrayList<>();
 +        urls.addAll(urlCreator.create(serviceName));
 +        return urls;
 +    }
 +
 +
 +    static class ServiceConfiguration {
 +
 +        private String type;
 +        private String version;
 +        private Map<String, String> props;
 +
 +        ServiceConfiguration(String type, String version, Map<String, String> properties) {
 +            this.type = type;
 +            this.version = version;
 +            this.props = properties;
 +        }
 +
 +        public String getVersion() {
 +            return version;
 +        }
 +
 +        public String getType() {
 +            return type;
 +        }
 +
 +        public Map<String, String> getProperties() {
 +            return props;
 +        }
 +    }
 +
 +}
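
The new getServiceConfigurations() accessor added above exposes the complete
service-name to configuration-type to ServiceConfiguration mapping, which is
what lets the new AmbariConfigurationMonitor record configuration versions
for later change polling. A rough sketch of walking that structure (a
hypothetical helper; it assumes same-package access, since both the accessor
and ServiceConfiguration are package-private):

    package org.apache.knox.gateway.topology.discovery.ambari;

    import java.util.HashMap;
    import java.util.Map;

    class ConfigVersionSketch {
        // Flatten the nested map into "<service>:<configType>" -> version,
        // a convenient shape for comparing successive discovery results.
        static Map<String, String> collectConfigVersions(AmbariCluster cluster) {
            Map<String, String> versions = new HashMap<>();
            for (Map.Entry<String, Map<String, AmbariCluster.ServiceConfiguration>> svc :
                     cluster.getServiceConfigurations().entrySet()) {
                for (Map.Entry<String, AmbariCluster.ServiceConfiguration> cfg :
                         svc.getValue().entrySet()) {
                    versions.put(svc.getKey() + ":" + cfg.getKey(), cfg.getValue().getVersion());
                }
            }
            return versions;
        }
    }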

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
index dbc783d,0000000..6a6a888
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
@@@ -1,306 -1,0 +1,262 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
- import java.io.IOException;
++import java.lang.reflect.Method;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +
 +import net.minidev.json.JSONArray;
 +import net.minidev.json.JSONObject;
- import net.minidev.json.JSONValue;
- import org.apache.knox.gateway.config.ConfigurationException;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
++import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.AliasService;
- import org.apache.knox.gateway.services.security.AliasServiceException;
++import org.apache.knox.gateway.topology.ClusterConfigurationMonitorService;
++import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
 +import org.apache.knox.gateway.topology.discovery.GatewayService;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
- import org.apache.http.HttpEntity;
- import org.apache.http.HttpStatus;
- import org.apache.http.client.methods.CloseableHttpResponse;
- import org.apache.http.client.methods.HttpGet;
- import org.apache.http.impl.client.CloseableHttpClient;
- import org.apache.http.message.BasicHeader;
- import org.apache.http.util.EntityUtils;
 +
 +
 +class AmbariServiceDiscovery implements ServiceDiscovery {
 +
 +    static final String TYPE = "AMBARI";
 +
-     static final String AMBARI_CLUSTERS_URI = "/api/v1/clusters";
++    static final String AMBARI_CLUSTERS_URI = AmbariClientCommon.AMBARI_CLUSTERS_URI;
 +
-     static final String AMBARI_HOSTROLES_URI =
-                                        AMBARI_CLUSTERS_URI + "/%s/services?fields=components/host_components/HostRoles";
++    static final String AMBARI_HOSTROLES_URI = AmbariClientCommon.AMBARI_HOSTROLES_URI;
 +
-     static final String AMBARI_SERVICECONFIGS_URI =
-             AMBARI_CLUSTERS_URI + "/%s/configurations/service_config_versions?is_current=true";
++    static final String AMBARI_SERVICECONFIGS_URI = AmbariClientCommon.AMBARI_SERVICECONFIGS_URI;
 +
 +    private static final String COMPONENT_CONFIG_MAPPING_FILE =
 +                                                        "ambari-service-discovery-component-config-mapping.properties";
 +
++    private static final String GATEWAY_SERVICES_ACCESSOR_CLASS  = "org.apache.knox.gateway.GatewayServer";
++    private static final String GATEWAY_SERVICES_ACCESSOR_METHOD = "getGatewayServices";
++
 +    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    // Map of component names to service configuration types
 +    private static Map<String, String> componentServiceConfigs = new HashMap<>();
 +    static {
 +        try {
 +            Properties configMapping = new Properties();
 +            configMapping.load(AmbariServiceDiscovery.class.getClassLoader().getResourceAsStream(COMPONENT_CONFIG_MAPPING_FILE));
 +            for (String componentName : configMapping.stringPropertyNames()) {
 +                componentServiceConfigs.put(componentName, configMapping.getProperty(componentName));
 +            }
 +        } catch (Exception e) {
-             log.failedToLoadServiceDiscoveryConfiguration(COMPONENT_CONFIG_MAPPING_FILE, e);
++            log.failedToLoadServiceDiscoveryURLDefConfiguration(COMPONENT_CONFIG_MAPPING_FILE, e);
 +        }
 +    }
 +
-     private static final String DEFAULT_USER_ALIAS = "ambari.discovery.user";
-     private static final String DEFAULT_PWD_ALIAS  = "ambari.discovery.password";
- 
 +    @GatewayService
 +    private AliasService aliasService;
 +
-     private CloseableHttpClient httpClient = null;
++    private RESTInvoker restClient;
++    private AmbariClientCommon ambariClient;
 +
++    // This is used to update the monitor when new cluster configuration details are discovered.
++    private AmbariConfigurationMonitor configChangeMonitor;
++
++    private boolean isInitialized = false;
 +
 +    AmbariServiceDiscovery() {
-         httpClient = org.apache.http.impl.client.HttpClients.createDefault();
++    }
++
++
++    AmbariServiceDiscovery(RESTInvoker restClient) {
++        this.restClient = restClient;
++    }
++
++
++    /**
++     * Initialization must happen after construction because the AliasService member isn't assigned until then.
++     * This is called internally prior to discovery invocations to ensure the clients have been initialized.
++     */
++    private void init() {
++        if (!isInitialized) {
++            if (this.restClient == null) {
++                this.restClient = new RESTInvoker(aliasService);
++            }
++            this.ambariClient = new AmbariClientCommon(restClient);
++            this.configChangeMonitor = getConfigurationChangeMonitor();
++
++            isInitialized = true;
++        }
++    }
++
++
++    /**
++     * Get the Ambari configuration change monitor from the associated gateway service.
++     */
++    private AmbariConfigurationMonitor getConfigurationChangeMonitor() {
++        AmbariConfigurationMonitor ambariMonitor = null;
++        try {
++            Class clazz = Class.forName(GATEWAY_SERVICES_ACCESSOR_CLASS);
++            if (clazz != null) {
++                Method m = clazz.getDeclaredMethod(GATEWAY_SERVICES_ACCESSOR_METHOD);
++                if (m != null) {
++                    Object obj = m.invoke(null);
++                    if (GatewayServices.class.isAssignableFrom(obj.getClass())) {
++                        ClusterConfigurationMonitorService clusterMonitorService =
++                              ((GatewayServices) obj).getService(GatewayServices.CLUSTER_CONFIGURATION_MONITOR_SERVICE);
++                        ClusterConfigurationMonitor monitor =
++                                                 clusterMonitorService.getMonitor(AmbariConfigurationMonitor.getType());
++                        if (monitor != null) {
++                            if (AmbariConfigurationMonitor.class.isAssignableFrom(monitor.getClass())) {
++                                ambariMonitor = (AmbariConfigurationMonitor) monitor;
++                            }
++                        }
++                    }
++                }
++            }
++        } catch (Exception e) {
++            log.errorAccessingConfigurationChangeMonitor(e);
++        }
++        return ambariMonitor;
 +    }
 +
 +
 +    @Override
 +    public String getType() {
 +        return TYPE;
 +    }
 +
 +
 +    @Override
 +    public Map<String, Cluster> discover(ServiceDiscoveryConfig config) {
-         Map<String, Cluster> clusters = new HashMap<String, Cluster>();
++        Map<String, Cluster> clusters = new HashMap<>();
++
++        init();
 +
 +        String discoveryAddress = config.getAddress();
 +
 +        // Invoke Ambari REST API to discover the available clusters
 +        String clustersDiscoveryURL = String.format("%s" + AMBARI_CLUSTERS_URI, discoveryAddress);
 +
-         JSONObject json = invokeREST(clustersDiscoveryURL, config.getUser(), config.getPasswordAlias());
++        JSONObject json = restClient.invoke(clustersDiscoveryURL, config.getUser(), config.getPasswordAlias());
 +
 +        // Parse the cluster names from the response, and perform the cluster discovery
 +        JSONArray clusterItems = (JSONArray) json.get("items");
 +        for (Object clusterItem : clusterItems) {
 +            String clusterName = (String) ((JSONObject)((JSONObject) clusterItem).get("Clusters")).get("cluster_name");
 +            try {
 +                Cluster c = discover(config, clusterName);
 +                clusters.put(clusterName, c);
 +            } catch (Exception e) {
 +                log.clusterDiscoveryError(clusterName, e);
 +            }
 +        }
 +
 +        return clusters;
 +    }
 +
 +
 +    @Override
 +    public Cluster discover(ServiceDiscoveryConfig config, String clusterName) {
 +        AmbariCluster cluster = new AmbariCluster(clusterName);
 +
 +        Map<String, String> serviceComponents = new HashMap<>();
 +
++        init();
++
 +        String discoveryAddress = config.getAddress();
 +        String discoveryUser = config.getUser();
 +        String discoveryPwdAlias = config.getPasswordAlias();
 +
 +        Map<String, List<String>> componentHostNames = new HashMap<>();
 +        String hostRolesURL = String.format("%s" + AMBARI_HOSTROLES_URI, discoveryAddress, clusterName);
-         JSONObject hostRolesJSON = invokeREST(hostRolesURL, discoveryUser, discoveryPwdAlias);
++        JSONObject hostRolesJSON = restClient.invoke(hostRolesURL, discoveryUser, discoveryPwdAlias);
 +        if (hostRolesJSON != null) {
 +            // Process the host roles JSON
 +            JSONArray items = (JSONArray) hostRolesJSON.get("items");
 +            for (Object obj : items) {
 +                JSONArray components = (JSONArray) ((JSONObject) obj).get("components");
 +                for (Object component : components) {
 +                    JSONArray hostComponents = (JSONArray) ((JSONObject) component).get("host_components");
 +                    for (Object hostComponent : hostComponents) {
 +                        JSONObject hostRoles = (JSONObject) ((JSONObject) hostComponent).get("HostRoles");
 +                        String serviceName = (String) hostRoles.get("service_name");
 +                        String componentName = (String) hostRoles.get("component_name");
 +
 +                        serviceComponents.put(componentName, serviceName);
 +
 +                        // Assuming public host name is more applicable than host_name
 +                        String hostName = (String) hostRoles.get("public_host_name");
 +                        if (hostName == null) {
 +                            // Some (even slightly) older versions of Ambari/HDP do not return public_host_name,
 +                            // so fall back to host_name in those cases.
 +                            hostName = (String) hostRoles.get("host_name");
 +                        }
 +
 +                        if (hostName != null) {
 +                            log.discoveredServiceHost(serviceName, hostName);
 +                            if (!componentHostNames.containsKey(componentName)) {
-                                 componentHostNames.put(componentName, new ArrayList<String>());
++                                componentHostNames.put(componentName, new ArrayList<>());
 +                            }
 +                            componentHostNames.get(componentName).add(hostName);
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
++        // Service configurations
 +        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigurations =
-                                                  new HashMap<String, Map<String, AmbariCluster.ServiceConfiguration>>();
-         String serviceConfigsURL = String.format("%s" + AMBARI_SERVICECONFIGS_URI, discoveryAddress, clusterName);
-         JSONObject serviceConfigsJSON = invokeREST(serviceConfigsURL, discoveryUser, discoveryPwdAlias);
-         if (serviceConfigsJSON != null) {
-             // Process the service configurations
-             JSONArray serviceConfigs = (JSONArray) serviceConfigsJSON.get("items");
-             for (Object serviceConfig : serviceConfigs) {
-                 String serviceName = (String) ((JSONObject) serviceConfig).get("service_name");
-                 JSONArray configurations = (JSONArray) ((JSONObject) serviceConfig).get("configurations");
-                 for (Object configuration : configurations) {
-                     String configType = (String) ((JSONObject) configuration).get("type");
-                     String configVersion = String.valueOf(((JSONObject) configuration).get("version"));
- 
-                     Map<String, String> configProps = new HashMap<String, String>();
-                     JSONObject configProperties = (JSONObject) ((JSONObject) configuration).get("properties");
-                     for (String propertyName : configProperties.keySet()) {
-                         configProps.put(propertyName, String.valueOf(((JSONObject) configProperties).get(propertyName)));
-                     }
-                     if (!serviceConfigurations.containsKey(serviceName)) {
-                         serviceConfigurations.put(serviceName, new HashMap<String, AmbariCluster.ServiceConfiguration>());
-                     }
-                     serviceConfigurations.get(serviceName).put(configType, new AmbariCluster.ServiceConfiguration(configType, configVersion, configProps));
-                     cluster.addServiceConfiguration(serviceName, configType, new AmbariCluster.ServiceConfiguration(configType, configVersion, configProps));
-                 }
++                                                        ambariClient.getActiveServiceConfigurations(discoveryAddress,
++                                                                                                    clusterName,
++                                                                                                    discoveryUser,
++                                                                                                    discoveryPwdAlias);
++        for (String serviceName : serviceConfigurations.keySet()) {
++            for (Map.Entry<String, AmbariCluster.ServiceConfiguration> serviceConfig : serviceConfigurations.get(serviceName).entrySet()) {
++                cluster.addServiceConfiguration(serviceName, serviceConfig.getKey(), serviceConfig.getValue());
 +            }
 +        }
 +
 +        // Construct the AmbariCluster model
 +        for (String componentName : serviceComponents.keySet()) {
 +            String serviceName = serviceComponents.get(componentName);
 +            List<String> hostNames = componentHostNames.get(componentName);
 +
 +            Map<String, AmbariCluster.ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +            String configType = componentServiceConfigs.get(componentName);
 +            if (configType != null) {
 +                AmbariCluster.ServiceConfiguration svcConfig = configs.get(configType);
 +                AmbariComponent c = new AmbariComponent(componentName,
 +                                                        svcConfig.getVersion(),
 +                                                        clusterName,
 +                                                        serviceName,
 +                                                        hostNames,
 +                                                        svcConfig.getProperties());
 +                cluster.addComponent(c);
 +            }
 +        }
 +
-         return cluster;
-     }
- 
- 
-     protected JSONObject invokeREST(String url, String username, String passwordAlias) {
-         JSONObject result = null;
- 
-         CloseableHttpResponse response = null;
-         try {
-             HttpGet request = new HttpGet(url);
- 
-             // If no configured username, then use default username alias
-             String password = null;
-             if (username == null) {
-                 if (aliasService != null) {
-                     try {
-                         char[] defaultUser = aliasService.getPasswordFromAliasForGateway(DEFAULT_USER_ALIAS);
-                         if (defaultUser != null) {
-                             username = new String(defaultUser);
-                         }
-                     } catch (AliasServiceException e) {
-                         log.aliasServiceUserError(DEFAULT_USER_ALIAS, e.getLocalizedMessage());
-                     }
-                 }
- 
-                 // If username is still null
-                 if (username == null) {
-                     log.aliasServiceUserNotFound();
-                     throw new ConfigurationException("No username is configured for Ambari service discovery.");
-                 }
-             }
- 
-             if (aliasService != null) {
-                 // If no password alias is configured, then try the default alias
-                 if (passwordAlias == null) {
-                     passwordAlias = DEFAULT_PWD_ALIAS;
-                 }
- 
-                 try {
-                     char[] pwd = aliasService.getPasswordFromAliasForGateway(passwordAlias);
-                     if (pwd != null) {
-                         password = new String(pwd);
-                     }
- 
-                 } catch (AliasServiceException e) {
-                     log.aliasServicePasswordError(passwordAlias, e.getLocalizedMessage());
-                 }
-             }
- 
-             // If the password could not be determined
-             if (password == null) {
-                 log.aliasServicePasswordNotFound();
-                 throw new ConfigurationException("No password is configured for Ambari service discovery.");
-             }
- 
-             // Add an auth header if credentials are available
-             String encodedCreds =
-                     org.apache.commons.codec.binary.Base64.encodeBase64String((username + ":" + password).getBytes());
-             request.addHeader(new BasicHeader("Authorization", "Basic " + encodedCreds));
- 
-             response = httpClient.execute(request);
- 
-             if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
-                 HttpEntity entity = response.getEntity();
-                 if (entity != null) {
-                     result = (JSONObject) JSONValue.parse((EntityUtils.toString(entity)));
-                     log.debugJSON(result.toJSONString());
-                 } else {
-                     log.noJSON(url);
-                 }
-             } else {
-                 log.unexpectedRestResponseStatusCode(url, response.getStatusLine().getStatusCode());
-             }
- 
-         } catch (IOException e) {
-             log.restInvocationError(url, e);
-         } finally {
-             if(response != null) {
-                 try {
-                     response.close();
-                 } catch (IOException e) {
-                     // Ignore
-                 }
-             }
++        if (configChangeMonitor != null) {
++            // Notify the cluster config monitor about these cluster configuration details
++            configChangeMonitor.addClusterConfigVersions(cluster, config);
 +        }
-         return result;
-     }
 +
++        return cluster;
++    }
 +
 +}
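
Note the reflective lookup in getConfigurationChangeMonitor() above:
GatewayServer is resolved by name at runtime, so gateway-discovery-ambari
avoids a compile-time dependency on gateway-server, and discovery simply
proceeds without a monitor when the lookup fails. Stripped of the Knox
specifics, the pattern is roughly the following (the class and method names
are stand-ins for GatewayServer and getGatewayServices):

    import java.lang.reflect.Method;

    public final class ReflectiveAccessorSketch {
        // Resolve a static accessor on a class that may not be on the
        // compile-time classpath; return null rather than failing hard.
        static Object lookupServices() {
            try {
                Class<?> host = Class.forName("com.example.Host");   // stand-in class name
                Method accessor = host.getDeclaredMethod("getServices");
                return accessor.invoke(null);   // static method: no receiver instance
            } catch (Exception e) {
                return null;   // degrade gracefully, as the discovery code does
            }
        }

        public static void main(String[] args) {
            System.out.println("services: " + lookupServices());
        }
    }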

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
index 2bdc94b,0000000..12e6078
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
@@@ -1,121 -1,0 +1,148 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.knox.gateway.topology.discovery.ambari")
 +public interface AmbariServiceDiscoveryMessages {
 +
 +    @Message(level = MessageLevel.ERROR,
-             text = "Failed to load service discovery configuration: {1}")
-     void failedToLoadServiceDiscoveryConfiguration(@StackTrace(level = MessageLevel.ERROR) Exception e);
++             text = "Failed to persist data for cluster configuration monitor {0} {1}: {2}")
++    void failedToPersistClusterMonitorData(final String monitor,
++                                           final String filename,
++                                           @StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +    @Message(level = MessageLevel.ERROR,
-              text = "Failed to load service discovery configuration {0}: {1}")
-     void failedToLoadServiceDiscoveryConfiguration(final String configuration,
-                                @StackTrace(level = MessageLevel.ERROR) Exception e);
++             text = "Failed to load persisted service discovery configuration for cluster monitor {0} : {1}")
++    void failedToLoadClusterMonitorServiceDiscoveryConfig(final String monitor,
++                                                          @StackTrace(level = MessageLevel.DEBUG) Exception e);
++
++    @Message(level = MessageLevel.ERROR,
++            text = "Failed to load persisted cluster configuration version data for cluster monitor {0} : {1}")
++    void failedToLoadClusterMonitorConfigVersions(final String monitor,
++                                                  @StackTrace(level = MessageLevel.DEBUG) Exception e);
++
++    @Message(level = MessageLevel.ERROR,
++             text = "Unable to access the Ambari Configuration Change Monitor: {0}")
++    void errorAccessingConfigurationChangeMonitor(@StackTrace(level = MessageLevel.DEBUG) Exception e);
++
++    @Message(level = MessageLevel.ERROR,
++             text = "Failed to load service discovery URL definition configuration: {1}")
++    void failedToLoadServiceDiscoveryURLDefConfiguration(@StackTrace(level = MessageLevel.DEBUG) Exception e);
++
++    @Message(level = MessageLevel.ERROR,
++             text = "Failed to load service discovery URL definition configuration {0}: {1}")
++    void failedToLoadServiceDiscoveryURLDefConfiguration(final String configuration,
++                                                         @StackTrace(level = MessageLevel.ERROR) Exception e);
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "Encountered an error during cluster {0} discovery: {1}")
 +    void clusterDiscoveryError(final String clusterName,
-                                @StackTrace(level = MessageLevel.ERROR) Exception e);
++                               @StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
 +             text = "REST invocation {0} failed: {1}")
 +    void restInvocationError(final String url,
-                              @StackTrace(level = MessageLevel.ERROR) Exception e);
++                             @StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "Encountered an error attempting to determine the user for alias {0} : {1}")
 +    void aliasServiceUserError(final String alias, final String error);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "Encountered an error attempting to determine the password for alias {0} : {1}")
 +    void aliasServicePasswordError(final String alias, final String error);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "No user configured for Ambari service discovery.")
 +    void aliasServiceUserNotFound();
 +
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "No password configured for Ambari service discovery.")
 +    void aliasServicePasswordNotFound();
 +
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "Unexpected REST invocation response code for {0} : {1}")
 +    void unexpectedRestResponseStatusCode(final String url, int responseStatusCode);
 +
 +
 +    @Message(level = MessageLevel.ERROR,
 +             text = "REST invocation {0} yielded a response without any JSON.")
 +    void noJSON(final String url);
 +
 +
-     @Message(level = MessageLevel.DEBUG,
++    @Message(level = MessageLevel.TRACE,
 +             text = "REST invocation result: {0}")
 +    void debugJSON(final String json);
 +
++
 +    @Message(level = MessageLevel.DEBUG,
-             text = "Loaded component configuration mappings: {0}")
++             text = "Loaded component configuration mappings: {0}")
 +    void loadedComponentConfigMappings(final String mappings);
 +
++
 +    @Message(level = MessageLevel.ERROR,
 +             text = "Failed to load component configuration property mappings {0}: {1}")
 +    void failedToLoadComponentConfigMappings(final String mappings,
-                                              @StackTrace(level = MessageLevel.ERROR) Exception e);
++                                             @StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
-     @Message(level = MessageLevel.DEBUG,
++
++    @Message(level = MessageLevel.TRACE,
 +             text = "Discovered: Service: {0}, Host: {1}")
 +    void discoveredServiceHost(final String serviceName, final String hostName);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
 +             text = "Querying the cluster for the {0} configuration ({1}) property: {2}")
 +    void lookingUpServiceConfigProperty(final String serviceName, final String configType, final String propertyName);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
 +             text = "Querying the cluster for the {0} component configuration property: {1}")
 +    void lookingUpComponentConfigProperty(final String componentName, final String propertyName);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
 +             text = "Querying the cluster for the {0} component's hosts")
 +    void lookingUpComponentHosts(final String componentName);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
 +            text = "Handling a derived service URL mapping property for the {0} service: type = {1}, name = {2}")
 +    void handlingDerivedProperty(final String serviceName, final String propertyType, final String propertyName);
 +
 +
 +    @Message(level = MessageLevel.DEBUG,
-             text = "Determined the service URL mapping property {0} value: {1}")
++             text = "Determined the service URL mapping property {0} value: {1}")
 +    void determinedPropertyValue(final String propertyName, final String propertyValue);
 +
 +
++    @Message(level = MessageLevel.INFO,
++             text = "Started Ambari cluster configuration monitor (checking every {0} seconds)")
++    void startedAmbariConfigMonitor(final long pollingInterval);
++
 +}
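
Since none of the @Message methods above carry an implementation, the pattern depends on Knox's i18n framework generating one at runtime; callers obtain a proxy through MessagesFactory, exactly the way ServiceURLPropertyConfig does in the next hunk. A minimal usage sketch, with the surrounding class and the cluster name being illustrative:

    import org.apache.knox.gateway.i18n.messages.MessagesFactory;

    class DiscoveryLoggingSketch {
        private static final AmbariServiceDiscoveryMessages log =
                MessagesFactory.get(AmbariServiceDiscoveryMessages.class);

        void discover(String clusterName) {
            try {
                // ... invoke the Ambari REST API for this cluster ...
            } catch (Exception e) {
                // clusterName binds to {0}; the @StackTrace-annotated
                // exception is logged at the level declared on the
                // annotation (DEBUG after this change)
                log.clusterDiscoveryError(clusterName, e);
            }
        }
    }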

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
index ed07873,0000000..47b20e9
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
@@@ -1,324 -1,0 +1,324 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.NamedNodeMap;
 +import org.w3c.dom.Node;
 +import org.w3c.dom.NodeList;
 +
 +import javax.xml.xpath.XPath;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathExpression;
 +import javax.xml.xpath.XPathExpressionException;
 +import javax.xml.xpath.XPathFactory;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
 +/**
 + * Service URL pattern mapping configuration model.
 + */
 +class ServiceURLPropertyConfig {
 +
 +    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    private static final String ATTR_NAME = "name";
 +
 +    private static XPathExpression SERVICE_URL_PATTERN_MAPPINGS;
 +    private static XPathExpression URL_PATTERN;
 +    private static XPathExpression PROPERTIES;
 +    static {
 +        XPath xpath = XPathFactory.newInstance().newXPath();
 +        try {
 +            SERVICE_URL_PATTERN_MAPPINGS = xpath.compile("/service-discovery-url-mappings/service");
 +            URL_PATTERN                  = xpath.compile("url-pattern/text()");
 +            PROPERTIES                   = xpath.compile("properties/property");
 +        } catch (XPathExpressionException e) {
 +            e.printStackTrace();
 +        }
 +    }
 +
 +    private static final String DEFAULT_SERVICE_URL_MAPPINGS = "ambari-service-discovery-url-mappings.xml";
 +
 +    private Map<String, URLPattern> urlPatterns = new HashMap<>();
 +
 +    private Map<String, Map<String, Property>> properties = new HashMap<>();
 +
 +
 +    /**
 +     * The default service URL pattern to property mapping configuration will be used.
 +     */
 +    ServiceURLPropertyConfig() {
 +        this(ServiceURLPropertyConfig.class.getClassLoader().getResourceAsStream(DEFAULT_SERVICE_URL_MAPPINGS));
 +    }
 +
 +    /**
 +     * The specified file supplies the service URL pattern to property mapping configuration.
 +     */
 +    ServiceURLPropertyConfig(File mappingConfigurationFile) throws Exception {
 +        this(new FileInputStream(mappingConfigurationFile));
 +    }
 +
 +    /**
 +     *
 +     * @param source An InputStream for the XML content
 +     */
 +    ServiceURLPropertyConfig(InputStream source) {
 +        // Parse the XML, and build the model
 +        try {
 +            Document doc = XmlUtils.readXml(source);
 +
 +            NodeList serviceNodes =
 +                    (NodeList) SERVICE_URL_PATTERN_MAPPINGS.evaluate(doc, XPathConstants.NODESET);
 +            for (int i=0; i < serviceNodes.getLength(); i++) {
 +                Node serviceNode = serviceNodes.item(i);
 +                String serviceName = serviceNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
 +                properties.put(serviceName, new HashMap<String, Property>());
 +
 +                Node urlPatternNode = (Node) URL_PATTERN.evaluate(serviceNode, XPathConstants.NODE);
 +                if (urlPatternNode != null) {
 +                    urlPatterns.put(serviceName, new URLPattern(urlPatternNode.getNodeValue()));
 +                }
 +
 +                NodeList propertiesNode = (NodeList) PROPERTIES.evaluate(serviceNode, XPathConstants.NODESET);
 +                if (propertiesNode != null) {
 +                    processProperties(serviceName, propertiesNode);
 +                }
 +            }
 +        } catch (Exception e) {
-             log.failedToLoadServiceDiscoveryConfiguration(e);
++            log.failedToLoadServiceDiscoveryURLDefConfiguration(e);
 +        } finally {
 +            try {
 +                source.close();
 +            } catch (IOException e) {
 +                // Ignore
 +            }
 +        }
 +    }
 +
 +    private void processProperties(String serviceName, NodeList propertyNodes) {
 +        for (int i = 0; i < propertyNodes.getLength(); i++) {
 +            Property p = Property.createProperty(serviceName, propertyNodes.item(i));
 +            properties.get(serviceName).put(p.getName(), p);
 +        }
 +    }
 +
 +    URLPattern getURLPattern(String service) {
 +        return urlPatterns.get(service);
 +    }
 +
 +    Property getConfigProperty(String service, String property) {
 +        return properties.get(service).get(property);
 +    }
 +
 +    static class URLPattern {
 +        String pattern;
 +        List<String> placeholders = new ArrayList<>();
 +
 +        URLPattern(String pattern) {
 +            this.pattern = pattern;
 +
 +            final Pattern regex = Pattern.compile("\\{(.*?)}", Pattern.DOTALL);
 +            final Matcher matcher = regex.matcher(pattern);
 +            while( matcher.find() ){
 +                placeholders.add(matcher.group(1));
 +            }
 +        }
 +
 +        String get() {return pattern; }
 +        List<String> getPlaceholders() {
 +            return placeholders;
 +        }
 +    }
 +
 +    static class Property {
 +        static final String TYPE_SERVICE   = "SERVICE";
 +        static final String TYPE_COMPONENT = "COMPONENT";
 +        static final String TYPE_DERIVED   = "DERIVED";
 +
 +        static final String PROP_COMP_HOSTNAME = "component.host.name";
 +
 +        static final String ATTR_NAME     = "name";
 +        static final String ATTR_PROPERTY = "property";
 +        static final String ATTR_VALUE    = "value";
 +
 +        static XPathExpression HOSTNAME;
 +        static XPathExpression SERVICE_CONFIG;
 +        static XPathExpression COMPONENT;
 +        static XPathExpression CONFIG_PROPERTY;
 +        static XPathExpression IF;
 +        static XPathExpression THEN;
 +        static XPathExpression ELSE;
 +        static XPathExpression TEXT;
 +        static {
 +            XPath xpath = XPathFactory.newInstance().newXPath();
 +            try {
 +                HOSTNAME        = xpath.compile("hostname");
 +                SERVICE_CONFIG  = xpath.compile("service-config");
 +                COMPONENT       = xpath.compile("component");
 +                CONFIG_PROPERTY = xpath.compile("config-property");
 +                IF              = xpath.compile("if");
 +                THEN            = xpath.compile("then");
 +                ELSE            = xpath.compile("else");
 +                TEXT            = xpath.compile("text()");
 +            } catch (XPathExpressionException e) {
 +                e.printStackTrace();
 +            }
 +        }
 +
 +
 +        String type;
 +        String name;
 +        String component;
 +        String service;
 +        String serviceConfig;
 +        String value;
 +        ConditionalValueHandler conditionHandler = null;
 +
 +        private Property(String type,
 +                         String propertyName,
 +                         String component,
 +                         String service,
 +                         String configType,
 +                         String value,
 +                         ConditionalValueHandler pch) {
 +            this.type = type;
 +            this.name = propertyName;
 +            this.service = service;
 +            this.component = component;
 +            this.serviceConfig = configType;
 +            this.value = value;
 +            conditionHandler = pch;
 +        }
 +
 +        static Property createProperty(String serviceName, Node propertyNode) {
 +            String propertyName = propertyNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
 +            String propertyType = null;
 +            String serviceType = null;
 +            String configType = null;
 +            String componentType = null;
 +            String value = null;
 +            ConditionalValueHandler pch = null;
 +
 +            try {
 +                Node hostNameNode = (Node) HOSTNAME.evaluate(propertyNode, XPathConstants.NODE);
 +                if (hostNameNode != null) {
 +                    value = PROP_COMP_HOSTNAME;
 +                }
 +
 +                // Check for a service-config node
 +                Node scNode = (Node) SERVICE_CONFIG.evaluate(propertyNode, XPathConstants.NODE);
 +                if (scNode != null) {
 +                    // Service config property
 +                    propertyType = Property.TYPE_SERVICE;
 +                    serviceType = scNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
 +                    Node scTextNode = (Node) TEXT.evaluate(scNode, XPathConstants.NODE);
 +                    configType = scTextNode.getNodeValue();
 +                } else { // If there is no service-config node, check for a component config node
 +                    Node cNode = (Node) COMPONENT.evaluate(propertyNode, XPathConstants.NODE);
 +                    if (cNode != null) {
 +                        // Component config property
 +                        propertyType = Property.TYPE_COMPONENT;
 +                        componentType = cNode.getFirstChild().getNodeValue();
 +                        Node cTextNode = (Node) TEXT.evaluate(cNode, XPathConstants.NODE);
 +                        configType = cTextNode.getNodeValue();
 +                        componentType = cTextNode.getNodeValue();
 +                    }
 +                }
 +
 +                // Check for a config property node
 +                Node cpNode = (Node) CONFIG_PROPERTY.evaluate(propertyNode, XPathConstants.NODE);
 +                if (cpNode != null) {
 +                    // Check for a condition element
 +                    Node ifNode = (Node) IF.evaluate(cpNode, XPathConstants.NODE);
 +                    if (ifNode != null) {
 +                        propertyType = TYPE_DERIVED;
 +                        pch = getConditionHandler(serviceName, ifNode);
 +                    } else {
 +                        Node cpTextNode = (Node) TEXT.evaluate(cpNode, XPathConstants.NODE);
 +                        value = cpTextNode.getNodeValue();
 +                    }
 +                }
 +            } catch (Exception e) {
 +                e.printStackTrace();
 +            }
 +
 +            // Create and return the property representation
 +            return new Property(propertyType, propertyName, componentType, serviceType, configType, value, pch);
 +        }
 +
 +        private static ConditionalValueHandler getConditionHandler(String serviceName, Node ifNode) throws Exception {
 +            ConditionalValueHandler result = null;
 +
 +            if (ifNode != null) {
 +                NamedNodeMap attrs = ifNode.getAttributes();
 +                String comparisonPropName = attrs.getNamedItem(ATTR_PROPERTY).getNodeValue();
 +                String comparisonValue = attrs.getNamedItem(ATTR_VALUE).getNodeValue();
 +
 +                ConditionalValueHandler affirmativeResult = null;
 +                Node thenNode = (Node) THEN.evaluate(ifNode, XPathConstants.NODE);
 +                if (thenNode != null) {
 +                    Node subIfNode = (Node) IF.evaluate(thenNode, XPathConstants.NODE);
 +                    if (subIfNode != null) {
 +                        affirmativeResult = getConditionHandler(serviceName, subIfNode);
 +                    } else {
 +                        affirmativeResult = new SimpleValueHandler(thenNode.getFirstChild().getNodeValue());
 +                    }
 +                }
 +
 +                ConditionalValueHandler negativeResult = null;
 +                Node elseNode = (Node) ELSE.evaluate(ifNode, XPathConstants.NODE);
 +                if (elseNode != null) {
 +                    Node subIfNode = (Node) IF.evaluate(elseNode, XPathConstants.NODE);
 +                    if (subIfNode != null) {
 +                        negativeResult = getConditionHandler(serviceName, subIfNode);
 +                    } else {
 +                        negativeResult = new SimpleValueHandler(elseNode.getFirstChild().getNodeValue());
 +                    }
 +                }
 +
 +                result = new PropertyEqualsHandler(serviceName,
 +                        comparisonPropName,
 +                        comparisonValue,
 +                        affirmativeResult,
 +                        negativeResult);
 +            }
 +
 +            return result;
 +        }
 +
 +        String getType() { return type; }
 +        String getName() { return name; }
 +        String getComponent() { return component; }
 +        String getService() { return service; }
 +        String getServiceConfig() { return serviceConfig; }
 +        String getValue() {
 +            return value;
 +        }
 +        ConditionalValueHandler getConditionHandler() { return conditionHandler; }
 +    }
 +}
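
Reading the XPath expressions back, the document ServiceURLPropertyConfig consumes pairs a url-pattern full of {PLACEHOLDER} tokens with one property definition per placeholder; derived properties may additionally nest if/then/else elements keyed on another property's value, which is what PropertyEqualsHandler resolves. A minimal sketch of such a mapping, with the service and property names chosen for illustration rather than copied from the bundled ambari-service-discovery-url-mappings.xml:

    <service-discovery-url-mappings>
      <service name="WEBHDFS">
        <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs</url-pattern>
        <properties>
          <property name="WEBHDFS_ADDRESS">
            <service-config name="HDFS">hdfs-site</service-config>
            <config-property>dfs.namenode.http-address</config-property>
          </property>
        </properties>
      </service>
    </service-discovery-url-mappings>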


[43/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java
new file mode 100644
index 0000000..96b931c
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.zk;
+
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider;
+
+
+public class ZooKeeperClientServiceProvider implements RemoteConfigurationRegistryClientServiceProvider {
+
+    @Override
+    public String getType() {
+        return ZooKeeperClientService.TYPE;
+    }
+
+    @Override
+    public ZooKeeperClientService newInstance() {
+        return new CuratorClientService();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider b/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
deleted file mode 100644
index 7f2312a..0000000
--- a/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
+++ /dev/null
@@ -1,19 +0,0 @@
-##########################################################################
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-
-org.apache.hadoop.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider b/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
new file mode 100644
index 0000000..fe12e48
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
@@ -0,0 +1,19 @@
+##########################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider
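
The deletion and addition above are the same java.util.ServiceLoader descriptor before and after the package restructuring: the file must be named for the service interface's fully qualified name, and each non-comment line names a provider implementation, which is why the descriptor has to move in lockstep with the rename. A sketch of the lookup side, under the assumption that the consuming factory simply iterates providers and matches on getType():

    import java.util.ServiceLoader;

    import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider;

    class ProviderLookupSketch {
        static RemoteConfigurationRegistryClientServiceProvider findProvider(String type) {
            // ServiceLoader scans META-INF/services/<interface FQCN> entries
            // on the classpath, such as the file added above.
            for (RemoteConfigurationRegistryClientServiceProvider provider :
                    ServiceLoader.load(RemoteConfigurationRegistryClientServiceProvider.class)) {
                if (provider.getType().equalsIgnoreCase(type)) { // matching rule assumed
                    return provider;
                }
            }
            return null;
        }
    }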

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java
deleted file mode 100644
index a33fcc2..0000000
--- a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import static org.junit.Assert.assertEquals;
-import static org.testng.Assert.assertNotNull;
-
-public class DefaultRemoteConfigurationRegistriesTest {
-
-    /**
-     * Test a single registry configuration with digest auth configuration.
-     */
-    @Test
-    public void testPropertiesRemoteConfigurationRegistriesSingleDigest() throws Exception {
-        Map<String, Properties> testProperties = new HashMap<>();
-        Properties p = new Properties();
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "hostx:2181");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "zkDigestUser");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "digest");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS, "zkDigestAlias");
-        testProperties.put("testDigest", p);
-
-        doTestPropertiesRemoteConfigurationRegistries(testProperties);
-    }
-
-
-    /**
-     * Test a single registry configuration with kerberos auth configuration.
-     */
-    @Test
-    public void testPropertiesRemoteConfigurationRegistriesSingleKerberos() throws Exception {
-        Map<String, Properties> testProperties = new HashMap<>();
-        Properties p = new Properties();
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "hostx:2181");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "zkUser");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "kerberos");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB, "/home/user/remoteregistry.keytab");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB, "true");
-        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE, "false");
-        testProperties.put("testKerb", p);
-
-        doTestPropertiesRemoteConfigurationRegistries(testProperties);
-    }
-
-    /**
-     * Test multiple registry configuration with varying auth configurations.
-     */
-    @Test
-    public void testPropertiesRemoteConfigurationRegistriesMultipleMixed() throws Exception {
-        Map<String, Properties> testProperties = new HashMap<>();
-
-        Properties kerb = new Properties();
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "host1:2181");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_NAMESPACE, "/knox/config");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "kerbPrincipal");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "kerberos");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB, "/home/user/mykrb.keytab");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB, "true");
-        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE, "false");
-        testProperties.put("testKerb1", kerb);
-
-        Properties digest = new Properties();
-        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
-        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "host2:2181");
-        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "digestPrincipal");
-        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "digest");
-        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS, "digestPwdAlias");
-        testProperties.put("testDigest1", digest);
-
-        Properties unsecured = new Properties();
-        unsecured.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
-        unsecured.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "host2:2181");
-        testProperties.put("testUnsecured", unsecured);
-
-        doTestPropertiesRemoteConfigurationRegistries(testProperties);
-    }
-
-
-    /**
-     * Perform the actual test.
-     *
-     * @param testProperties The test properties
-     */
-    private void doTestPropertiesRemoteConfigurationRegistries(Map<String, Properties> testProperties) throws Exception {
-        // Mock gateway config
-        GatewayConfig gc = mockGatewayConfig(testProperties);
-
-        // Create the RemoteConfigurationRegistries object to be tested from the GatewayConfig
-        RemoteConfigurationRegistries registries = new DefaultRemoteConfigurationRegistries(gc);
-
-        // Basic validation
-        assertNotNull(registries);
-        List<RemoteConfigurationRegistry> registryConfigs = registries.getRegistryConfigurations();
-        assertNotNull(registryConfigs);
-        assertEquals(testProperties.size(), registryConfigs.size());
-
-        // Validate the contents of the created object
-        for (RemoteConfigurationRegistry regConfig : registryConfigs) {
-            validateRemoteRegistryConfig(regConfig.getName(), testProperties.get(regConfig.getName()), regConfig);
-        }
-    }
-
-
-    /**
-     * Create a mock GatewayConfig based on the specified test properties.
-     *
-     * @param testProperties The test properties to set on the config
-     */
-    private GatewayConfig mockGatewayConfig(Map<String, Properties> testProperties) {
-        // Mock gateway config
-        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
-        List<String> configNames = new ArrayList<>();
-        for (String registryName : testProperties.keySet()) {
-            configNames.add(registryName);
-
-            String propertyValueString = "";
-            Properties props = testProperties.get(registryName);
-            Enumeration names = props.propertyNames();
-            while (names.hasMoreElements()) {
-                String propertyName = (String) names.nextElement();
-                propertyValueString += propertyName + "=" + props.get(propertyName);
-                if (names.hasMoreElements()) {
-                    propertyValueString += ";";
-                }
-            }
-            EasyMock.expect(gc.getRemoteRegistryConfiguration(registryName))
-                    .andReturn(propertyValueString)
-                    .anyTimes();
-        }
-        EasyMock.expect(gc.getRemoteRegistryConfigurationNames()).andReturn(configNames).anyTimes();
-        EasyMock.replay(gc);
-
-        return gc;
-    }
-
-
-    /**
-     * Validate the specified RemoteConfigurationRegistry based on the expected test properties.
-     */
-    private void validateRemoteRegistryConfig(String                      configName,
-                                              Properties                  expected,
-                                              RemoteConfigurationRegistry registryConfig) throws Exception {
-        assertEquals(configName, registryConfig.getName());
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE), registryConfig.getRegistryType());
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS), registryConfig.getConnectionString());
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_NAMESPACE), registryConfig.getNamespace());
-        assertEquals(registryConfig.isSecureRegistry(), expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE) != null);
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE), registryConfig.getAuthType());
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL), registryConfig.getPrincipal());
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS), registryConfig.getCredentialAlias());
-        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB), registryConfig.getKeytab());
-        assertEquals(Boolean.valueOf((String)expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB)), registryConfig.isUseKeyTab());
-        assertEquals(Boolean.valueOf((String)expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE)), registryConfig.isUseTicketCache());
-    }
-
-}
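
The mock in the deleted test flattens each Properties object into the same semicolon-delimited definition string the gateway configuration stores per registry, a format also exercised by the client service tests further down. Assembled from the GatewayConfig constants, a digest-secured entry would look roughly like this (assuming the constants resolve to keys such as type, address, authType, principal, and credentialAlias):

    import org.apache.hadoop.gateway.config.GatewayConfig;

    class RegistryDefSketch {
        // Mirrors how mockGatewayConfig() joins name=value pairs with ';'
        static final String DIGEST_REGISTRY_DEF =
                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=ZooKeeper;" +
                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=hostx:2181;" +
                GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=digest;" +
                GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=zkDigestUser;" +
                GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=zkDigestAlias";
    }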

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java
deleted file mode 100644
index 386e332..0000000
--- a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-import org.apache.hadoop.gateway.service.config.remote.util.RemoteRegistryConfigTestUtils;
-import org.junit.Test;
-
-import java.io.File;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import static org.apache.hadoop.gateway.service.config.remote.util.RemoteRegistryConfigTestUtils.*;
-
-public class RemoteConfigurationRegistryConfigParserTest {
-
-    @Test
-    public void testExternalXMLParsing() throws Exception {
-        final String CONN_STR = "http://my.zookeeper.host:2181";
-
-        Map<String, Map<String, String>> testRegistryConfigurations = new HashMap<>();
-
-        Map<String, String> config1 = new HashMap<>();
-        config1.put(PROPERTY_TYPE, "ZooKeeper");
-        config1.put(PROPERTY_NAME, "registry1");
-        config1.put(PROPERTY_ADDRESS, CONN_STR);
-        config1.put(PROPERTY_SECURE, "true");
-        config1.put(PROPERTY_AUTH_TYPE, "Digest");
-        config1.put(PROPERTY_PRINCIPAL, "knox");
-        config1.put(PROPERTY_CRED_ALIAS, "zkCredential");
-        testRegistryConfigurations.put(config1.get("name"), config1);
-
-        Map<String, String> config2 = new HashMap<>();
-        config2.put(PROPERTY_TYPE, "ZooKeeper");
-        config2.put(PROPERTY_NAME, "MyKerberos");
-        config2.put(PROPERTY_ADDRESS, CONN_STR);
-        config2.put(PROPERTY_SECURE, "true");
-        config2.put(PROPERTY_AUTH_TYPE, "Kerberos");
-        config2.put(PROPERTY_PRINCIPAL, "knox");
-        File myKeyTab = File.createTempFile("mytest", "keytab");
-        config2.put(PROPERTY_KEYTAB, myKeyTab.getAbsolutePath());
-        config2.put(PROPERTY_USE_KEYTAB, "false");
-        config2.put(PROPERTY_USE_TICKET_CACHE, "true");
-        testRegistryConfigurations.put(config2.get("name"), config2);
-
-        Map<String, String> config3 = new HashMap<>();
-        config3.put(PROPERTY_TYPE, "ZooKeeper");
-        config3.put(PROPERTY_NAME, "anotherRegistry");
-        config3.put(PROPERTY_ADDRESS, "whatever:1281");
-        testRegistryConfigurations.put(config3.get("name"), config3);
-
-        String configXML =
-                    RemoteRegistryConfigTestUtils.createRemoteConfigRegistriesXML(testRegistryConfigurations.values());
-
-        File registryConfigFile = File.createTempFile("remote-registries", "xml");
-        try {
-            FileUtils.writeStringToFile(registryConfigFile, configXML);
-
-            List<RemoteConfigurationRegistryConfig> configs =
-                                    RemoteConfigurationRegistriesParser.getConfig(registryConfigFile.getAbsolutePath());
-            assertNotNull(configs);
-            assertEquals(testRegistryConfigurations.keySet().size(), configs.size());
-
-            for (RemoteConfigurationRegistryConfig registryConfig : configs) {
-                Map<String, String> expected = testRegistryConfigurations.get(registryConfig.getName());
-                assertNotNull(expected);
-                validateParsedRegistryConfiguration(registryConfig, expected);
-            }
-        } finally {
-            registryConfigFile.delete();
-        }
-    }
-
-    private void validateParsedRegistryConfiguration(RemoteConfigurationRegistryConfig config,
-                                                     Map<String, String> expected) throws Exception {
-        assertEquals(expected.get(PROPERTY_TYPE), config.getRegistryType());
-        assertEquals(expected.get(PROPERTY_ADDRESS), config.getConnectionString());
-        assertEquals(expected.get(PROPERTY_NAME), config.getName());
-        assertEquals(expected.get(PROPERTY_NAMESAPCE), config.getNamespace());
-        assertEquals(Boolean.valueOf(expected.get(PROPERTY_SECURE)), config.isSecureRegistry());
-        assertEquals(expected.get(PROPERTY_AUTH_TYPE), config.getAuthType());
-        assertEquals(expected.get(PROPERTY_PRINCIPAL), config.getPrincipal());
-        assertEquals(expected.get(PROPERTY_CRED_ALIAS), config.getCredentialAlias());
-        assertEquals(expected.get(PROPERTY_KEYTAB), config.getKeytab());
-        assertEquals(Boolean.valueOf(expected.get(PROPERTY_USE_KEYTAB)), config.isUseKeyTab());
-        assertEquals(Boolean.valueOf(expected.get(PROPERTY_USE_TICKET_CACHE)), config.isUseTicketCache());
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java b/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java
deleted file mode 100644
index 35919d0..0000000
--- a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.util;
-
-import java.util.Collection;
-import java.util.Map;
-
-public class RemoteRegistryConfigTestUtils {
-
-    public static final String PROPERTY_TYPE = "type";
-    public static final String PROPERTY_NAME = "name";
-    public static final String PROPERTY_ADDRESS = "address";
-    public static final String PROPERTY_NAMESAPCE = "namespace";
-    public static final String PROPERTY_SECURE = "secure";
-    public static final String PROPERTY_AUTH_TYPE = "authType";
-    public static final String PROPERTY_PRINCIPAL = "principal";
-    public static final String PROPERTY_CRED_ALIAS = "credentialAlias";
-    public static final String PROPERTY_KEYTAB = "keyTab";
-    public static final String PROPERTY_USE_KEYTAB = "useKeyTab";
-    public static final String PROPERTY_USE_TICKET_CACHE = "useTicketCache";
-
-    public static String createRemoteConfigRegistriesXML(Collection<Map<String, String>> configProperties) {
-        String result = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
-                        "<remote-configuration-registries>\n";
-
-        for (Map<String, String> props : configProperties) {
-            String authType = props.get(PROPERTY_AUTH_TYPE);
-            if ("Kerberos".equalsIgnoreCase(authType)) {
-                result +=
-                   createRemoteConfigRegistryXMLWithKerberosAuth(props.get(PROPERTY_TYPE),
-                                                                 props.get(PROPERTY_NAME),
-                                                                 props.get(PROPERTY_ADDRESS),
-                                                                 props.get(PROPERTY_PRINCIPAL),
-                                                                 props.get(PROPERTY_KEYTAB),
-                                                                 Boolean.valueOf(props.get(PROPERTY_USE_KEYTAB)),
-                                                                 Boolean.valueOf(props.get(PROPERTY_USE_TICKET_CACHE)));
-            } else if ("Digest".equalsIgnoreCase(authType)) {
-                result +=
-                    createRemoteConfigRegistryXMLWithDigestAuth(props.get(PROPERTY_TYPE),
-                                                                props.get(PROPERTY_NAME),
-                                                                props.get(PROPERTY_ADDRESS),
-                                                                props.get(PROPERTY_PRINCIPAL),
-                                                                props.get(PROPERTY_CRED_ALIAS));
-            } else {
-                result += createRemoteConfigRegistryXMLNoAuth(props.get(PROPERTY_TYPE),
-                                                              props.get(PROPERTY_NAME),
-                                                              props.get(PROPERTY_ADDRESS));
-            }
-        }
-
-        result += "</remote-configuration-registries>\n";
-
-        return result;
-    }
-
-    public static String createRemoteConfigRegistryXMLWithKerberosAuth(String type,
-                                                                       String name,
-                                                                       String address,
-                                                                       String principal,
-                                                                       String keyTab,
-                                                                       boolean userKeyTab,
-                                                                       boolean useTicketCache) {
-        return "  <remote-configuration-registry>\n" +
-               "    <name>" + name + "</name>\n" +
-               "    <type>" + type + "</type>\n" +
-               "    <address>" + address + "</address>\n" +
-               "    <secure>true</secure>\n" +
-               "    <auth-type>" + "Kerberos" + "</auth-type>\n" +
-               "    <principal>" + principal + "</principal>\n" +
-               "    <keytab>" + keyTab + "</keytab>\n" +
-               "    <use-keytab>" + String.valueOf(userKeyTab) + "</use-keytab>\n" +
-               "    <use-ticket-cache>" + String.valueOf(useTicketCache) + "</use-ticket-cache>\n" +
-               "  </remote-configuration-registry>\n";
-    }
-
-    public static String createRemoteConfigRegistryXMLWithDigestAuth(String type,
-                                                                     String name,
-                                                                     String address,
-                                                                     String principal,
-                                                                     String credentialAlias) {
-        return "  <remote-configuration-registry>\n" +
-               "    <name>" + name + "</name>\n" +
-               "    <type>" + type + "</type>\n" +
-               "    <address>" + address + "</address>\n" +
-               "    <secure>true</secure>\n" +
-               "    <auth-type>" + "Digest" + "</auth-type>\n" +
-               "    <principal>" + principal + "</principal>\n" +
-               "    <credential-alias>" + credentialAlias + "</credential-alias>\n" +
-               "  </remote-configuration-registry>\n";
-    }
-
-
-    public static String createRemoteConfigRegistryXMLNoAuth(String type,
-                                                             String name,
-                                                             String address) {
-        return "  <remote-configuration-registry>\n" +
-               "    <name>" + name + "</name>\n" +
-               "    <type>" + type + "</type>\n" +
-               "    <address>" + address + "</address>\n" +
-               "  </remote-configuration-registry>\n";
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java
deleted file mode 100644
index 0292ee3..0000000
--- a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.zk;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.InstanceSpec;
-import org.apache.curator.test.TestingCluster;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceFactory;
-import org.apache.hadoop.gateway.service.config.remote.util.RemoteRegistryConfigTestUtils;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.Configuration;
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-public class RemoteConfigurationRegistryClientServiceTest {
-
-    /**
-     * Test a configuration for an unsecured remote registry, included in the gateway configuration.
-     */
-    @Test
-    public void testUnsecuredZooKeeperWithSimpleRegistryConfig() throws Exception {
-        final String REGISTRY_CLIENT_NAME = "unsecured-zk-registry-name";
-        final String PRINCIPAL = null;
-        final String PWD = null;
-        final String CRED_ALIAS = null;
-
-        // Configure and start a secure ZK cluster
-        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
-
-        try {
-            // Create the setup client for the test cluster, and initialize the test znodes
-            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
-
-            // Mock configuration
-            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-            final String registryConfigValue =
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                        GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
-            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME))
-                    .andReturn(registryConfigValue)
-                    .anyTimes();
-            EasyMock.expect(config.getRemoteRegistryConfigurationNames())
-                    .andReturn(Collections.singletonList(REGISTRY_CLIENT_NAME)).anyTimes();
-            EasyMock.replay(config);
-
-            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME, config, CRED_ALIAS, PWD);
-        } finally {
-            zkCluster.stop();
-        }
-    }
-
-    /**
-     * Test multiple configurations for an unsecured remote registry.
-     */
-    @Test
-    public void testMultipleUnsecuredZooKeeperWithSimpleRegistryConfig() throws Exception {
-        final String REGISTRY_CLIENT_NAME_1 = "zkclient1";
-        final String REGISTRY_CLIENT_NAME_2 = "zkclient2";
-        final String PRINCIPAL = null;
-        final String PWD = null;
-        final String CRED_ALIAS = null;
-
-        // Configure and start a secure ZK cluster
-        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
-
-        try {
-            // Create the setup client for the test cluster, and initialize the test znodes
-            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
-
-            // Mock configuration
-            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-            final String registryConfigValue1 =
-                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
-            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME_1))
-                    .andReturn(registryConfigValue1).anyTimes();
-            final String registryConfigValue2 =
-                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
-            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME_2))
-                    .andReturn(registryConfigValue2).anyTimes();
-            EasyMock.expect(config.getRemoteRegistryConfigurationNames())
-                    .andReturn(Arrays.asList(REGISTRY_CLIENT_NAME_1, REGISTRY_CLIENT_NAME_2)).anyTimes();
-            EasyMock.replay(config);
-
-            // Create the client service instance
-            RemoteConfigurationRegistryClientService clientService =
-                    RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
-            assertEquals("Wrong registry client service type.", clientService.getClass(), CuratorClientService.class);
-            clientService.setAliasService(null);
-            clientService.init(config, null);
-            clientService.start();
-
-            RemoteConfigurationRegistryClient client1 = clientService.get(REGISTRY_CLIENT_NAME_1);
-            assertNotNull(client1);
-
-            RemoteConfigurationRegistryClient client2 = clientService.get(REGISTRY_CLIENT_NAME_2);
-            assertNotNull(client2);
-
-            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME_1, clientService, false);
-            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME_2, clientService, false);
-        } finally {
-            zkCluster.stop();
-        }
-    }
-
-    /**
-     * Test a configuration for a secure remote registry, included in the gateway configuration.
-     */
-    @Test
-    public void testZooKeeperWithSimpleRegistryConfig() throws Exception {
-        final String AUTH_TYPE = "digest";
-        final String REGISTRY_CLIENT_NAME = "zk-registry-name";
-        final String PRINCIPAL = "knox";
-        final String PWD = "knoxtest";
-        final String CRED_ALIAS = "zkCredential";
-
-        // Configure and start a secure ZK cluster
-        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
-
-        try {
-            // Create the setup client for the test cluster, and initialize the test znodes
-            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
-
-            // Mock configuration
-            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-            final String registryConfigValue =
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=" + AUTH_TYPE + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + PRINCIPAL + ";" +
-                            GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + CRED_ALIAS;
-            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME))
-                    .andReturn(registryConfigValue)
-                    .anyTimes();
-            EasyMock.expect(config.getRemoteRegistryConfigurationNames())
-                    .andReturn(Collections.singletonList(REGISTRY_CLIENT_NAME)).anyTimes();
-            EasyMock.replay(config);
-
-            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME, config, CRED_ALIAS, PWD);
-        } finally {
-            zkCluster.stop();
-        }
-    }
-
-    /**
-     * Test a remote registry configuration that is defined externally to the gateway configuration and referenced
-     * from it, for a secure client.
-     */
-    @Test
-    public void testZooKeeperWithSingleExternalRegistryConfig() throws Exception {
-        final String AUTH_TYPE = "digest";
-        final String REGISTRY_CLIENT_NAME = "my-zookeeper_registryNAME";
-        final String PRINCIPAL = "knox";
-        final String PWD = "knoxtest";
-        final String CRED_ALIAS = "zkCredential";
-
-        // Configure and start a secure ZK cluster
-        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
-
-        File tmpRegConfigFile = null;
-
-        try {
-            // Create the setup client for the test cluster, and initialize the test znodes
-            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
-
-            // Mock configuration
-            Map<String, String> registryConfigProps = new HashMap<>();
-            registryConfigProps.put("type", ZooKeeperClientService.TYPE);
-            registryConfigProps.put("name", REGISTRY_CLIENT_NAME);
-            registryConfigProps.put("address", zkCluster.getConnectString());
-            registryConfigProps.put("secure", "true");
-            registryConfigProps.put("authType", AUTH_TYPE);
-            registryConfigProps.put("principal", PRINCIPAL);
-            registryConfigProps.put("credentialAlias", CRED_ALIAS);
-            String registryConfigXML =
-                  RemoteRegistryConfigTestUtils.createRemoteConfigRegistriesXML(Collections.singleton(registryConfigProps));
-            tmpRegConfigFile = File.createTempFile("myRemoteRegistryConfig", "xml");
-            FileUtils.writeStringToFile(tmpRegConfigFile, registryConfigXML);
-
-            System.setProperty("org.apache.knox.gateway.remote.registry.config.file", tmpRegConfigFile.getAbsolutePath());
-
-            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-            EasyMock.replay(config);
-
-            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME, config, CRED_ALIAS, PWD);
-        } finally {
-            zkCluster.stop();
-            if (tmpRegConfigFile != null && tmpRegConfigFile.exists()) {
-                tmpRegConfigFile.delete();
-            }
-            System.clearProperty("org.apache.knox.gateway.remote.registry.config.file");
-        }
-    }
-
-    /**
-     * Set up and start a test ZooKeeper cluster, applying SASL digest authentication when a principal is supplied.
-     */
-    private TestingCluster setupAndStartSecureTestZooKeeper(String principal, String digestPassword) throws Exception {
-        final boolean applyAuthentication = (principal != null);
-
-        // Configure security for the ZK cluster instances
-        Map<String, Object> customInstanceSpecProps = new HashMap<>();
-
-        if (applyAuthentication) {
-            customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
-            customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
-        }
-
-        // Define the test cluster
-        List<InstanceSpec> instanceSpecs = new ArrayList<>();
-        for (int i = 0 ; i < 3 ; i++) {
-            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
-            instanceSpecs.add(is);
-        }
-        TestingCluster zkCluster = new TestingCluster(instanceSpecs);
-
-        if (applyAuthentication) {
-            // Setup ZooKeeper server SASL
-            Map<String, String> digestOptions = new HashMap<>();
-            digestOptions.put("user_" + principal, digestPassword);
-            final AppConfigurationEntry[] serverEntries =
-                    {new AppConfigurationEntry("org.apache.zookeeper.server.auth.DigestLoginModule",
-                            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-                            digestOptions)};
-            Configuration.setConfiguration(new Configuration() {
-                @Override
-                public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
-                    return ("Server".equalsIgnoreCase(name)) ? serverEntries : null;
-                }
-            });
-        }
-
-        // Start the cluster
-        zkCluster.start();
-
-        return zkCluster;
-    }
-
-    /**
-     * Create a ZooKeeper client for the test cluster, and initialize the test znodes with the appropriate ACLs.
-     */
-    private CuratorFramework initializeTestClientAndZNodes(TestingCluster zkCluster, String principal) throws Exception {
-        // Create the client for the test cluster
-        CuratorFramework setupClient = CuratorFrameworkFactory.builder()
-                                                              .connectString(zkCluster.getConnectString())
-                                                              .retryPolicy(new ExponentialBackoffRetry(100, 3))
-                                                              .build();
-        assertNotNull(setupClient);
-        setupClient.start();
-
-        List<ACL> acls = new ArrayList<>();
-        if (principal != null) {
-            acls.add(new ACL(ZooDefs.Perms.ALL, new Id("sasl", principal)));
-        } else {
-            acls.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
-        }
-        setupClient.create().creatingParentsIfNeeded().withACL(acls).forPath("/knox/config/descriptors");
-        setupClient.create().creatingParentsIfNeeded().withACL(acls).forPath("/knox/config/shared-providers");
-
-        List<ACL> negativeACLs = new ArrayList<>();
-        if (principal != null) {
-            negativeACLs.add(new ACL(ZooDefs.Perms.ALL, new Id("sasl", "notyou")));
-        } else {
-            negativeACLs.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
-        }
-        setupClient.create().creatingParentsIfNeeded().withACL(negativeACLs).forPath("/someotherconfig");
-
-        return setupClient;
-    }
-
-    private void doTestZooKeeperClient(final CuratorFramework setupClient,
-                                       final String           testClientName,
-                                       final GatewayConfig    config,
-                                       final String           credentialAlias,
-                                       final String           digestPassword) throws Exception {
-        boolean isSecureTest = (credentialAlias != null && digestPassword != null);
-
-        // Mock alias service
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(credentialAlias))
-                .andReturn(isSecureTest ? digestPassword.toCharArray() : null)
-                .anyTimes();
-        EasyMock.replay(aliasService);
-
-        // Create the client service instance
-        RemoteConfigurationRegistryClientService clientService =
-                RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
-        assertEquals("Wrong registry client service type.", clientService.getClass(), CuratorClientService.class);
-        clientService.setAliasService(aliasService);
-        clientService.init(config, null);
-        clientService.start();
-
-        doTestZooKeeperClient(setupClient, testClientName, clientService, isSecureTest);
-    }
-
-    /**
-     * Test ZooKeeper client interactions, optionally verifying secure (ACL-based) behavior.
-     *
-     * @param setupClient    The client used for interacting with ZooKeeper independently of the registry client service.
-     * @param testClientName The name of the client to use from the registry client service.
-     * @param clientService  The RemoteConfigurationRegistryClientService
-     * @param isSecureTest   Flag to indicate whether this is a secure interaction test
-     */
-    private void doTestZooKeeperClient(final CuratorFramework                         setupClient,
-                                       final String                                   testClientName,
-                                       final RemoteConfigurationRegistryClientService clientService,
-                                       boolean                                        isSecureTest) throws Exception {
-
-        RemoteConfigurationRegistryClient client = clientService.get(testClientName);
-        assertNotNull(client);
-        List<String> descriptors = client.listChildEntries("/knox/config/descriptors");
-        assertNotNull(descriptors);
-        for (String descriptor : descriptors) {
-            System.out.println("Descriptor: " + descriptor);
-        }
-
-        List<String> providerConfigs = client.listChildEntries("/knox/config/shared-providers");
-        assertNotNull(providerConfigs);
-        for (String providerConfig : providerConfigs) {
-            System.out.println("Provider config: " + providerConfig);
-        }
-
-        List<String> someotherConfig = client.listChildEntries("/someotherconfig");
-        if (isSecureTest) {
-            assertNull("Expected null because of the ACL mismatch.", someotherConfig);
-        } else {
-            assertNotNull(someotherConfig);
-        }
-
-        // Test listeners
-        final String MY_NEW_ZNODE = "/clientServiceTestNode";
-        final String MY_NEW_DATA_ZNODE = MY_NEW_ZNODE + "/mydata";
-
-        if (setupClient.checkExists().forPath(MY_NEW_ZNODE) != null) {
-            setupClient.delete().deletingChildrenIfNeeded().forPath(MY_NEW_ZNODE);
-        }
-
-        final List<String> listenerLog = new ArrayList<>();
-        client.addChildEntryListener(MY_NEW_ZNODE, (c, type, path) -> {
-            listenerLog.add("EXTERNAL: " + type.toString() + ":" + path);
-            if (ChildEntryListener.Type.ADDED.equals(type)) {
-                try {
-                    c.addEntryListener(path, (cc, p, d) -> listenerLog.add("EXTERNAL: " + p + ":" + (d != null ? new String(d) : "null")));
-                } catch (Exception e) {
-                    e.printStackTrace();
-                }
-            }
-        });
-
-        client.createEntry(MY_NEW_ZNODE);
-        client.createEntry(MY_NEW_DATA_ZNODE, "more test data");
-        String testData = client.getEntryData(MY_NEW_DATA_ZNODE);
-        assertNotNull(testData);
-        assertEquals("more test data", testData);
-
-        assertTrue(client.entryExists(MY_NEW_DATA_ZNODE));
-        client.setEntryData(MY_NEW_DATA_ZNODE, "still more data");
-
-        try {
-            Thread.sleep(1000);
-        } catch (InterruptedException e) {
-            //
-        }
-
-        client.setEntryData(MY_NEW_DATA_ZNODE, "changed completely");
-
-        try {
-            Thread.sleep(1000);
-        } catch (InterruptedException e) {
-            //
-        }
-
-        client.deleteEntry(MY_NEW_DATA_ZNODE);
-
-        try {
-            Thread.sleep(1000);
-        } catch (InterruptedException e) {
-            //
-        }
-
-        assertFalse(listenerLog.isEmpty());
-    }
-
-}
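
For reference, the registry configuration values assembled in the tests above are plain
semicolon-delimited key=value strings. For the secure variant, assuming the GatewayConfig
constants resolve to the key names used by the external-file form of the configuration
("type", "address", "authType", "principal", "credentialAlias"), the effective value would
look roughly like the following; the literal key names are an assumption, and only the
key=value;key=value structure is shown directly by the test code:

    type=ZooKeeper;address=<connect-string>;authType=digest;principal=knox;credentialAlias=zkCredential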

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java
deleted file mode 100644
index 6cbef9b..0000000
--- a/gateway-service-remoteconfig/src/test/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.zk;
-
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-import org.apache.hadoop.gateway.service.config.remote.zk.RemoteConfigurationRegistryJAASConfig;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.Configuration;
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-public class RemoteConfigurationRegistryJAASConfigTest {
-
-    @Test
-    public void testZooKeeperDigestContextEntry() throws Exception {
-        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
-        final String ENTRY_NAME       = "my_digest_context";
-        final String DIGEST_PRINCIPAL = "myIdentity";
-        final String DIGEST_PWD_ALIAS = "myAlias";
-        final String DIGEST_PWD       = "mysecret";
-
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(DIGEST_PWD_ALIAS)).andReturn(DIGEST_PWD.toCharArray()).anyTimes();
-        EasyMock.replay(aliasService);
-
-        registryConfigs.add(createDigestConfig(ENTRY_NAME, DIGEST_PRINCIPAL, DIGEST_PWD_ALIAS));
-
-        try {
-            RemoteConfigurationRegistryJAASConfig jaasConfig =
-                                    RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, aliasService);
-
-            // Make sure there are no entries for an invalid context entry name
-            assertNull(jaasConfig.getAppConfigurationEntry("invalid"));
-
-            // Validate the intended context entry
-            validateDigestContext(jaasConfig,
-                                  ENTRY_NAME,
-                                  RemoteConfigurationRegistryJAASConfig.digestLoginModules.get("ZOOKEEPER"),
-                                  DIGEST_PRINCIPAL,
-                                  DIGEST_PWD);
-        } finally {
-            Configuration.setConfiguration(null);
-        }
-    }
-
-    @Test
-    public void testKerberosContextEntry() throws Exception {
-        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
-        final String ENTRY_NAME = "my_kerberos_context";
-        final String PRINCIPAL  = "myIdentity";
-
-        File dummyKeyTab = File.createTempFile("my_context", "keytab");
-        registryConfigs.add(createKerberosConfig(ENTRY_NAME, PRINCIPAL, dummyKeyTab.getAbsolutePath()));
-
-        try {
-            RemoteConfigurationRegistryJAASConfig jaasConfig =
-                                            RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, null);
-
-            // Make sure there are no entries for an invalid context entry name
-            assertNull(jaasConfig.getAppConfigurationEntry("invalid"));
-
-            // Validate the intended context entry
-            validateKerberosContext(jaasConfig,
-                                    ENTRY_NAME,
-                                    PRINCIPAL,
-                                    dummyKeyTab.getAbsolutePath(),
-                                    true,
-                                    false);
-
-        } finally {
-            Configuration.setConfiguration(null);
-        }
-    }
-
-    @Test
-    public void testZooKeeperMultipleContextEntries() throws Exception {
-        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
-        final String KERBEROS_ENTRY_NAME = "my_kerberos_context";
-        final String KERBEROS_PRINCIPAL  = "myKerberosIdentity";
-        final String DIGEST_ENTRY_NAME   = "my_digest_context";
-        final String DIGEST_PRINCIPAL    = "myDigestIdentity";
-        final String DIGEST_PWD_ALIAS    = "myAlias";
-        final String DIGEST_PWD          = "mysecret";
-
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(DIGEST_PWD_ALIAS)).andReturn(DIGEST_PWD.toCharArray()).anyTimes();
-        EasyMock.replay(aliasService);
-
-        File dummyKeyTab = File.createTempFile("my_context", "keytab");
-        registryConfigs.add(createKerberosConfig(KERBEROS_ENTRY_NAME, KERBEROS_PRINCIPAL, dummyKeyTab.getAbsolutePath()));
-        registryConfigs.add(createDigestConfig(DIGEST_ENTRY_NAME, DIGEST_PRINCIPAL, DIGEST_PWD_ALIAS));
-
-        try {
-            RemoteConfigurationRegistryJAASConfig jaasConfig =
-                                        RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, aliasService);
-
-            // Make sure there are no entries for an invalid context entry name
-            assertNull(jaasConfig.getAppConfigurationEntry("invalid"));
-
-            // Validate the kerberos context entry
-            validateKerberosContext(jaasConfig,
-                                    KERBEROS_ENTRY_NAME,
-                                    KERBEROS_PRINCIPAL,
-                                    dummyKeyTab.getAbsolutePath(),
-                                    true,
-                                    false);
-
-            // Validate the digest context entry
-            validateDigestContext(jaasConfig,
-                                  DIGEST_ENTRY_NAME,
-                                  RemoteConfigurationRegistryJAASConfig.digestLoginModules.get("ZOOKEEPER"),
-                                  DIGEST_PRINCIPAL,
-                                  DIGEST_PWD);
-
-        } finally {
-            Configuration.setConfiguration(null);
-        }
-    }
-
-    @Test
-    public void testZooKeeperDigestContextEntryWithoutAliasService() throws Exception {
-        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
-        final String ENTRY_NAME       = "my_digest_context";
-        final String DIGEST_PRINCIPAL = "myIdentity";
-        final String DIGEST_PWD_ALIAS = "myAlias";
-
-        registryConfigs.add(createDigestConfig(ENTRY_NAME, DIGEST_PRINCIPAL, DIGEST_PWD_ALIAS));
-
-        try {
-            RemoteConfigurationRegistryJAASConfig jaasConfig =
-                                            RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, null);
-            fail("Expected IllegalArgumentException because the AliasService is not available.");
-        } catch (IllegalArgumentException e) {
-            // Expected
-            assertTrue(e.getMessage().contains("AliasService"));
-        } catch (Throwable e) {
-            fail("Wrong exception encountered: " + e.getClass().getName() + ", " + e.getMessage());
-        } finally {
-            Configuration.setConfiguration(null);
-        }
-    }
-
-    private static RemoteConfigurationRegistryConfig createDigestConfig(String entryName,
-                                                                        String principal,
-                                                                        String credentialAlias) {
-        return createDigestConfig(entryName, principal, credentialAlias, "ZooKeeper");
-    }
-
-    private static RemoteConfigurationRegistryConfig createDigestConfig(String entryName,
-                                                                        String principal,
-                                                                        String credentialAlias,
-                                                                        String registryType) {
-        RemoteConfigurationRegistryConfig rc = EasyMock.createNiceMock(RemoteConfigurationRegistryConfig.class);
-        EasyMock.expect(rc.getRegistryType()).andReturn(registryType).anyTimes();
-        EasyMock.expect(rc.getName()).andReturn(entryName).anyTimes();
-        EasyMock.expect(rc.isSecureRegistry()).andReturn(true).anyTimes();
-        EasyMock.expect(rc.getAuthType()).andReturn("digest").anyTimes();
-        EasyMock.expect(rc.getPrincipal()).andReturn(principal).anyTimes();
-        EasyMock.expect(rc.getCredentialAlias()).andReturn(credentialAlias).anyTimes();
-        EasyMock.replay(rc);
-        return rc;
-    }
-
-
-    private static RemoteConfigurationRegistryConfig createKerberosConfig(String entryName,
-                                                                          String principal,
-                                                                          String keyTabPath) {
-        return createKerberosConfig(entryName, principal, keyTabPath, "ZooKeeper");
-    }
-
-    private static RemoteConfigurationRegistryConfig createKerberosConfig(String entryName,
-                                                                          String principal,
-                                                                          String keyTabPath,
-                                                                          String registryType) {
-        return createKerberosConfig(entryName, principal, keyTabPath, null, null, registryType);
-    }
-
-    private static RemoteConfigurationRegistryConfig createKerberosConfig(String entryName,
-                                                                          String principal,
-                                                                          String keyTabPath,
-                                                                          Boolean useKeyTab,
-                                                                          Boolean useTicketCache,
-                                                                          String registryType) {
-        RemoteConfigurationRegistryConfig rc = EasyMock.createNiceMock(RemoteConfigurationRegistryConfig.class);
-        EasyMock.expect(rc.getRegistryType()).andReturn(registryType).anyTimes();
-        EasyMock.expect(rc.getName()).andReturn(entryName).anyTimes();
-        EasyMock.expect(rc.isSecureRegistry()).andReturn(true).anyTimes();
-        EasyMock.expect(rc.getAuthType()).andReturn("kerberos").anyTimes();
-        EasyMock.expect(rc.getPrincipal()).andReturn(principal).anyTimes();
-        EasyMock.expect(rc.getKeytab()).andReturn(keyTabPath).anyTimes();
-        EasyMock.expect(rc.isUseKeyTab()).andReturn(useKeyTab != null ? useKeyTab : true).anyTimes();
-        EasyMock.expect(rc.isUseTicketCache()).andReturn(useTicketCache != null ? useTicketCache : false).anyTimes();
-        EasyMock.replay(rc);
-        return rc;
-    }
-
-    private static void validateDigestContext(RemoteConfigurationRegistryJAASConfig config,
-                                              String                                entryName,
-                                              String                                loginModule,
-                                              String                                principal,
-                                              String                                password) throws Exception {
-        AppConfigurationEntry[] myContextEntries = config.getAppConfigurationEntry(entryName);
-        assertNotNull(myContextEntries);
-        assertEquals(1, myContextEntries.length);
-        AppConfigurationEntry entry = myContextEntries[0];
-        assertTrue(entry.getLoginModuleName().equals(loginModule));
-        Map<String, ?> entryOpts = entry.getOptions();
-        assertEquals(principal, entryOpts.get("username"));
-        assertEquals(password, entryOpts.get("password"));
-    }
-
-    private static void validateKerberosContext(RemoteConfigurationRegistryJAASConfig config,
-                                                String                                entryName,
-                                                String                                principal,
-                                                String                                keyTab,
-                                                boolean                               useKeyTab,
-                                                boolean                               useTicketCache) throws Exception {
-        AppConfigurationEntry[] myContextEntries = config.getAppConfigurationEntry(entryName);
-        assertNotNull(myContextEntries);
-        assertEquals(1, myContextEntries.length);
-        AppConfigurationEntry entry = myContextEntries[0];
-        assertTrue(entry.getLoginModuleName().endsWith(".security.auth.module.Krb5LoginModule"));
-        Map<String, ?> entryOpts = entry.getOptions();
-        assertEquals(principal, entryOpts.get("principal"));
-        assertEquals(keyTab, entryOpts.get("keyTab"));
-        assertEquals(useKeyTab, Boolean.valueOf((String)entryOpts.get("isUseKeyTab")));
-        assertEquals(useTicketCache, Boolean.valueOf((String)entryOpts.get("isUseTicketCache")));
-    }
-}
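
The JAAS context entries validated by the tests above correspond roughly to what one would
write in a jaas.conf file by hand. The sketch below is only an illustration assembled from
the option names the assertions check: the digest module name matches the one used for the
test cluster's Server entry elsewhere in this commit, the tests only require the Kerberos
module name to end with ".security.auth.module.Krb5LoginModule" (so the com.sun prefix is
an assumption), and the isUseKeyTab/isUseTicketCache keys mirror the option names the test
reads rather than the useKeyTab/useTicketCache options the stock Krb5LoginModule documents.

    my_digest_context {
        org.apache.zookeeper.server.auth.DigestLoginModule required
        username="myIdentity"
        password="mysecret";
    };

    my_kerberos_context {
        com.sun.security.auth.module.Krb5LoginModule required
        principal="myIdentity"
        keyTab="/path/to/my_context.keytab"
        isUseKeyTab=true
        isUseTicketCache=false;
    };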

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java
new file mode 100644
index 0000000..ce223e9
--- /dev/null
+++ b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistriesTest.java
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class DefaultRemoteConfigurationRegistriesTest {
+
+    /**
+     * Test a single registry configuration that uses digest authentication.
+     */
+    @Test
+    public void testPropertiesRemoteConfigurationRegistriesSingleDigest() throws Exception {
+        Map<String, Properties> testProperties = new HashMap<>();
+        Properties p = new Properties();
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "hostx:2181");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "zkDigestUser");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "digest");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS, "zkDigestAlias");
+        testProperties.put("testDigest", p);
+
+        doTestPropertiesRemoteConfigurationRegistries(testProperties);
+    }
+
+
+    /**
+     * Test a single registry configuration that uses Kerberos authentication.
+     */
+    @Test
+    public void testPropertiesRemoteConfigurationRegistriesSingleKerberos() throws Exception {
+        Map<String, Properties> testProperties = new HashMap<>();
+        Properties p = new Properties();
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "hostx:2181");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "zkUser");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "kerberos");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB, "/home/user/remoteregistry.keytab");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB, "true");
+        p.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE, "false");
+        testProperties.put("testKerb", p);
+
+        doTestPropertiesRemoteConfigurationRegistries(testProperties);
+    }
+
+    /**
+     * Test multiple registry configurations with varying authentication settings.
+     */
+    @Test
+    public void testPropertiesRemoteConfigurationRegistriesMultipleMixed() throws Exception {
+        Map<String, Properties> testProperties = new HashMap<>();
+
+        Properties kerb = new Properties();
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "host1:2181");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_NAMESPACE, "/knox/config");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "kerbPrincipal");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "kerberos");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB, "/home/user/mykrb.keytab");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB, "true");
+        kerb.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE, "false");
+        testProperties.put("testKerb1", kerb);
+
+        Properties digest = new Properties();
+        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
+        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "host2:2181");
+        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL, "digestPrincipal");
+        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE, "digest");
+        digest.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS, "digestPwdAlias");
+        testProperties.put("testDigest1", digest);
+
+        Properties unsecured = new Properties();
+        unsecured.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE, "ZooKeeper");
+        unsecured.setProperty(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS, "host2:2181");
+        testProperties.put("testUnsecured", unsecured);
+
+        doTestPropertiesRemoteConfigurationRegistries(testProperties);
+    }
+
+
+    /**
+     * Perform the actual test.
+     *
+     * @param testProperties The test properties
+     */
+    private void doTestPropertiesRemoteConfigurationRegistries(Map<String, Properties> testProperties) throws Exception {
+        // Mock gateway config
+        GatewayConfig gc = mockGatewayConfig(testProperties);
+
+        // Create the RemoteConfigurationRegistries object to be tested from the GatewayConfig
+        RemoteConfigurationRegistries registries = new DefaultRemoteConfigurationRegistries(gc);
+
+        // Basic validation
+        assertNotNull(registries);
+        List<RemoteConfigurationRegistry> registryConfigs = registries.getRegistryConfigurations();
+        assertNotNull(registryConfigs);
+        assertEquals(testProperties.size(), registryConfigs.size());
+
+        // Validate the contents of the created object
+        for (RemoteConfigurationRegistry regConfig : registryConfigs) {
+            validateRemoteRegistryConfig(regConfig.getName(), testProperties.get(regConfig.getName()), regConfig);
+        }
+    }
+
+
+    /**
+     * Create a mock GatewayConfig based on the specified test properties.
+     *
+     * @param testProperties The test properties to set on the config
+     */
+    private GatewayConfig mockGatewayConfig(Map<String, Properties> testProperties) {
+        // Mock gateway config
+        GatewayConfig gc = EasyMock.createNiceMock(GatewayConfig.class);
+        List<String> configNames = new ArrayList<>();
+        for (String registryName : testProperties.keySet()) {
+            configNames.add(registryName);
+
+            String propertyValueString = "";
+            Properties props = testProperties.get(registryName);
+            Enumeration<?> names = props.propertyNames();
+            while (names.hasMoreElements()) {
+                String propertyName = (String) names.nextElement();
+                propertyValueString += propertyName + "=" + props.get(propertyName);
+                if (names.hasMoreElements()) {
+                    propertyValueString += ";";
+                }
+            }
+            EasyMock.expect(gc.getRemoteRegistryConfiguration(registryName))
+                    .andReturn(propertyValueString)
+                    .anyTimes();
+        }
+        EasyMock.expect(gc.getRemoteRegistryConfigurationNames()).andReturn(configNames).anyTimes();
+        EasyMock.replay(gc);
+
+        return gc;
+    }
+
+
+    /**
+     * Validate the specified RemoteConfigurationRegistry based on the expected test properties.
+     */
+    private void validateRemoteRegistryConfig(String                      configName,
+                                              Properties                  expected,
+                                              RemoteConfigurationRegistry registryConfig) throws Exception {
+        assertEquals(configName, registryConfig.getName());
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE), registryConfig.getRegistryType());
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS), registryConfig.getConnectionString());
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_NAMESPACE), registryConfig.getNamespace());
+        assertEquals(registryConfig.isSecureRegistry(), expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE) != null);
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE), registryConfig.getAuthType());
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL), registryConfig.getPrincipal());
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS), registryConfig.getCredentialAlias());
+        assertEquals(expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB), registryConfig.getKeytab());
+        assertEquals(Boolean.valueOf((String)expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB)), registryConfig.isUseKeyTab());
+        assertEquals(Boolean.valueOf((String)expected.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE)), registryConfig.isUseTicketCache());
+    }
+
+}
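
One subtlety in validateRemoteRegistryConfig() above: for the unsecured registry, keys such
as REMOTE_CONFIG_REGISTRY_USE_KEYTAB are simply absent, so expected.get(...) yields null,
and Boolean.valueOf((String) null) returns false, which matches the defaults the registry
config is expected to report. A minimal demonstration of that behavior:

    // Boolean.valueOf(String) tolerates null and returns Boolean.FALSE,
    // so absent test properties compare cleanly against default false values.
    assert !Boolean.valueOf((String) null);
    assert Boolean.valueOf("true");

Note also that mockGatewayConfig() joins the Properties into a single key=value;key=value
string via Properties.propertyNames(), whose iteration order is unspecified; the tests are
insensitive to that order because each value is validated key by key.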

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java
new file mode 100644
index 0000000..1ff5dec
--- /dev/null
+++ b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistryConfigParserTest.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+import org.apache.knox.gateway.service.config.remote.util.RemoteRegistryConfigTestUtils;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import static org.apache.knox.gateway.service.config.remote.util.RemoteRegistryConfigTestUtils.*;
+
+public class RemoteConfigurationRegistryConfigParserTest {
+
+    @Test
+    public void testExternalXMLParsing() throws Exception {
+        final String CONN_STR = "http://my.zookeeper.host:2181";
+
+        Map<String, Map<String, String>> testRegistryConfigurations = new HashMap<>();
+
+        Map<String, String> config1 = new HashMap<>();
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_TYPE, "ZooKeeper");
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_NAME, "registry1");
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_ADDRESS, CONN_STR);
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_SECURE, "true");
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_AUTH_TYPE, "Digest");
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_PRINCIPAL, "knox");
+        config1.put(RemoteRegistryConfigTestUtils.PROPERTY_CRED_ALIAS, "zkCredential");
+        testRegistryConfigurations.put(config1.get("name"), config1);
+
+        Map<String, String> config2 = new HashMap<>();
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_TYPE, "ZooKeeper");
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_NAME, "MyKerberos");
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_ADDRESS, CONN_STR);
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_SECURE, "true");
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_AUTH_TYPE, "Kerberos");
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_PRINCIPAL, "knox");
+        File myKeyTab = File.createTempFile("mytest", "keytab");
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_KEYTAB, myKeyTab.getAbsolutePath());
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_USE_KEYTAB, "false");
+        config2.put(RemoteRegistryConfigTestUtils.PROPERTY_USE_TICKET_CACHE, "true");
+        testRegistryConfigurations.put(config2.get("name"), config2);
+
+        Map<String, String> config3 = new HashMap<>();
+        config3.put(RemoteRegistryConfigTestUtils.PROPERTY_TYPE, "ZooKeeper");
+        config3.put(RemoteRegistryConfigTestUtils.PROPERTY_NAME, "anotherRegistry");
+        config3.put(RemoteRegistryConfigTestUtils.PROPERTY_ADDRESS, "whatever:1281");
+        testRegistryConfigurations.put(config3.get("name"), config3);
+
+        String configXML =
+                    RemoteRegistryConfigTestUtils.createRemoteConfigRegistriesXML(testRegistryConfigurations.values());
+
+        File registryConfigFile = File.createTempFile("remote-registries", "xml");
+        try {
+            FileUtils.writeStringToFile(registryConfigFile, configXML);
+
+            List<RemoteConfigurationRegistryConfig> configs =
+                                    RemoteConfigurationRegistriesParser.getConfig(registryConfigFile.getAbsolutePath());
+            assertNotNull(configs);
+            assertEquals(testRegistryConfigurations.keySet().size(), configs.size());
+
+            for (RemoteConfigurationRegistryConfig registryConfig : configs) {
+                Map<String, String> expected = testRegistryConfigurations.get(registryConfig.getName());
+                assertNotNull(expected);
+                validateParsedRegistryConfiguration(registryConfig, expected);
+            }
+        } finally {
+            registryConfigFile.delete();
+        }
+    }
+
+    private void validateParsedRegistryConfiguration(RemoteConfigurationRegistryConfig config,
+                                                     Map<String, String> expected) throws Exception {
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_TYPE), config.getRegistryType());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_ADDRESS), config.getConnectionString());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_NAME), config.getName());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_NAMESAPCE), config.getNamespace());
+        assertEquals(Boolean.valueOf(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_SECURE)), config.isSecureRegistry());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_AUTH_TYPE), config.getAuthType());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_PRINCIPAL), config.getPrincipal());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_CRED_ALIAS), config.getCredentialAlias());
+        assertEquals(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_KEYTAB), config.getKeytab());
+        assertEquals(Boolean.valueOf(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_USE_KEYTAB)), config.isUseKeyTab());
+        assertEquals(Boolean.valueOf(expected.get(RemoteRegistryConfigTestUtils.PROPERTY_USE_TICKET_CACHE)), config.isUseTicketCache());
+    }
+
+}
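
For orientation, RemoteRegistryConfigTestUtils.createRemoteConfigRegistriesXML() (not shown
in this diff) serializes the property maps above into the external registries file that
RemoteConfigurationRegistriesParser consumes. A purely illustrative sketch of the expected
shape, with element names guessed from the property keys, would be:

    <remote-configuration-registries>
      <remote-configuration-registry>
        <name>registry1</name>
        <type>ZooKeeper</type>
        <address>http://my.zookeeper.host:2181</address>
        <secure>true</secure>
        <auth-type>Digest</auth-type>
        <principal>knox</principal>
        <credential-alias>zkCredential</credential-alias>
      </remote-configuration-registry>
      ...
    </remote-configuration-registries>

The element names here are assumptions; only the property names and values come from the
test code.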


[16/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
----------------------------------------------------------------------
diff --cc gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
index d33d59e,0000000..7dcb4e0
mode 100644,000000..100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayAdminTopologyFuncTest.java
@@@ -1,800 -1,0 +1,1386 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.io.StringReader;
 +import java.net.URI;
 +import java.net.URISyntaxException;
++import java.util.Arrays;
 +import java.util.Enumeration;
 +import java.util.HashMap;
++import java.util.List;
 +import java.util.Map;
 +import java.util.UUID;
 +import javax.ws.rs.core.MediaType;
 +
 +import io.restassured.http.ContentType;
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +import org.apache.directory.server.protocol.shared.transport.TcpTransport;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.security.ldap.SimpleLdapDirectoryServer;
 +import org.apache.knox.gateway.services.DefaultGatewayServices;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.util.XmlUtils;
++import io.restassured.response.ResponseBody;
++import org.apache.commons.io.FileUtils;
++import org.apache.commons.io.FilenameUtils;
 +import org.apache.hadoop.test.TestUtils;
 +import org.apache.http.HttpStatus;
 +import org.apache.log4j.Appender;
 +import org.hamcrest.MatcherAssert;
 +import org.junit.AfterClass;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +import org.w3c.dom.Document;
 +import org.xml.sax.InputSource;
 +
 +import static io.restassured.RestAssured.given;
++import static org.junit.Assert.assertTrue;
 +import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
 +import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
 +import static org.hamcrest.CoreMatchers.containsString;
 +import static org.hamcrest.CoreMatchers.equalTo;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.not;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.hamcrest.CoreMatchers.nullValue;
 +import static org.hamcrest.xml.HasXPath.hasXPath;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.fail;
 +
 +public class GatewayAdminTopologyFuncTest {
 +
 +  private static Logger LOG = LoggerFactory.getLogger( GatewayAdminTopologyFuncTest.class );
 +
 +  public static Enumeration<Appender> appenders;
 +  public static GatewayConfig config;
 +  public static GatewayServer gateway;
 +  public static String gatewayUrl;
 +  public static String clusterUrl;
 +  private static GatewayTestDriver driver = new GatewayTestDriver();
 +
 +  @BeforeClass
 +  public static void setupSuite() throws Exception {
 +    //appenders = NoOpAppender.setUp();
 +    driver.setupLdap(0);
 +    setupGateway(new GatewayTestConfig());
 +  }
 +
 +  @AfterClass
 +  public static void cleanupSuite() throws Exception {
 +    gateway.stop();
 +    driver.cleanup();
 +    //FileUtils.deleteQuietly( new File( config.getGatewayHomeDir() ) );
 +    //NoOpAppender.tearDown( appenders );
 +  }
 +
 +  public static void setupGateway(GatewayTestConfig testConfig) throws Exception {
 +
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +
 +    config = testConfig;
 +    testConfig.setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +
 +    File topoDir = new File( testConfig.getGatewayTopologyDir() );
 +    topoDir.mkdirs();
 +
 +    File deployDir = new File( testConfig.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
++    File providerConfigDir = new File(testConfig.getGatewayConfDir(), "shared-providers");
++    providerConfigDir.mkdirs();
++
++    File descriptorsDir = new File(testConfig.getGatewayConfDir(), "descriptors");
++    descriptorsDir.mkdirs();
++
 +    File descriptor = new File( topoDir, "admin.xml" );
 +    FileOutputStream stream = new FileOutputStream( descriptor );
 +    createKnoxTopology().toStream( stream );
 +    stream.close();
 +
 +    File descriptor2 = new File( topoDir, "test-cluster.xml" );
 +    FileOutputStream stream2 = new FileOutputStream( descriptor2 );
 +    createNormalTopology().toStream( stream2 );
 +    stream2.close();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put( "persist-master", "false" );
 +    options.put( "master", "password" );
 +
 +    try {
 +      srvcs.init( testConfig, options );
 +    } catch ( ServiceLifecycleException e ) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +    gateway = GatewayServer.startGateway( testConfig, srvcs );
 +    MatcherAssert.assertThat( "Failed to start gateway.", gateway, notNullValue() );
 +
 +    LOG.info( "Gateway port = " + gateway.getAddresses()[ 0 ].getPort() );
 +
 +    gatewayUrl = "http://localhost:" + gateway.getAddresses()[0].getPort() + "/" + config.getGatewayPath();
 +    clusterUrl = gatewayUrl + "/admin";
 +  }
 +
 +  private static XMLTag createNormalTopology() {
 +    XMLTag xml = XMLDoc.newDocument( true )
 +        .addRoot( "topology" )
 +        .addTag( "gateway" )
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "webappsec" )
 +        .addTag( "name" ).addText( "WebAppSec" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "csrf.enabled" )
 +        .addTag( "value" ).addText( "true" ).gotoParent().gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authentication" )
 +        .addTag( "name" ).addText( "ShiroProvider" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm" )
 +        .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +        .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +        .addTag( "value" ).addText( driver.getLdapUrl() ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +        .addTag( "value" ).addText( "simple" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "urls./**" )
 +        .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "identity-assertion" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "name" ).addText( "Default" ).gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authorization" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "name" ).addText( "AclsAuthz" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "webhdfs-acl" )
 +        .addTag( "value" ).addText( "hdfs;*;*" ).gotoParent()
 +        .gotoRoot()
 +        .addTag( "service" )
 +        .addTag( "role" ).addText( "WEBHDFS" )
 +        .addTag( "url" ).addText( "http://localhost:50070/webhdfs/v1" ).gotoParent()
 +        .gotoRoot();
 +//     System.out.println( "GATEWAY=" + xml.toString() );
 +    return xml;
 +  }
 +
 +  private static XMLTag createKnoxTopology() {
 +    XMLTag xml = XMLDoc.newDocument( true )
 +        .addRoot( "topology" )
 +        .addTag( "gateway" )
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authentication" )
 +        .addTag( "name" ).addText( "ShiroProvider" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm" )
 +        .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +        .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +        .addTag( "value" ).addText( driver.getLdapUrl() ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +        .addTag( "value" ).addText( "simple" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "urls./**" )
 +        .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +        .addTag("provider")
 +        .addTag( "role" ).addText( "authorization" )
 +        .addTag( "name" ).addText( "AclsAuthz" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag("param")
 +        .addTag("name").addText("knox.acl")
 +        .addTag("value").addText("admin;*;*").gotoParent().gotoParent()
 +        .addTag("provider")
 +        .addTag( "role" ).addText( "identity-assertion" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "name" ).addText( "Default" ).gotoParent()
 +        .gotoRoot()
 +        .addTag( "service" )
 +        .addTag( "role" ).addText( "KNOX" )
 +        .gotoRoot();
 +    // System.out.println( "GATEWAY=" + xml.toString() );
 +    return xml;
 +  }
 +
++  private static XMLTag createProviderConfiguration() {
++    XMLTag xml = XMLDoc.newDocument( true )
++            .addRoot( "gateway" )
++            .addTag( "provider" )
++            .addTag( "role" ).addText( "authentication" )
++            .addTag( "name" ).addText( "ShiroProvider" )
++            .addTag( "enabled" ).addText( "true" )
++            .addTag( "param" )
++            .addTag( "name" ).addText( "main.ldapRealm" )
++            .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
++            .addTag( "param" )
++            .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
++            .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
++            .addTag( "param" )
++            .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
++            .addTag( "value" ).addText( driver.getLdapUrl() ).gotoParent()
++            .addTag( "param" )
++            .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
++            .addTag( "value" ).addText( "simple" ).gotoParent()
++            .addTag( "param" )
++            .addTag( "name" ).addText( "urls./**" )
++            .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
++            .addTag("provider")
++            .addTag( "role" ).addText( "authorization" )
++            .addTag( "name" ).addText( "AclsAuthz" )
++            .addTag( "enabled" ).addText( "true" )
++            .addTag("param")
++            .addTag("name").addText("knox.acl")
++            .addTag("value").addText("admin;*;*").gotoParent().gotoParent()
++            .addTag("provider")
++            .addTag( "role" ).addText( "identity-assertion" )
++            .addTag( "enabled" ).addText( "true" )
++            .addTag( "name" ).addText( "Default" ).gotoParent()
++            .gotoRoot();
++    // System.out.println( "GATEWAY=" + xml.toString() );
++    return xml;
++  }
++
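++  // For reference, a sketch of (roughly) what the helper above renders once
++  // serialized: a shared provider configuration is a bare <gateway> document,
++  // without the <topology> wrapper used by createKnoxTopology().
++  //
++  //   <gateway>
++  //     <provider>
++  //       <role>authentication</role>
++  //       <name>ShiroProvider</name>
++  //       <enabled>true</enabled>
++  //       <!-- Shiro realm params as built above -->
++  //     </provider>
++  //     <provider>
++  //       <role>authorization</role>
++  //       <name>AclsAuthz</name>
++  //       <enabled>true</enabled>
++  //       <param><name>knox.acl</name><value>admin;*;*</value></param>
++  //     </provider>
++  //     <provider>
++  //       <role>identity-assertion</role>
++  //       <enabled>true</enabled>
++  //       <name>Default</name>
++  //     </provider>
++  //   </gateway>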
++
++  private static String createDescriptor(String clusterName) {
++    return createDescriptor(clusterName, null);
++  }
++
++
++  private static String createDescriptor(String clusterName, String providerConfigRef) {
++    StringBuilder sb = new StringBuilder();
++    if (providerConfigRef == null) {
++      providerConfigRef = "sandbox-providers";
++    }
++
++    sb.append("{\n");
++    sb.append("  \"discovery-type\":\"AMBARI\",\n");
++    sb.append("  \"discovery-address\":\"http://c6401.ambari.apache.org:8080\",\n");
++    sb.append("  \"discovery-user\":\"ambariuser\",\n");
++    sb.append("  \"discovery-pwd-alias\":\"ambari.discovery.password\",\n");
++    sb.append("  \"provider-config-ref\":\"");
++    sb.append(providerConfigRef);
++    sb.append("\",\n");
++    sb.append("  \"cluster\":\"");
++    sb.append(clusterName);
++    sb.append("\",\n");
++    sb.append("  \"services\":[\n");
++    sb.append("    {\"name\":\"NAMENODE\"},\n");
++    sb.append("    {\"name\":\"JOBTRACKER\"},\n");
++    sb.append("    {\"name\":\"WEBHDFS\"},\n");
++    sb.append("    {\"name\":\"WEBHCAT\"},\n");
++    sb.append("    {\"name\":\"OOZIE\"},\n");
++    sb.append("    {\"name\":\"WEBHBASE\"},\n");
++    sb.append("    {\"name\":\"HIVE\"},\n");
++    sb.append("    {\"name\":\"RESOURCEMANAGER\"},\n");
++    sb.append("    {\"name\":\"AMBARI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]}\n");
++    sb.append("  ]\n");
++    sb.append("}\n");
++
++    return sb.toString();
++  }
++
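++  // For reference, the descriptor rendered by the helper above; a sketch for
++  // clusterName "clusterOne" with the default provider-config-ref:
++  //
++  //   {
++  //     "discovery-type":"AMBARI",
++  //     "discovery-address":"http://c6401.ambari.apache.org:8080",
++  //     "discovery-user":"ambariuser",
++  //     "discovery-pwd-alias":"ambari.discovery.password",
++  //     "provider-config-ref":"sandbox-providers",
++  //     "cluster":"clusterOne",
++  //     "services":[ {"name":"NAMENODE"}, {"name":"JOBTRACKER"}, {"name":"WEBHDFS"},
++  //                  {"name":"WEBHCAT"}, {"name":"OOZIE"}, {"name":"WEBHBASE"},
++  //                  {"name":"HIVE"}, {"name":"RESOURCEMANAGER"},
++  //                  {"name":"AMBARI", "urls":["http://c6401.ambari.apache.org:8080"]} ]
++  //   }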
++
 +  //@Test
 +  public void waitForManualTesting() throws IOException {
 +    System.in.read();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testTopologyCollection() throws ClassNotFoundException {
 +    LOG_ENTER();
 +
 +    String username = "admin";
 +    String password = "admin-password";
 +    String serviceUrl = clusterUrl + "/api/v1/topologies";
 +    String href1 = given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .contentType(MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body("topologies.topology[0].name", not(nullValue()))
 +        .body("topologies.topology[1].name", not(nullValue()))
 +        .body("topologies.topology[0].uri", not(nullValue()))
 +        .body("topologies.topology[1].uri", not(nullValue()))
 +        .body("topologies.topology[0].href", not(nullValue()))
 +        .body("topologies.topology[1].href", not(nullValue()))
 +        .body("topologies.topology[0].timestamp", not(nullValue()))
 +        .body("topologies.topology[1].timestamp", not(nullValue()))
 +        .when().get(serviceUrl).thenReturn().getBody().path("topologies.topology.href[1]");
 +
 +       given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .then()
 +        //.log().all()
 +        .body("topologies.topology.href[1]", equalTo(href1))
 +        .statusCode(HttpStatus.SC_OK)
 +        .when().get(serviceUrl);
 +
 +
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .contentType(MediaType.APPLICATION_XML)
 +        .when().get(serviceUrl);
 +
 +
 +    given().auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .contentType("application/json")
 +        .body("topology.name", equalTo("test-cluster"))
 +        .when().get(href1);
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testTopologyObject() throws ClassNotFoundException {
 +    LOG_ENTER();
 +
 +    String username = "admin";
 +    String password = "admin-password";
 +    String serviceUrl = clusterUrl + "/api/v1/topologies";
 +    String hrefJson = given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .when().get(serviceUrl).thenReturn().getBody().path("topologies.topology[1].href");
 +
 +    String timestampJson = given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .contentType("application/json")
 +        .when().get(serviceUrl).andReturn()
 +        .getBody().path("topologies.topology[1].timestamp");
 +
 +        given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body("topology.name", equalTo("test-cluster"))
 +        .body("topology.timestamp", equalTo(Long.parseLong(timestampJson)))
 +        .when()
 +        .get(hrefJson);
 +
 +
 +    String hrefXml = given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .when().get(serviceUrl).thenReturn().getBody().path("topologies.topology[1].href");
 +
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .when()
 +        .get(hrefXml);
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testPositiveAuthorization() throws ClassNotFoundException{
 +    LOG_ENTER();
 +
 +    String adminUser = "admin";
 +    String adminPass = "admin-password";
 +    String url = clusterUrl + "/api/v1/topologies";
 +
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic(adminUser, adminPass)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .contentType(ContentType.JSON)
 +        .body("topologies.topology[0].name", not(nullValue()))
 +        .body("topologies.topology[1].name", not(nullValue()))
 +        .body("topologies.topology[0].uri", not(nullValue()))
 +        .body("topologies.topology[1].uri", not(nullValue()))
 +        .body("topologies.topology[0].href", not(nullValue()))
 +        .body("topologies.topology[1].href", not(nullValue()))
 +        .body("topologies.topology[0].timestamp", not(nullValue()))
 +        .body("topologies.topology[1].timestamp", not(nullValue()))
 +        .when().get(url);
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testNegativeAuthorization() throws ClassNotFoundException{
 +    LOG_ENTER();
 +
 +    String guestUser = "guest";
 +    String guestPass = "guest-password";
 +    String url = clusterUrl + "/api/v1/topologies";
 +
 +    given()
 +        //.log().all()
 +        .auth().basic(guestUser, guestPass)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_FORBIDDEN)
 +        .when().get(url);
 +
 +    LOG_EXIT();
 +  }
 +
 +  private Topology createTestTopology(){
 +    Topology topology = new Topology();
 +    topology.setName("test-topology");
 +
 +    try {
 +      topology.setUri(new URI(gatewayUrl + "/" + topology.getName()));
 +    } catch (URISyntaxException ex) {
 +      fail("Invalid topology URI: " + ex.getMessage());
 +    }
 +
 +    Provider identityProvider = new Provider();
 +    identityProvider.setName("Default");
 +    identityProvider.setRole("identity-assertion");
 +    identityProvider.setEnabled(true);
 +
 +    Provider authenticationProvider = new Provider();
 +    authenticationProvider.setName("ShiroProvider");
 +    authenticationProvider.setRole("authentication");
 +    authenticationProvider.setEnabled(true);
 +
 +    Param ldapMain = new Param();
 +    ldapMain.setName("main.ldapRealm");
 +    ldapMain.setValue("org.apache.knox.gateway.shirorealm.KnoxLdapRealm");
 +
 +    Param ldapGroupContextFactory = new Param();
 +    ldapGroupContextFactory.setName("main.ldapGroupContextFactory");
 +    ldapGroupContextFactory.setValue("org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory");
 +
 +    Param ldapRealmContext = new Param();
 +    ldapRealmContext.setName("main.ldapRealm.contextFactory");
 +    ldapRealmContext.setValue("$ldapGroupContextFactory");
 +
 +    Param ldapURL = new Param();
 +    ldapURL.setName("main.ldapRealm.contextFactory.url");
 +    ldapURL.setValue(driver.getLdapUrl());
 +
 +    Param ldapUserTemplate = new Param();
 +    ldapUserTemplate.setName("main.ldapRealm.userDnTemplate");
 +    ldapUserTemplate.setValue("uid={0},ou=people,dc=hadoop,dc=apache,dc=org");
 +
 +    Param authcBasic = new Param();
 +    authcBasic.setName("urls./**");
 +    authcBasic.setValue("authcBasic");
 +
 +    authenticationProvider.addParam(ldapGroupContextFactory);
 +    authenticationProvider.addParam(ldapMain);
 +    authenticationProvider.addParam(ldapRealmContext);
 +    authenticationProvider.addParam(ldapURL);
 +    authenticationProvider.addParam(ldapUserTemplate);
 +    authenticationProvider.addParam(authcBasic);
 +
 +    Service testService = new Service();
 +    testService.setRole("test-service-role");
 +
 +    topology.addProvider(authenticationProvider);
 +    topology.addProvider(identityProvider);
 +    topology.addService(testService);
 +    topology.setTimestamp(System.nanoTime());
 +
 +    return topology;
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testDeployTopology() throws Exception {
 +    LOG_ENTER();
 +
 +    Topology testTopology = createTestTopology();
 +
 +    String user = "guest";
 +    String password = "guest-password";
 +
 +    String url = gatewayUrl + "/" + testTopology.getName() + "/test-service-path/test-service-resource";
 +
 +    GatewayServices srvs = GatewayServer.getGatewayServices();
 +
 +    TopologyService ts = srvs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    try {
 +      ts.stopMonitor();
 +
 +      assertThat( testTopology, not( nullValue() ) );
 +      assertThat( testTopology.getName(), is( "test-topology" ) );
 +
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( "admin", "admin-password" ).header( "Accept", MediaType.APPLICATION_JSON ).then()
 +          //.log().all()
 +          .statusCode( HttpStatus.SC_OK ).body( containsString( "ServerVersion" ) ).when().get( gatewayUrl + "/admin/api/v1/version" );
 +
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( user, password ).then()
 +          //.log().all()
 +          .statusCode( HttpStatus.SC_NOT_FOUND ).when().get( url );
 +
 +      ts.deployTopology( testTopology );
 +
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( user, password ).then()
 +          //.log().all()
 +          .statusCode( HttpStatus.SC_OK ).contentType( "text/plain" ).body( is( "test-service-response" ) ).when().get( url ).getBody();
 +
 +      ts.deleteTopology( testTopology );
 +
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( user, password ).then()
 +          //.log().all()
 +          .statusCode( HttpStatus.SC_NOT_FOUND ).when().get( url );
 +    } finally {
 +      ts.startMonitor();
 +    }
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testDeleteTopology() throws ClassNotFoundException {
 +    LOG_ENTER();
 +
 +    Topology test = createTestTopology();
 +
 +    String username = "admin";
 +    String password = "admin-password";
 +    String url = clusterUrl + "/api/v1/topologies/" + test.getName();
 +
 +    GatewayServices gs = GatewayServer.getGatewayServices();
 +
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    ts.deployTopology(test);
 +
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .contentType(MediaType.APPLICATION_JSON)
 +        .when().get(url);
 +
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_OK)
 +        .contentType(MediaType.APPLICATION_JSON)
 +        .when().delete(url);
 +
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .then()
 +        //.log().all()
 +        .statusCode(HttpStatus.SC_NO_CONTENT)
 +        .when().get(url);
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testPutTopology() throws Exception {
 +    LOG_ENTER();
 +
 +    String username = "admin";
 +    String password = "admin-password";
 +    String url = clusterUrl + "/api/v1/topologies/test-put";
 +
 +    String jsonPut =
 +        given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .get(clusterUrl + "/api/v1/topologies/test-cluster")
 +        .getBody().asString();
 +
 +    String xml = given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .contentType(MediaType.APPLICATION_JSON)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .body(jsonPut)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        //.log().all()
 +        .when().put(url).getBody().asString();
 +
 +    InputSource source = new InputSource( new StringReader( xml ) );
 +    Document doc = XmlUtils.readXml( source );
 +
 +    assertThat( doc, hasXPath( "/topology/gateway/provider[1]/name", containsString( "WebAppSec" ) ) );
 +    assertThat( doc, hasXPath( "/topology/gateway/provider[1]/param/name", containsString( "csrf.enabled" ) ) );
 +
 +    given()
 +            .auth().preemptive().basic(username, password)
 +            .header("Accept", MediaType.APPLICATION_XML)
 +            .then()
 +            .statusCode(HttpStatus.SC_OK)
 +            .body(equalTo(xml))
 +            .when().get(url)
 +            .getBody().asString();
 +
 +    String xmlPut =
 +        given()
 +            .auth().preemptive().basic(username, password)
 +            .header("Accept", MediaType.APPLICATION_XML)
 +            .get(clusterUrl + "/api/v1/topologies/test-cluster")
 +            .getBody().asString();
 +
 +    String json = given()
 +        //.log().all()
 +        .auth().preemptive().basic(username, password)
 +        .contentType(MediaType.APPLICATION_XML)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .body(xmlPut)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +            //.log().all()
 +        .when().put(url).getBody().asString();
 +
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_JSON)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(equalTo(json))
 +        .when().get(url)
 +        .getBody().asString();
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testXForwardedHeaders() {
 +    LOG_ENTER();
 +
 +    String username = "admin";
 +    String password = "admin-password";
 +    String url = clusterUrl + "/api/v1/topologies";
 +
 +//    X-Forward header values
 +    String port = String.valueOf(777);
 +    String server = "myserver";
 +    String host = server + ":" + port;
 +    String proto = "protocol";
 +    String context = "/mycontext";
 +    String newUrl = proto + "://" + host + context;
 +//    String port = String.valueOf(gateway.getAddresses()[0].getPort());
 +
 +//     Case 1: Add in all x-forward headers (host, port, server, context, proto)
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .header("X-Forwarded-Host", host )
 +        .header("X-Forwarded-Port", port )
 +        .header("X-Forwarded-Server", server )
 +        .header("X-Forwarded-Context", context)
 +        .header("X-Forwarded-Proto", proto)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(newUrl))
 +        .body(containsString("test-cluster"))
 +        .body(containsString("admin"))
 +        .when().get(url);
 +
 +
 +//     Case 2: add in x-forward headers (host, server, proto, context)
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .header("X-Forwarded-Host", host )
 +        .header("X-Forwarded-Server", server )
 +        .header("X-Forwarded-Context", context )
 +        .header("X-Forwarded-Proto", proto )
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(server))
 +        .body(containsString(context))
 +        .body(containsString(proto))
 +        .body(containsString(host))
 +        .body(containsString("test-cluster"))
 +        .body(containsString("admin"))
 +        .when().get(url);
 +
 +//     Case 3: add in x-forward headers (host, proto, port, context)
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .header("X-Forwarded-Host", host )
 +        .header("X-Forwarded-Port", port )
 +        .header("X-Forwarded-Context", context )
 +        .header("X-Forwarded-Proto", proto)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(host))
 +        .body(containsString(port))
 +        .body(containsString(context))
 +        .body(containsString(proto))
 +        .body(containsString("test-cluster"))
 +        .body(containsString("admin"))
 +        .when().get(url);
 +
 +//     Case 4: add in x-forward headers (host, proto, port, context) no port in host.
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .header("X-Forwarded-Host", server)
 +        .header("X-Forwarded-Port", port)
 +        .header("X-Forwarded-Context", context)
 +        .header("X-Forwarded-Proto", proto)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(server))
 +        .body(containsString(port))
 +        .body(containsString(context))
 +        .body(containsString(proto))
 +        .body(containsString("test-cluster"))
 +        .body(containsString("admin"))
 +        .when().get(url);
 +
 +//     Case 5: add in x-forward headers (host, port)
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .header("X-Forwarded-Host", host )
 +        .header("X-Forwarded-Port", port )
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(host))
 +        .body(containsString(port))
 +        .body(containsString("test-cluster"))
 +        .body(containsString("admin"))
 +        .when().get(url);
 +
 +//     Case 6: Normal Request
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(url))
 +        .body(containsString("test-cluster"))
 +        .body(containsString("admin"))
 +        .when().get(url);
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.LONG_TIMEOUT )
 +  public void testGatewayPathChange() throws Exception {
 +    LOG_ENTER();
 +    String username = "admin";
 +    String password = "admin-password";
 +    String url = clusterUrl + "/api/v1/topologies";
 +
 +//     Case 1: Normal Request (No Change in gateway.path). Ensure HTTP OK resp + valid URL.
 +    given()
 +        .auth().preemptive().basic(username, password)
 +        .header("Accept", MediaType.APPLICATION_XML)
 +        .then()
 +        .statusCode(HttpStatus.SC_OK)
 +        .body(containsString(url + "/test-cluster"))
 +        .when().get(url);
 +
 +
 +//     Case 2: Change gateway.path to another String. Ensure HTTP OK resp + valid URL.
 +    try {
 +      gateway.stop();
 +
 +      GatewayTestConfig conf = new GatewayTestConfig();
 +      conf.setGatewayPath("new-gateway-path");
 +      setupGateway(conf);
 +
 +      String newUrl = clusterUrl + "/api/v1/topologies";
 +
 +      given()
 +          .auth().preemptive().basic(username, password)
 +          .header("Accept", MediaType.APPLICATION_XML)
 +          .then()
 +          .statusCode(HttpStatus.SC_OK)
 +          .body(containsString(newUrl + "/test-cluster"))
 +          .when().get(newUrl);
 +    } catch (Exception e) {
 +      fail(e.getMessage());
 +    } finally {
 +      // Restart the gateway with the old settings.
 +      gateway.stop();
 +      setupGateway(new GatewayTestConfig());
 +    }
 +
 +    LOG_EXIT();
 +  }
 +
++
++  @Test( timeout = TestUtils.LONG_TIMEOUT )
++  public void testProviderConfigurationCollection() throws Exception {
++    LOG_ENTER();
++
++    final String username = "admin";
++    final String password = "admin-password";
++    final String serviceUrl = clusterUrl + "/api/v1/providerconfig";
++
++    final File sharedProvidersDir = new File(config.getGatewayConfDir(), "shared-providers");
++    final List<String> configNames = Arrays.asList("sandbox-providers", "custom-providers");
++    final List<String> configFileNames = Arrays.asList(configNames.get(0) + ".xml", configNames.get(1) + ".xml");
++
++    // Request a listing of all the provider configs with an INCORRECT Accept header
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Accept", MediaType.APPLICATION_XML)
++      .then()
++      .statusCode(HttpStatus.SC_NOT_ACCEPTABLE)
++      .when().get(serviceUrl);
++
++    // Request a listing of all the provider configs (with the CORRECT Accept header)
++    ResponseBody responseBody = given()
++                                  .auth().preemptive().basic(username, password)
++                                  .header("Accept", MediaType.APPLICATION_JSON)
++                                  .then()
++                                  .statusCode(HttpStatus.SC_OK)
++                                  .contentType(MediaType.APPLICATION_JSON)
++                                  .when().get(serviceUrl).body();
++    List<String> items = responseBody.path("items");
++    assertTrue("Expected no items since the shared-providers dir is empty.", items.isEmpty());
++
++    // Manually write a file to the shared-providers directory
++    File providerConfig = new File(sharedProvidersDir, configFileNames.get(0));
++    FileOutputStream stream = new FileOutputStream(providerConfig);
++    createProviderConfiguration().toStream(stream);
++    stream.close();
++
++    // Request a listing of all the provider configs
++    responseBody = given()
++                      .auth().preemptive().basic(username, password)
++                      .header("Accept", MediaType.APPLICATION_JSON)
++                      .then()
++                      .statusCode(HttpStatus.SC_OK)
++                      .contentType(MediaType.APPLICATION_JSON)
++                      .when().get(serviceUrl).body();
++    items = responseBody.path("items");
++    assertEquals("Expected items to include the new file in the shared-providers dir.", 1, items.size());
++    assertEquals(configFileNames.get(0), responseBody.path("items[0].name"));
++    String href1 = responseBody.path("items[0].href");
++
++    // Manually write another file to the shared-providers directory
++    File anotherProviderConfig = new File(sharedProvidersDir, configFileNames.get(1));
++    stream = new FileOutputStream(anotherProviderConfig);
++    createProviderConfiguration().toStream(stream);
++    stream.close();
++
++    // Request a listing of all the provider configs
++    responseBody = given()
++                      .auth().preemptive().basic(username, password)
++                      .header("Accept", MediaType.APPLICATION_JSON)
++                      .then()
++                      .statusCode(HttpStatus.SC_OK)
++                      .contentType(MediaType.APPLICATION_JSON)
++                      .when().get(serviceUrl).body();
++    items = responseBody.path("items");
++    assertEquals(2, items.size());
++    String pcOne = responseBody.path("items[0].name");
++    String pcTwo = responseBody.path("items[1].name");
++    assertTrue(configFileNames.contains(pcOne));
++    assertTrue(configFileNames.contains(pcTwo));
++
++    // Request a specific provider configuration with an INCORRECT Accept header
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Accept", MediaType.APPLICATION_JSON)
++      .then()
++      .statusCode(HttpStatus.SC_NOT_ACCEPTABLE)
++      .when().get(href1).body();
++
++    // Request a specific provider configuration (with the CORRECT Accept header)
++    responseBody = given()
++                      .auth().preemptive().basic(username, password)
++                      .header("Accept", MediaType.APPLICATION_XML)
++                      .then()
++                      .statusCode(HttpStatus.SC_OK)
++                      .contentType(MediaType.APPLICATION_XML)
++                      .when().get(href1).body();
++    String sandboxProvidersConfigContent = responseBody.asString();
++
++    // Parse the result, to make sure it's at least valid XML
++    XmlUtils.readXml(new InputSource(new StringReader(sandboxProvidersConfigContent)));
++
++    providerConfig.delete();
++    anotherProviderConfig.delete();
++
++    // Request a specific provider configuration, which does NOT exist
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Accept", MediaType.APPLICATION_XML)
++      .then()
++      .statusCode(HttpStatus.SC_NOT_FOUND)
++      .when().get(serviceUrl + "/not-a-real-provider-config");
++
++    LOG_EXIT();
++  }
++
++
++  @Test( timeout = TestUtils.LONG_TIMEOUT )
++  public void testPutProviderConfiguration() throws Exception {
++    LOG_ENTER();
++
++    final String username = "admin";
++    final String password = "admin-password";
++    final String serviceUrl = clusterUrl + "/api/v1/providerconfig";
++
++    final String newProviderConfigName     = "new-provider-config";
++    final String newProviderConfigFileName = newProviderConfigName + ".xml";
++
++    XMLTag newProviderConfigXML = createProviderConfiguration();
++
++    // Attempt to PUT a provider config with an INCORRECT Content-type header
++    given()
++        .auth().preemptive().basic(username, password)
++        .header("Content-type", MediaType.APPLICATION_JSON)
++        .body(newProviderConfigXML.toBytes("utf-8"))
++        .then()
++        .statusCode(HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE)
++        .when().put(serviceUrl + "/" + newProviderConfigName);
++
++    // Attempt to PUT a provider config with the CORRECT Content-type header
++    given()
++        .auth().preemptive().basic(username, password)
++        .header("Content-type", MediaType.APPLICATION_XML)
++        .body(newProviderConfigXML.toBytes("utf-8"))
++        .then()
++        .statusCode(HttpStatus.SC_CREATED)
++        .when().put(serviceUrl + "/" + newProviderConfigName);
++
++    // Verify that the provider configuration was written to the expected location
++    File newProviderConfigFile =
++                  new File(new File(config.getGatewayConfDir(), "shared-providers"), newProviderConfigFileName);
++    assertTrue(newProviderConfigFile.exists());
++
++    // Request a listing of all the provider configs to further verify the PUT
++    ResponseBody responseBody = given()
++                                  .auth().preemptive().basic(username, password)
++                                  .header("Accept", MediaType.APPLICATION_JSON)
++                                  .then()
++                                  .statusCode(HttpStatus.SC_OK)
++                                  .contentType(MediaType.APPLICATION_JSON)
++                                  .when().get(serviceUrl).body();
++    List<String> items = responseBody.path("items");
++    assertEquals(1, items.size());
++    assertEquals(newProviderConfigFileName, responseBody.path("items[0].name"));
++    String href = responseBody.path("items[0].href");
++
++    // Get the new provider config content
++    responseBody = given()
++                      .auth().preemptive().basic(username, password)
++                      .header("Accept", MediaType.APPLICATION_XML)
++                      .then()
++                      .statusCode(HttpStatus.SC_OK)
++                      .contentType(MediaType.APPLICATION_XML)
++                      .when().get(href).body();
++    String configContent = responseBody.asString();
++
++    // Parse the result, to make sure it's at least valid XML
++    XmlUtils.readXml(new InputSource(new StringReader(configContent)));
++
++    // Manually delete the provider config
++    newProviderConfigFile.delete();
++
++    LOG_EXIT();
++  }
++
++
++  @Test( timeout = TestUtils.LONG_TIMEOUT )
++  public void testDeleteProviderConfiguration() throws Exception {
++    LOG_ENTER();
++
++    final String username = "admin";
++    final String password = "admin-password";
++    final String serviceUrl = clusterUrl + "/api/v1/providerconfig";
++
++    final File sharedProvidersDir = new File(config.getGatewayConfDir(), "shared-providers");
++
++    // Manually add two provider config files to the shared-providers directory
++    File providerConfigOneFile = new File(sharedProvidersDir, "deleteme-one-config.xml");
++    FileOutputStream stream = new FileOutputStream(providerConfigOneFile);
++    createProviderConfiguration().toStream(stream);
++    stream.close();
++    assertTrue(providerConfigOneFile.exists());
++
++    File providerConfigTwoFile = new File(sharedProvidersDir, "deleteme-two-config.xml");
++    stream = new FileOutputStream(providerConfigTwoFile);
++    createProviderConfiguration().toStream(stream);
++    stream.close();
++    assertTrue(providerConfigTwoFile.exists());
++
++    // Request a listing of all the provider configs
++    ResponseBody responseBody = given()
++                                  .auth().preemptive().basic(username, password)
++                                  .header("Accept", MediaType.APPLICATION_JSON)
++                                  .then()
++                                  .statusCode(HttpStatus.SC_OK)
++                                  .contentType(MediaType.APPLICATION_JSON)
++                                  .when().get(serviceUrl).body();
++    List<String> items = responseBody.path("items");
++    assertEquals(2, items.size());
++    String name1 = responseBody.path("items[0].name");
++    String href1 = responseBody.path("items[0].href");
++    String name2 = responseBody.path("items[1].name");
++    String href2 = responseBody.path("items[1].href");
++
++    // Delete one of the provider configs
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().delete(href1).body();
++    String deletedMsg = responseBody.path("deleted");
++    assertEquals("provider config " + FilenameUtils.getBaseName(name1), deletedMsg);
++    assertFalse((new File(sharedProvidersDir, name1).exists()));
++
++    assertTrue((new File(sharedProvidersDir, name2).exists()));
++    // Delete the other provider config
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().delete(href2).body();
++    deletedMsg = responseBody.path("deleted");
++    assertEquals("provider config " + FilenameUtils.getBaseName(name2), deletedMsg);
++    assertFalse((new File(sharedProvidersDir, name2).exists()));
++
++    // Attempt to delete a provider config that does not exist
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Accept", MediaType.APPLICATION_JSON)
++      .then()
++      .statusCode(HttpStatus.SC_OK)
++      .when().delete(serviceUrl + "/does-not-exist");
++
++    LOG_EXIT();
++  }
++
++
++  @Test( timeout = TestUtils.LONG_TIMEOUT )
++  public void testDescriptorCollection() throws Exception {
++    LOG_ENTER();
++
++    final String username = "admin";
++    final String password = "admin-password";
++    final String serviceUrl = clusterUrl + "/api/v1/descriptors";
++
++    final File descriptorsDir = new File(config.getGatewayConfDir(), "descriptors");
++    final List<String> clusterNames        = Arrays.asList("clusterOne", "clusterTwo");
++    final List<String> descriptorNames     = Arrays.asList("test-descriptor-one", "test-descriptor-two");
++    final List<String> descriptorFileNames = Arrays.asList(descriptorNames.get(0) + ".json",
++                                                           descriptorNames.get(1) + ".json");
++
++    // Request a listing of all the descriptors with an INCORRECT Accept header
++    given()
++        .auth().preemptive().basic(username, password)
++        .header("Accept", MediaType.APPLICATION_XML)
++        .then()
++        .statusCode(HttpStatus.SC_NOT_ACCEPTABLE)
++        .when().get(serviceUrl);
++
++    // Request a listing of all the descriptors (with the CORRECT Accept header)
++    ResponseBody responseBody = given()
++                                  .auth().preemptive().basic(username, password)
++                                  .header("Accept", MediaType.APPLICATION_JSON)
++                                  .then()
++                                  .statusCode(HttpStatus.SC_OK)
++                                  .contentType(MediaType.APPLICATION_JSON)
++                                  .when().get(serviceUrl).body();
++    List<String> items = responseBody.path("items");
++    assertTrue("Expected no items since the descriptors dir is empty.", items.isEmpty());
++
++    // Manually write a file to the descriptors directory
++    File descriptorOneFile = new File(descriptorsDir, descriptorFileNames.get(0));
++    FileUtils.write(descriptorOneFile, createDescriptor(clusterNames.get(0)));
++
++    // Request a listing of all the descriptors
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().get(serviceUrl).body();
++    items = responseBody.path("items");
++    assertEquals("Expected items to include the new file in the shared-providers dir.", 1, items.size());
++    assertEquals(descriptorFileNames.get(0), responseBody.path("items[0].name"));
++    String href1 = responseBody.path("items[0].href");
++
++    // Manually write another file to the descriptors directory
++    File descriptorTwoFile = new File(descriptorsDir, descriptorFileNames.get(1));
++    FileUtils.write(descriptorTwoFile, createDescriptor(clusterNames.get(1)));
++
++    // Request a listing of all the descriptors
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().get(serviceUrl).body();
++    items = responseBody.path("items");
++    assertEquals(2, items.size());
++    String descOne = responseBody.path("items[0].name");
++    String descTwo = responseBody.path("items[1].name");
++    assertTrue(descriptorFileNames.contains(descOne));
++    assertTrue(descriptorFileNames.contains(descTwo));
++
++    // Request a specific descriptor with an INCORRECT Accept header
++    given()
++        .auth().preemptive().basic(username, password)
++        .header("Accept", MediaType.APPLICATION_XML)
++        .then()
++        .statusCode(HttpStatus.SC_NOT_ACCEPTABLE)
++        .when().get(href1).body();
++
++    // Request a specific descriptor (with the CORRECT Accept header)
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().get(href1).body();
++    String cluster = responseBody.path("cluster");
++    assertEquals(clusterNames.get(0), cluster);
++
++    // Request a specific descriptor, which does NOT exist
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Accept", MediaType.APPLICATION_JSON)
++      .then()
++      .statusCode(HttpStatus.SC_NOT_FOUND)
++      .when().get(serviceUrl + "/not-a-real-descriptor").body();
++
++    descriptorOneFile.delete();
++    descriptorTwoFile.delete();
++
++    LOG_EXIT();
++  }
++
++
++  @Test( timeout = TestUtils.LONG_TIMEOUT )
++  public void testPutDescriptor() throws Exception {
++    LOG_ENTER();
++
++    final String username = "admin";
++    final String password = "admin-password";
++    final String serviceUrl = clusterUrl + "/api/v1/descriptors";
++
++    final String clusterName           = "test-cluster";
++    final String newDescriptorName     = "new-descriptor";
++    final String newDescriptorFileName = newDescriptorName + ".json";
++
++    String newDescriptorJSON = createDescriptor(clusterName);
++
++    // Attempt to PUT a descriptor with an INCORRECT Content-type header
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Content-type", MediaType.APPLICATION_XML)
++      .body(newDescriptorJSON.getBytes("utf-8"))
++      .then()
++      .statusCode(HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE)
++      .when().put(serviceUrl + "/" + newDescriptorName);
++
++    // Attempt to PUT a descriptor with the CORRECT Content-type header
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Content-type", MediaType.APPLICATION_JSON)
++      .body(newDescriptorJSON.getBytes("utf-8"))
++      .then()
++      .statusCode(HttpStatus.SC_CREATED)
++      .when().put(serviceUrl + "/" + newDescriptorName);
++
++    // Verify that the descriptor was written to the expected location
++    File newDescriptorFile =
++            new File(new File(config.getGatewayConfDir(), "descriptors"), newDescriptorFileName);
++    assertTrue(newDescriptorFile.exists());
++
++    // Request a listing of all the descriptors to verify the PUT
++    ResponseBody responseBody = given()
++                                  .auth().preemptive().basic(username, password)
++                                  .header("Accept", MediaType.APPLICATION_JSON)
++                                  .then()
++                                  .statusCode(HttpStatus.SC_OK)
++                                  .contentType(MediaType.APPLICATION_JSON)
++                                  .when().get(serviceUrl).body();
++    List<String> items = responseBody.path("items");
++    assertEquals(1, items.size());
++    assertEquals(newDescriptorFileName, responseBody.path("items[0].name"));
++    String href = responseBody.path("items[0].href");
++
++    // Get the new descriptor content
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().get(href).body();
++    String cluster = responseBody.path("cluster");
++    assertEquals(clusterName, cluster);
++
++    // Manually delete the descriptor
++    newDescriptorFile.delete();
++
++    LOG_EXIT();
++  }
++
++
++  @Test( timeout = TestUtils.LONG_TIMEOUT )
++  public void testDeleteDescriptor() throws Exception {
++    LOG_ENTER();
++
++    final String username = "admin";
++    final String password = "admin-password";
++    final String serviceUrl = clusterUrl + "/api/v1/descriptors";
++
++    final File descriptorsDir = new File(config.getGatewayConfDir(), "descriptors");
++
++    // Manually add two descriptor files to the descriptors directory
++    File descriptorOneFile = new File(descriptorsDir, "deleteme-one.json");
++    FileUtils.writeStringToFile(descriptorOneFile, createDescriptor("clusterOne"));
++    assertTrue(descriptorOneFile.exists());
++
++    File descriptorTwoFile = new File(descriptorsDir, "deleteme-two.json");
++    FileUtils.writeStringToFile(descriptorTwoFile, createDescriptor("clusterTwo"));
++    assertTrue(descriptorTwoFile.exists());
++
++    // Request a listing of all the descriptors
++    ResponseBody responseBody = given()
++                                  .auth().preemptive().basic(username, password)
++                                  .header("Accept", MediaType.APPLICATION_JSON)
++                                  .then()
++                                  .statusCode(HttpStatus.SC_OK)
++                                  .contentType(MediaType.APPLICATION_JSON)
++                                  .when().get(serviceUrl).body();
++    List<String> items = responseBody.path("items");
++    assertEquals(2, items.size());
++    String name1 = responseBody.path("items[0].name");
++    String href1 = responseBody.path("items[0].href");
++    String name2 = responseBody.path("items[1].name");
++    String href2 = responseBody.path("items[1].href");
++
++    // Delete one of the descriptors
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().delete(href1).body();
++    String deletedMsg = responseBody.path("deleted");
++    assertEquals("descriptor " + FilenameUtils.getBaseName(name1), deletedMsg);
++    assertFalse((new File(descriptorsDir, name1).exists()));
++
++    assertTrue((new File(descriptorsDir, name2).exists()));
++    // Delete the other descriptor
++    responseBody = given()
++                    .auth().preemptive().basic(username, password)
++                    .header("Accept", MediaType.APPLICATION_JSON)
++                    .then()
++                    .statusCode(HttpStatus.SC_OK)
++                    .contentType(MediaType.APPLICATION_JSON)
++                    .when().delete(href2).body();
++    deletedMsg = responseBody.path("deleted");
++    assertEquals("descriptor " + FilenameUtils.getBaseName(name2), deletedMsg);
++    assertFalse((new File(descriptorsDir, name2).exists()));
++
++    // Attempt to delete a descriptor that does not exist
++    given()
++      .auth().preemptive().basic(username, password)
++      .header("Accept", MediaType.APPLICATION_JSON)
++      .then()
++      .statusCode(HttpStatus.SC_OK)
++      .when().delete(serviceUrl + "/does-not-exist");
++
++    LOG_EXIT();
++  }
++
++
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/pom.xml
----------------------------------------------------------------------


[15/53] [abbrv] knox git commit: KNOX-998 refactoring after the merge

Posted by mo...@apache.org.
KNOX-998 refactoring after the merge


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/9577842b
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/9577842b
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/9577842b

Branch: refs/heads/master
Commit: 9577842b1b72fbeed9c622118917a64fbf869c4f
Parents: 58780d3
Author: Sandeep More <mo...@apache.org>
Authored: Wed Oct 25 15:20:09 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Wed Oct 25 15:20:09 2017 -0400

----------------------------------------------------------------------
 .../provider/impl/BaseZookeeperURLManager.java  | 195 -------------------
 .../provider/impl/HBaseZookeeperURLManager.java | 138 -------------
 .../provider/impl/KafkaZookeeperURLManager.java | 152 ---------------
 .../provider/impl/SOLRZookeeperURLManager.java  | 118 -----------
 .../ha/provider/impl/StringResponseHandler.java |  49 -----
 .../provider/impl/BaseZookeeperURLManager.java  | 195 +++++++++++++++++++
 .../provider/impl/HBaseZookeeperURLManager.java | 138 +++++++++++++
 .../provider/impl/KafkaZookeeperURLManager.java | 152 +++++++++++++++
 .../provider/impl/SOLRZookeeperURLManager.java  | 118 +++++++++++
 .../ha/provider/impl/StringResponseHandler.java |  49 +++++
 ...g.apache.knox.gateway.ha.provider.URLManager |   5 +-
 .../impl/HBaseZookeeperURLManagerTest.java      |  72 -------
 .../impl/KafkaZookeeperURLManagerTest.java      |  71 -------
 .../impl/SOLRZookeeperURLManagerTest.java       | 110 -----------
 .../impl/HBaseZookeeperURLManagerTest.java      |  72 +++++++
 .../impl/KafkaZookeeperURLManagerTest.java      |  71 +++++++
 .../impl/SOLRZookeeperURLManagerTest.java       | 110 +++++++++++
 ...gateway.deploy.ProviderDeploymentContributor |   5 +-
 18 files changed, 910 insertions(+), 910 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/BaseZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/BaseZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/BaseZookeeperURLManager.java
deleted file mode 100644
index 0b16144..0000000
--- a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/BaseZookeeperURLManager.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ConcurrentLinkedQueue;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.gateway.ha.provider.HaServiceConfig;
-import org.apache.hadoop.gateway.ha.provider.URLManager;
-import org.apache.hadoop.gateway.ha.provider.impl.i18n.HaMessages;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-
-import com.google.common.collect.Lists;
-
-/**
- * Base implementation of URLManager intended for query of Zookeeper active hosts. In
- * the event of a failure via markFailed, Zookeeper is queried again for active
- * host information.
- * 
- * When configuring the HAProvider in the topology, the zookeeperEnsemble attribute must be set to a
- * comma delimited list of the host and port number, i.e. host1:2181,host2:2181. 
- */
-public abstract class BaseZookeeperURLManager implements URLManager {
-	protected static final HaMessages LOG = MessagesFactory.get(HaMessages.class);
-	/**
-	 * Host Ping Timeout
-	 */
-	private static final int TIMEOUT = 2000;
-
-	private String zooKeeperEnsemble;
-	private ConcurrentLinkedQueue<String> urls = new ConcurrentLinkedQueue<String>();
-
-	// -------------------------------------------------------------------------------------
-	// URLManager interface methods
-	// -------------------------------------------------------------------------------------
-
-	@Override
-	public boolean supportsConfig(HaServiceConfig config) {
-		if (!config.getServiceName().equalsIgnoreCase(getServiceName())) {
-			return false;
-		}
-		
-		String zookeeperEnsemble = config.getZookeeperEnsemble();
-		if (zookeeperEnsemble != null && zookeeperEnsemble.trim().length() > 0) {
-			return true;
-		}
-		
-		return false;
-	}
-
-	@Override
-	public void setConfig(HaServiceConfig config) {
-		zooKeeperEnsemble = config.getZookeeperEnsemble();
-		setURLs(lookupURLs());
-	}
-
-	@Override
-	public synchronized String getActiveURL() {
-		// None available so refresh
-		if (urls.isEmpty()) {
-			setURLs(lookupURLs());
-		}
-
-		return this.urls.peek();
-	}
-
-	@Override
-	public synchronized void setActiveURL(String url) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public synchronized List<String> getURLs() {
-		return Lists.newArrayList(this.urls.iterator());
-	}
-
-	@Override
-	public synchronized void markFailed(String url) {
-		// Capture complete URL of active host
-		String topURL = getActiveURL();
-
-		// Refresh URLs from ZooKeeper
-		setURLs(lookupURLs());
-
-		// Show failed URL and new URL
-		LOG.markedFailedUrl(topURL, getActiveURL());
-	}
-
-	@Override
-	public synchronized void setURLs(List<String> urls) {
-		if ((urls != null) && (!(urls.isEmpty()))) {
-			this.urls.clear();
-			this.urls.addAll(urls);
-		}
-	}
-
-	// -------------------------------------------------------------------------------------
-	// Abstract methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * Look within Zookeeper under the /live_nodes branch for active hosts
-	 * 
-	 * @return A List of URLs (never null)
-	 */
-	protected abstract List<String> lookupURLs();
-
-	/**
-	 * @return The name of the Knox Topology Service to support
-	 */
-	protected abstract String getServiceName();
-
-	// -------------------------------------------------------------------------------------
-	// Protected methods
-	// -------------------------------------------------------------------------------------
-
-	protected String getZookeeperEnsemble() {
-		return zooKeeperEnsemble;
-	}
-	
-	/**
-	 * Validate access to hosts using simple light weight ping style REST call.
-	 * 
-	 * @param hosts List of hosts to evaluate (required)
-	 * @param suffix Text to append to host (required) 
-	 * @param acceptHeader Used for Accept header (optional)
-	 * 
-	 * @return Hosts with successful access
-	 */
-	protected List<String> validateHosts(List<String> hosts, String suffix, String acceptHeader) {
-		List<String> result = new ArrayList<String>();
-		
-		CloseableHttpClient client = null;
-		
-		try {
-			// Construct a HttpClient with short term timeout
-			RequestConfig.Builder requestBuilder = RequestConfig.custom()
-					.setConnectTimeout(TIMEOUT)
-					.setSocketTimeout(TIMEOUT)
-					.setConnectionRequestTimeout(TIMEOUT);
-
-			client = HttpClientBuilder.create()
-					.setDefaultRequestConfig(requestBuilder.build())
-					.build();
-			
-			for(String host: hosts) {
-				try	{
-					HttpGet get = new HttpGet(host + suffix);
-					
-					if (acceptHeader != null) {
-						get.setHeader("Accept", acceptHeader);
-					}
-					
-					String response = client.execute(get, new StringResponseHandler());
-					
-					if (response != null) {
-						result.add(host);
-					}
-				}
-				catch (Exception ex) {
-					// ignore host
-				}
-			}
-		}
-		catch (Exception ex) {
-			// Ignore errors
-		}
-		finally	{
-			IOUtils.closeQuietly(client);
-		}
-		
-		return result;
-	}
-}
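
As the class Javadoc above notes, the zookeeperEnsemble attribute is supplied
through the HAProvider entry of a topology. A minimal sketch of such an entry
(the service role, host names and ports are placeholders, and the exact
parameter layout may vary by Knox version):

    <provider>
        <role>ha</role>
        <name>HaProvider</name>
        <enabled>true</enabled>
        <param>
            <name>WEBHBASE</name>
            <value>zookeeperEnsemble=host1:2181,host2:2181</value>
        </param>
    </provider>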

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManager.java
deleted file mode 100644
index 8a414c7..0000000
--- a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManager.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Implementation of URLManager intended for query of Zookeeper for active HBase RegionServer hosts.
- *  
- * The assumption is that the HBase REST Server will be installed on the same host.  For safety
- * reasons, the REST Server is pinged for access before inclusion in the list of returned hosts.
- * 
- * In the event of a failure via markFailed, Zookeeper is queried again for active
- * host information.
- * 
- * When configuring the HAProvider in the topology, the zookeeperEnsemble
- * attribute must be set to a comma delimited list of the host and port number,
- * i.e. host1:2181,host2:2181.
- */
-public class HBaseZookeeperURLManager extends BaseZookeeperURLManager {
-	/**
-	 * Default Port Number for HBase REST Server
-	 */
-	private static final int PORT_NUMBER = 8080;
-	
-	private String zookeeperNamespace = "hbase-unsecure";
-	
-	// -------------------------------------------------------------------------------------
-	// Abstract methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * Look within Zookeeper under the /hbase-unsecure/rs branch for active HBase RegionServer hosts
-	 * 
-	 * @return A List of URLs (never null)
-	 */
-	@Override
-	protected List<String> lookupURLs() {
-		// Retrieve list of potential hosts from ZooKeeper
-		List<String> hosts = retrieveHosts();
-		
-		// Validate access to hosts using cheap ping style operation
-		List<String> validatedHosts = validateHosts(hosts,"/","text/xml");
-
-		// Randomize the hosts list for simple load balancing
-		if (!validatedHosts.isEmpty()) {
-			Collections.shuffle(validatedHosts);
-		}
-
-		return validatedHosts;
-	}
-
-	protected String getServiceName() {
-		return "WEBHBASE";
-	};
-
-	// -------------------------------------------------------------------------------------
-	// Private methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * @return Retrieve lists of hosts from ZooKeeper
-	 */
-	private List<String> retrieveHosts()
-	{
-		List<String> serverHosts = new ArrayList<>();
-		
-		CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
-				.connectString(getZookeeperEnsemble())
-				.retryPolicy(new ExponentialBackoffRetry(1000, 3))
-				.build();
-		
-		try {
-			zooKeeperClient.start();
-			
-			// Retrieve list of all region server hosts
-			List<String> serverNodes = zooKeeperClient.getChildren().forPath("/" + zookeeperNamespace + "/rs");
-			
-			for (String serverNode : serverNodes) {
-				String serverURL = constructURL(serverNode);
-				serverHosts.add(serverURL);
-			}
-		} catch (Exception e) {
-			LOG.failedToGetZookeeperUrls(e);
-			throw new RuntimeException(e);
-		} finally {
-			// Close the client connection with ZooKeeper
-			if (zooKeeperClient != null) {
-				zooKeeperClient.close();
-			}
-		}
-		
-		return serverHosts;
-	}
-	
-	/**
-	 * Given a String of the format "host,number,number" convert to a URL of the format
-	 * "http://host:port".
-	 * 
-	 * @param serverInfo Server Info from Zookeeper (required)
-	 * 
-	 * @return URL to HBASE
-	 */
-	private String constructURL(String serverInfo) {
-		String scheme = "http";
-
-		StringBuffer buffer = new StringBuffer();
-		buffer.append(scheme);
-		buffer.append("://");
-		// Strip off the host name 
-		buffer.append(serverInfo.substring(0,serverInfo.indexOf(",")));
-		buffer.append(":");
-		buffer.append(PORT_NUMBER);
-		
-		return buffer.toString();
-	}
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManager.java
deleted file mode 100644
index c68c107..0000000
--- a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManager.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import net.minidev.json.JSONObject;
-import net.minidev.json.parser.JSONParser;
-import net.minidev.json.parser.ParseException;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Implementation of URLManager intended for query of Zookeeper for active Kafka hosts. 
- * 
- * The assumption is that the Confluent REST Proxy will be installed on the same host.  For safety
- * reasons, the REST Server is pinged for access before inclusion in the list of returned hosts.
- * 
- * In the event of a failure via markFailed, Zookeeper is queried again for active
- * host information.
- * 
- * When configuring the HAProvider in the topology, the zookeeperEnsemble
- * attribute must be set to a comma delimited list of the host and port number,
- * i.e. host1:2181,host2:2181.
- */
-public class KafkaZookeeperURLManager extends BaseZookeeperURLManager {
-	/**
-	 * Default Port Number for Confluent Kafka REST Server
-	 */
-	private static final int PORT_NUMBER = 8082;
-	/**
-	 * Base path for retrieval from Zookeeper
-	 */
-	private static final String BASE_PATH = "/brokers/ids";
-	
-	// -------------------------------------------------------------------------------------
-	// Abstract methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * Look within Zookeeper under the /brokers/ids branch for active Kafka hosts
-	 * 
-	 * @return A List of URLs (never null)
-	 */
-	@Override
-	protected List<String> lookupURLs() {
-		// Retrieve list of potential hosts from ZooKeeper
-		List<String> hosts = retrieveHosts();
-		
-		// Validate access to hosts using cheap ping style operation
-		List<String> validatedHosts = validateHosts(hosts,"/topics","application/vnd.kafka.v2+json");
-
-		// Randomize the hosts list for simple load balancing
-		if (!validatedHosts.isEmpty()) {
-			Collections.shuffle(validatedHosts);
-		}
-
-		return validatedHosts;
-	}
-
-	protected String getServiceName() {
-		return "KAFKA";
-	};
-
-	// -------------------------------------------------------------------------------------
-	// Private methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * @return Retrieve lists of hosts from ZooKeeper
-	 */
-	private List<String> retrieveHosts()
-	{
-		List<String> serverHosts = new ArrayList<>();
-		
-		CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
-				.connectString(getZookeeperEnsemble())
-				.retryPolicy(new ExponentialBackoffRetry(1000, 3))
-				.build();
-		
-		try {
-			zooKeeperClient.start();
-
-			// Retrieve list of host URLs from ZooKeeper
-			List<String> brokers = zooKeeperClient.getChildren().forPath(BASE_PATH);
-
-			for (String broker : brokers) {
-				String serverInfo = new String(zooKeeperClient.getData().forPath(BASE_PATH + "/" + broker), Charset.forName("UTF-8"));
-				
-				String serverURL = constructURL(serverInfo);
-				serverHosts.add(serverURL);
-			}
-		} catch (Exception e) {
-			LOG.failedToGetZookeeperUrls(e);
-			throw new RuntimeException(e);
-		} finally {
-			// Close the client connection with ZooKeeper
-			if (zooKeeperClient != null) {
-				zooKeeperClient.close();
-			}
-		}
-		
-		return serverHosts;
-	}
-	
-	/**
-	 * Given a String of the format "{"jmx_port":-1,"timestamp":"1505763958072","endpoints":["PLAINTEXT://host:6667"],"host":"host","version":3,"port":6667}" 
-	 * convert to a URL of the format "http://host:port".
-	 * 
-	 * @param serverInfo Server Info in JSON Format from Zookeeper (required)
-	 * 
-	 * @return URL to Kafka
-	 * @throws ParseException 
-	 */
-	private String constructURL(String serverInfo) throws ParseException {
-		String scheme = "http";
-
-		StringBuffer buffer = new StringBuffer();
-		
-		buffer.append(scheme);
-		buffer.append("://");
-		
-		JSONParser parser = new JSONParser(JSONParser.DEFAULT_PERMISSIVE_MODE);
-		JSONObject obj = (JSONObject) parser.parse(serverInfo);
-		buffer.append(obj.get("host"));
-		
-		buffer.append(":");
-		buffer.append(PORT_NUMBER);
-
-		return buffer.toString();
-	}	
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManager.java
deleted file mode 100644
index f612e9b..0000000
--- a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManager.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Implementation of URLManager intended for query of Zookeeper for active SOLR Cloud hosts. 
- * In the event of a failure via markFailed, Zookeeper is queried again for active
- * host information.
- * 
- * When configuring the HAProvider in the topology, the zookeeperEnsemble
- * attribute must be set to a comma delimited list of the host and port number,
- * i.e. host1:2181,host2:2181.
- */
-public class SOLRZookeeperURLManager extends BaseZookeeperURLManager {
-
-	// -------------------------------------------------------------------------------------
-	// Abstract methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * Look within Zookeeper under the /live_nodes branch for active SOLR hosts
-	 * 
-	 * @return A List of URLs (never null)
-	 */
-	@Override
-	protected List<String> lookupURLs() {
-		// Retrieve list of potential hosts from ZooKeeper
-		List<String> hosts = retrieveHosts();
-		
-		// Randomize the hosts list for simple load balancing
-		if (!hosts.isEmpty()) {
-			Collections.shuffle(hosts);
-		}
-
-		return hosts;
-	}
-
-	protected String getServiceName() {
-		return "SOLR";
-	};
-
-	// -------------------------------------------------------------------------------------
-	// Private methods
-	// -------------------------------------------------------------------------------------
-
-	/**
-	 * @return Retrieve lists of hosts from ZooKeeper
-	 */
-	private List<String> retrieveHosts()
-	{
-		List<String> serverHosts = new ArrayList<>();
-		
-		CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
-				.connectString(getZookeeperEnsemble())
-				.retryPolicy(new ExponentialBackoffRetry(1000, 3))
-				.build();
-		
-		try {
-			zooKeeperClient.start();
-			List<String> serverNodes = zooKeeperClient.getChildren().forPath("/live_nodes");
-			for (String serverNode : serverNodes) {
-				String serverURL = constructURL(serverNode);
-				serverHosts.add(serverURL);
-			}
-		} catch (Exception e) {
-			LOG.failedToGetZookeeperUrls(e);
-			throw new RuntimeException(e);
-		} finally {
-			// Close the client connection with ZooKeeper
-			if (zooKeeperClient != null) {
-				zooKeeperClient.close();
-			}
-		}
-
-		return serverHosts;
-	}
-	
-	/**
-	 * Given a String of the format "host:port_solr" convert to a URL of the format
-	 * "http://host:port/solr".
-	 * 
-	 * @param serverInfo Server Info from Zookeeper (required)
-	 * 
-	 * @return URL to SOLR
-	 */
-	private String constructURL(String serverInfo) {
-		String scheme = "http";
-
-		StringBuffer buffer = new StringBuffer();
-		buffer.append(scheme);
-		buffer.append("://");
-		buffer.append(serverInfo.replace("_", "/"));
-		return buffer.toString();
-	}
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/StringResponseHandler.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/StringResponseHandler.java b/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/StringResponseHandler.java
deleted file mode 100644
index 68b68c6..0000000
--- a/gateway-provider-ha/src/main/java/org/apache/hadoop/gateway/ha/provider/impl/StringResponseHandler.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import java.io.IOException;
-
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.ResponseHandler;
-import org.apache.http.util.EntityUtils;
-
-/**
- * Apache HttpClient ResponseHandler for String HttpResponse
- */
-public class StringResponseHandler implements ResponseHandler<String>
-{
-	@Override
-	public String handleResponse(HttpResponse response)
-	throws ClientProtocolException, IOException 
-	{
-		int status = response.getStatusLine().getStatusCode();
-		
-		if (status >= 200 && status < 300)
-		{
-			HttpEntity entity = response.getEntity();
-			return entity != null ? EntityUtils.toString(entity) : null;
-		}
-		else
-		{
-			throw new ClientProtocolException("Unexpected response status: " + status);
-		}
-	}
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/BaseZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/BaseZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/BaseZookeeperURLManager.java
new file mode 100644
index 0000000..2b18fc1
--- /dev/null
+++ b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/BaseZookeeperURLManager.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.knox.gateway.ha.provider.HaServiceConfig;
+import org.apache.knox.gateway.ha.provider.URLManager;
+import org.apache.knox.gateway.ha.provider.impl.i18n.HaMessages;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Base implementation of URLManager intended for query of Zookeeper active hosts. In
+ * the event of a failure via markFailed, Zookeeper is queried again for active
+ * host information.
+ * 
+ * When configuring the HAProvider in the topology, the zookeeperEnsemble attribute must be set to a
+ * comma-delimited list of host:port pairs, e.g. host1:2181,host2:2181.
+ */
+public abstract class BaseZookeeperURLManager implements URLManager {
+	protected static final HaMessages LOG = MessagesFactory.get(HaMessages.class);
+	/**
+	 * Host Ping Timeout
+	 */
+	private static final int TIMEOUT = 2000;
+
+	private String zooKeeperEnsemble;
+	private ConcurrentLinkedQueue<String> urls = new ConcurrentLinkedQueue<String>();
+
+	// -------------------------------------------------------------------------------------
+	// URLManager interface methods
+	// -------------------------------------------------------------------------------------
+
+	@Override
+	public boolean supportsConfig(HaServiceConfig config) {
+		if (!config.getServiceName().equalsIgnoreCase(getServiceName())) {
+			return false;
+		}
+		
+		String zookeeperEnsemble = config.getZookeeperEnsemble();
+		if (zookeeperEnsemble != null && zookeeperEnsemble.trim().length() > 0) {
+			return true;
+		}
+		
+		return false;
+	}
+
+	@Override
+	public void setConfig(HaServiceConfig config) {
+		zooKeeperEnsemble = config.getZookeeperEnsemble();
+		setURLs(lookupURLs());
+	}
+
+	@Override
+	public synchronized String getActiveURL() {
+		// None available so refresh
+		if (urls.isEmpty()) {
+			setURLs(lookupURLs());
+		}
+
+		return this.urls.peek();
+	}
+
+	@Override
+	public synchronized void setActiveURL(String url) {
+		throw new UnsupportedOperationException();
+	}
+
+	@Override
+	public synchronized List<String> getURLs() {
+		return Lists.newArrayList(this.urls.iterator());
+	}
+
+	@Override
+	public synchronized void markFailed(String url) {
+		// Capture complete URL of active host
+		String topURL = getActiveURL();
+
+		// Refresh URLs from ZooKeeper
+		setURLs(lookupURLs());
+
+		// Show failed URL and new URL
+		LOG.markedFailedUrl(topURL, getActiveURL());
+	}
+
+	@Override
+	public synchronized void setURLs(List<String> urls) {
+		if (urls != null && !urls.isEmpty()) {
+			this.urls.clear();
+			this.urls.addAll(urls);
+		}
+	}
+
+	// -------------------------------------------------------------------------------------
+	// Abstract methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * Look within ZooKeeper for the active hosts of the supported service
+	 * 
+	 * @return A List of URLs (never null)
+	 */
+	protected abstract List<String> lookupURLs();
+
+	/**
+	 * @return The name of the Knox Topology Service to support
+	 */
+	protected abstract String getServiceName();
+
+	// -------------------------------------------------------------------------------------
+	// Protected methods
+	// -------------------------------------------------------------------------------------
+
+	protected String getZookeeperEnsemble() {
+		return zooKeeperEnsemble;
+	}
+	
+	/**
+	 * Validate access to hosts using a simple, lightweight ping-style REST call.
+	 * 
+	 * @param hosts List of hosts to evaluate (required)
+	 * @param suffix Text to append to host (required) 
+	 * @param acceptHeader Used for Accept header (optional)
+	 * 
+	 * @return Hosts with successful access
+	 */
+	protected List<String> validateHosts(List<String> hosts, String suffix, String acceptHeader) {
+		List<String> result = new ArrayList<String>();
+		
+		CloseableHttpClient client = null;
+		
+		try {
+			// Construct an HttpClient with short-term timeouts
+			RequestConfig.Builder requestBuilder = RequestConfig.custom()
+					.setConnectTimeout(TIMEOUT)
+					.setSocketTimeout(TIMEOUT)
+					.setConnectionRequestTimeout(TIMEOUT);
+
+			client = HttpClientBuilder.create()
+					.setDefaultRequestConfig(requestBuilder.build())
+					.build();
+			
+			for(String host: hosts) {
+				try	{
+					HttpGet get = new HttpGet(host + suffix);
+					
+					if (acceptHeader != null) {
+						get.setHeader("Accept", acceptHeader);
+					}
+					
+					String response = client.execute(get, new StringResponseHandler());
+					
+					if (response != null) {
+						result.add(host);
+					}
+				}
+				catch (Exception ex) {
+					// ignore host
+				}
+			}
+		}
+		catch (Exception ex) {
+			// Ignore errors
+		}
+		finally	{
+			IOUtils.closeQuietly(client);
+		}
+		
+		return result;
+	}
+}
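
To make the base contract concrete, a minimal subclass only has to wire up
the two abstract hooks. The sketch below is purely illustrative: the class
name, service name and URLs are invented, and a real implementation would
query ZooKeeper from lookupURLs() via getZookeeperEnsemble(), as the
managers that follow do.

import java.util.ArrayList;
import java.util.List;

public class ExampleZookeeperURLManager extends BaseZookeeperURLManager {

  @Override
  protected List<String> lookupURLs() {
    // A real implementation would query ZooKeeper here; a fixed list
    // keeps the sketch self-contained.
    List<String> urls = new ArrayList<>();
    urls.add("http://host1:8080");
    urls.add("http://host2:8080");
    return urls; // contract: never null
  }

  @Override
  protected String getServiceName() {
    return "MYSERVICE"; // hypothetical topology service name
  }
}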

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManager.java
new file mode 100644
index 0000000..461ea98
--- /dev/null
+++ b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManager.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Implementation of URLManager intended for query of Zookeeper for active HBase RegionServer hosts.
+ *  
+ * The assumption is that the HBase REST Server will be installed on the same host.  For safety
+ * reasons, the REST Server is pinged for access before inclusion in the list of returned hosts.
+ * 
+ * In the event of a failure via markFailed, Zookeeper is queried again for active
+ * host information.
+ * 
+ * When configuring the HAProvider in the topology, the zookeeperEnsemble
+ * attribute must be set to a comma-delimited list of host:port pairs,
+ * e.g. host1:2181,host2:2181.
+ */
+public class HBaseZookeeperURLManager extends BaseZookeeperURLManager {
+	/**
+	 * Default Port Number for HBase REST Server
+	 */
+	private static final int PORT_NUMBER = 8080;
+	
+	private String zookeeperNamespace = "hbase-unsecure";
+	
+	// -------------------------------------------------------------------------------------
+	// Abstract methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * Look within Zookeeper under the /hbase-unsecure/rs branch for active HBase RegionServer hosts
+	 * 
+	 * @return A List of URLs (never null)
+	 */
+	@Override
+	protected List<String> lookupURLs() {
+		// Retrieve list of potential hosts from ZooKeeper
+		List<String> hosts = retrieveHosts();
+		
+		// Validate access to hosts using cheap ping style operation
+		List<String> validatedHosts = validateHosts(hosts,"/","text/xml");
+
+		// Randomize the hosts list for simple load balancing
+		if (!validatedHosts.isEmpty()) {
+			Collections.shuffle(validatedHosts);
+		}
+
+		return validatedHosts;
+	}
+
+	@Override
+	protected String getServiceName() {
+		return "WEBHBASE";
+	}
+
+	// -------------------------------------------------------------------------------------
+	// Private methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * @return The list of hosts retrieved from ZooKeeper
+	 */
+	private List<String> retrieveHosts()
+	{
+		List<String> serverHosts = new ArrayList<>();
+		
+		CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
+				.connectString(getZookeeperEnsemble())
+				.retryPolicy(new ExponentialBackoffRetry(1000, 3))
+				.build();
+		
+		try {
+			zooKeeperClient.start();
+			
+			// Retrieve list of all region server hosts
+			List<String> serverNodes = zooKeeperClient.getChildren().forPath("/" + zookeeperNamespace + "/rs");
+			
+			for (String serverNode : serverNodes) {
+				String serverURL = constructURL(serverNode);
+				serverHosts.add(serverURL);
+			}
+		} catch (Exception e) {
+			LOG.failedToGetZookeeperUrls(e);
+			throw new RuntimeException(e);
+		} finally {
+			// Close the client connection with ZooKeeper
+			if (zooKeeperClient != null) {
+				zooKeeperClient.close();
+			}
+		}
+		
+		return serverHosts;
+	}
+	
+	/**
+	 * Given a String of the format "host,number,number" convert to a URL of the format
+	 * "http://host:port".
+	 * 
+	 * @param serverInfo Server Info from Zookeeper (required)
+	 * 
+	 * @return URL to HBASE
+	 */
+	private String constructURL(String serverInfo) {
+		String scheme = "http";
+
+		StringBuilder buffer = new StringBuilder();
+		buffer.append(scheme);
+		buffer.append("://");
+		// Strip off the host name 
+		buffer.append(serverInfo.substring(0,serverInfo.indexOf(",")));
+		buffer.append(":");
+		buffer.append(PORT_NUMBER);
+		
+		return buffer.toString();
+	}
+}
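
As a quick illustration of the znode-to-URL mapping in constructURL above,
assuming a region server znode named in the usual "host,port,startcode"
form (the hostname below is invented):

public class HBaseUrlExample {
  public static void main(String[] args) {
    // Hypothetical znode name found under /hbase-unsecure/rs
    String serverInfo = "regionhost.example.com,16020,1505763958072";
    // Keep the host, drop the rest, append the REST server port
    String url = "http://" + serverInfo.substring(0, serverInfo.indexOf(',')) + ":" + 8080;
    System.out.println(url); // prints http://regionhost.example.com:8080
  }
}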

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManager.java
new file mode 100644
index 0000000..64d59a4
--- /dev/null
+++ b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManager.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import net.minidev.json.JSONObject;
+import net.minidev.json.parser.JSONParser;
+import net.minidev.json.parser.ParseException;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Implementation of URLManager intended for query of Zookeeper for active Kafka hosts. 
+ * 
+ * The assumption is that the Confluent REST Proxy will be installed on the same host.  For safety
+ * reasons, the REST Server is pinged for access before inclusion in the list of returned hosts.
+ * 
+ * In the event of a failure via markFailed, Zookeeper is queried again for active
+ * host information.
+ * 
+ * When configuring the HAProvider in the topology, the zookeeperEnsemble
+ * attribute must be set to a comma-delimited list of host:port pairs,
+ * e.g. host1:2181,host2:2181.
+ */
+public class KafkaZookeeperURLManager extends BaseZookeeperURLManager {
+	/**
+	 * Default Port Number for Confluent Kafka REST Server
+	 */
+	private static final int PORT_NUMBER = 8082;
+	/**
+	 * Base path for retrieval from Zookeeper
+	 */
+	private static final String BASE_PATH = "/brokers/ids";
+	
+	// -------------------------------------------------------------------------------------
+	// Abstract methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * Look within Zookeeper under the /brokers/ids branch for active Kafka hosts
+	 * 
+	 * @return A List of URLs (never null)
+	 */
+	@Override
+	protected List<String> lookupURLs() {
+		// Retrieve list of potential hosts from ZooKeeper
+		List<String> hosts = retrieveHosts();
+		
+		// Validate access to hosts using cheap ping style operation
+		List<String> validatedHosts = validateHosts(hosts,"/topics","application/vnd.kafka.v2+json");
+
+		// Randomize the hosts list for simple load balancing
+		if (!validatedHosts.isEmpty()) {
+			Collections.shuffle(validatedHosts);
+		}
+
+		return validatedHosts;
+	}
+
+	@Override
+	protected String getServiceName() {
+		return "KAFKA";
+	}
+
+	// -------------------------------------------------------------------------------------
+	// Private methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * @return The list of hosts retrieved from ZooKeeper
+	 */
+	private List<String> retrieveHosts()
+	{
+		List<String> serverHosts = new ArrayList<>();
+		
+		CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
+				.connectString(getZookeeperEnsemble())
+				.retryPolicy(new ExponentialBackoffRetry(1000, 3))
+				.build();
+		
+		try {
+			zooKeeperClient.start();
+
+			// Retrieve list of host URLs from ZooKeeper
+			List<String> brokers = zooKeeperClient.getChildren().forPath(BASE_PATH);
+
+			for (String broker : brokers) {
+				String serverInfo = new String(zooKeeperClient.getData().forPath(BASE_PATH + "/" + broker), Charset.forName("UTF-8"));
+				
+				String serverURL = constructURL(serverInfo);
+				serverHosts.add(serverURL);
+			}
+		} catch (Exception e) {
+			LOG.failedToGetZookeeperUrls(e);
+			throw new RuntimeException(e);
+		} finally {
+			// Close the client connection with ZooKeeper
+			if (zooKeeperClient != null) {
+				zooKeeperClient.close();
+			}
+		}
+		
+		return serverHosts;
+	}
+	
+	/**
+	 * Given a String of the format "{"jmx_port":-1,"timestamp":"1505763958072","endpoints":["PLAINTEXT://host:6667"],"host":"host","version":3,"port":6667}" 
+	 * convert to a URL of the format "http://host:port".
+	 * 
+	 * @param serverInfo Server Info in JSON Format from Zookeeper (required)
+	 * 
+	 * @return URL to Kafka
+	 * @throws ParseException 
+	 */
+	private String constructURL(String serverInfo) throws ParseException {
+		String scheme = "http";
+
+		StringBuilder buffer = new StringBuilder();
+		
+		buffer.append(scheme);
+		buffer.append("://");
+		
+		JSONParser parser = new JSONParser(JSONParser.DEFAULT_PERMISSIVE_MODE);
+		JSONObject obj = (JSONObject) parser.parse(serverInfo);
+		buffer.append(obj.get("host"));
+		
+		buffer.append(":");
+		buffer.append(PORT_NUMBER);
+
+		return buffer.toString();
+	}	
+}
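
For reference, the same JSON-to-URL conversion as a standalone sketch, fed
the broker registration sample from the javadoc above. Note that the
manager deliberately appends the REST proxy port (8082) rather than the
broker's own port from the JSON:

import net.minidev.json.JSONObject;
import net.minidev.json.parser.JSONParser;

public class KafkaUrlExample {
  public static void main(String[] args) throws Exception {
    // Broker registration sample copied from the javadoc above
    String serverInfo = "{\"jmx_port\":-1,\"timestamp\":\"1505763958072\","
        + "\"endpoints\":[\"PLAINTEXT://host:6667\"],\"host\":\"host\",\"version\":3,\"port\":6667}";
    JSONParser parser = new JSONParser(JSONParser.DEFAULT_PERMISSIVE_MODE);
    JSONObject obj = (JSONObject) parser.parse(serverInfo);
    System.out.println("http://" + obj.get("host") + ":" + 8082); // http://host:8082
  }
}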

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManager.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManager.java b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManager.java
new file mode 100644
index 0000000..af17af5
--- /dev/null
+++ b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManager.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Implementation of URLManager intended for query of Zookeeper for active SOLR Cloud hosts. 
+ * In the event of a failure via markFailed, Zookeeper is queried again for active
+ * host information.
+ * 
+ * When configuring the HAProvider in the topology, the zookeeperEnsemble
+ * attribute must be set to a comma-delimited list of host:port pairs,
+ * e.g. host1:2181,host2:2181.
+ */
+public class SOLRZookeeperURLManager extends BaseZookeeperURLManager {
+
+	// -------------------------------------------------------------------------------------
+	// Abstract methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * Look within Zookeeper under the /live_nodes branch for active SOLR hosts
+	 * 
+	 * @return A List of URLs (never null)
+	 */
+	@Override
+	protected List<String> lookupURLs() {
+		// Retrieve list of potential hosts from ZooKeeper
+		List<String> hosts = retrieveHosts();
+		
+		// Randomize the hosts list for simple load balancing
+		if (!hosts.isEmpty()) {
+			Collections.shuffle(hosts);
+		}
+
+		return hosts;
+	}
+
+	@Override
+	protected String getServiceName() {
+		return "SOLR";
+	}
+
+	// -------------------------------------------------------------------------------------
+	// Private methods
+	// -------------------------------------------------------------------------------------
+
+	/**
+	 * @return The list of hosts retrieved from ZooKeeper
+	 */
+	private List<String> retrieveHosts()
+	{
+		List<String> serverHosts = new ArrayList<>();
+		
+		CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
+				.connectString(getZookeeperEnsemble())
+				.retryPolicy(new ExponentialBackoffRetry(1000, 3))
+				.build();
+		
+		try {
+			zooKeeperClient.start();
+			List<String> serverNodes = zooKeeperClient.getChildren().forPath("/live_nodes");
+			for (String serverNode : serverNodes) {
+				String serverURL = constructURL(serverNode);
+				serverHosts.add(serverURL);
+			}
+		} catch (Exception e) {
+			LOG.failedToGetZookeeperUrls(e);
+			throw new RuntimeException(e);
+		} finally {
+			// Close the client connection with ZooKeeper
+			if (zooKeeperClient != null) {
+				zooKeeperClient.close();
+			}
+		}
+
+		return serverHosts;
+	}
+	
+	/**
+	 * Given a String of the format "host:port_solr" convert to a URL of the format
+	 * "http://host:port/solr".
+	 * 
+	 * @param serverInfo Server Info from Zookeeper (required)
+	 * 
+	 * @return URL to SOLR
+	 */
+	private String constructURL(String serverInfo) {
+		String scheme = "http";
+
+		StringBuilder buffer = new StringBuilder();
+		buffer.append(scheme);
+		buffer.append("://");
+		buffer.append(serverInfo.replace("_", "/"));
+		return buffer.toString();
+	}
+}
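
The SOLR mapping is a plain string substitution; a sketch using a node name
of the same shape as the test fixtures below:

public class SolrUrlExample {
  public static void main(String[] args) {
    String serverInfo = "host1:8983_solr"; // /live_nodes entry, "host:port_solr"
    String url = "http://" + serverInfo.replace("_", "/");
    System.out.println(url); // prints http://host1:8983/solr
  }
}

Note that replace() rewrites every underscore, so this relies on host names
not containing underscores themselves.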

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/StringResponseHandler.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/StringResponseHandler.java b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/StringResponseHandler.java
new file mode 100644
index 0000000..d9121b6
--- /dev/null
+++ b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/provider/impl/StringResponseHandler.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import java.io.IOException;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.ResponseHandler;
+import org.apache.http.util.EntityUtils;
+
+/**
+ * Apache HttpClient ResponseHandler for String HttpResponse
+ */
+public class StringResponseHandler implements ResponseHandler<String>
+{
+	@Override
+	public String handleResponse(HttpResponse response)
+	throws ClientProtocolException, IOException 
+	{
+		int status = response.getStatusLine().getStatusCode();
+		
+		if (status >= 200 && status < 300)
+		{
+			HttpEntity entity = response.getEntity();
+			return entity != null ? EntityUtils.toString(entity) : null;
+		}
+		else
+		{
+			throw new ClientProtocolException("Unexpected response status: " + status);
+		}
+	}
+}
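
Typical use mirrors the ping in BaseZookeeperURLManager.validateHosts(). A
minimal sketch, assuming the handler is on the classpath and using an
invented target URL:

import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;

public class PingExample {
  public static void main(String[] args) throws Exception {
    try (CloseableHttpClient client = HttpClientBuilder.create().build()) {
      // Returns the body for 2xx responses, throws ClientProtocolException otherwise
      String body = client.execute(new HttpGet("http://host1:8080/"), new StringResponseHandler());
      System.out.println(body != null ? body : "empty 2xx response");
    }
  }
}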

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/main/resources/META-INF/services/org.apache.knox.gateway.ha.provider.URLManager
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/main/resources/META-INF/services/org.apache.knox.gateway.ha.provider.URLManager b/gateway-provider-ha/src/main/resources/META-INF/services/org.apache.knox.gateway.ha.provider.URLManager
index e579be8..d6b9608 100644
--- a/gateway-provider-ha/src/main/resources/META-INF/services/org.apache.knox.gateway.ha.provider.URLManager
+++ b/gateway-provider-ha/src/main/resources/META-INF/services/org.apache.knox.gateway.ha.provider.URLManager
@@ -16,4 +16,7 @@
 # limitations under the License.
 ##########################################################################
 
-org.apache.knox.gateway.ha.provider.impl.HS2ZookeeperURLManager
\ No newline at end of file
+org.apache.knox.gateway.ha.provider.impl.HS2ZookeeperURLManager
+org.apache.knox.gateway.ha.provider.impl.SOLRZookeeperURLManager
+org.apache.knox.gateway.ha.provider.impl.KafkaZookeeperURLManager
+org.apache.knox.gateway.ha.provider.impl.HBaseZookeeperURLManager
\ No newline at end of file
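
These entries are standard java.util.ServiceLoader registrations;
URLManagerLoader walks the registered implementations and picks the first
manager whose supportsConfig() accepts the service configuration. A rough
sketch of the discovery idea (not the exact loader code):

import java.util.ServiceLoader;

import org.apache.knox.gateway.ha.provider.HaServiceConfig;
import org.apache.knox.gateway.ha.provider.URLManager;

public class LoaderSketch {
  static URLManager load(HaServiceConfig config) {
    for (URLManager candidate : ServiceLoader.load(URLManager.class)) {
      if (candidate.supportsConfig(config)) {
        candidate.setConfig(config);
        return candidate; // e.g. KafkaZookeeperURLManager for service "KAFKA"
      }
    }
    return null; // caller falls back to a default manager
  }
}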

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java b/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java
deleted file mode 100644
index 087651e..0000000
--- a/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import java.io.IOException;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.TestingCluster;
-import org.apache.hadoop.gateway.ha.provider.HaServiceConfig;
-import org.apache.hadoop.gateway.ha.provider.URLManager;
-import org.apache.hadoop.gateway.ha.provider.URLManagerLoader;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Simple unit tests for HBaseZookeeperURLManager.
- * 
- * @see HBaseZookeeperURLManager
- */
-public class HBaseZookeeperURLManagerTest {
-	
-  private TestingCluster cluster;
-
-  @Before
-  public void setup() throws Exception {
-    cluster = new TestingCluster(3);
-    cluster.start();
-
-    CuratorFramework zooKeeperClient =
-        CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
-            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
-
-    zooKeeperClient.start();
-    zooKeeperClient.create().forPath("/hbase-unsecure");
-    zooKeeperClient.create().forPath("/hbase-unsecure/rs");
-    zooKeeperClient.close();
-  }
-
-  @After
-  public void teardown() throws IOException {
-    cluster.stop();
-  }
-
-  @Test
-  public void testHBaseZookeeperURLManagerLoading() {
-    HaServiceConfig config = new DefaultHaServiceConfig("WEBHBASE");
-    config.setEnabled(true);
-    config.setZookeeperEnsemble(cluster.getConnectString());
-    URLManager manager = URLManagerLoader.loadURLManager(config);
-    Assert.assertNotNull(manager);
-    Assert.assertTrue(manager instanceof HBaseZookeeperURLManager);
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java b/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java
deleted file mode 100644
index 50dedbf..0000000
--- a/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import java.io.IOException;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.TestingCluster;
-import org.apache.hadoop.gateway.ha.provider.HaServiceConfig;
-import org.apache.hadoop.gateway.ha.provider.URLManager;
-import org.apache.hadoop.gateway.ha.provider.URLManagerLoader;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Simple unit tests for KafkaZookeeperURLManager.
- * 
- * @see KafkaZookeeperURLManager
- */
-public class KafkaZookeeperURLManagerTest {
-  private TestingCluster cluster;
-
-  @Before
-  public void setup() throws Exception {
-    cluster = new TestingCluster(3);
-    cluster.start();
-
-    CuratorFramework zooKeeperClient =
-        CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
-            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
-
-    zooKeeperClient.start();
-    zooKeeperClient.create().forPath("/brokers");
-    zooKeeperClient.create().forPath("/brokers/ids");
-    zooKeeperClient.close();
-  }
-
-  @After
-  public void teardown() throws IOException {
-    cluster.stop();
-  }
-	
-  @Test
-  public void testKafkaZookeeperURLManagerLoading() {
-    HaServiceConfig config = new DefaultHaServiceConfig("KAFKA");
-    config.setEnabled(true);
-    config.setZookeeperEnsemble(cluster.getConnectString());
-    URLManager manager = URLManagerLoader.loadURLManager(config);
-    Assert.assertNotNull(manager);
-    Assert.assertTrue(manager instanceof KafkaZookeeperURLManager);
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java b/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java
deleted file mode 100644
index 6cc6fa7..0000000
--- a/gateway-provider-ha/src/test/java/org/apache/hadoop/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.ha.provider.impl;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.TestingCluster;
-import org.apache.hadoop.gateway.ha.provider.HaServiceConfig;
-import org.apache.hadoop.gateway.ha.provider.URLManager;
-import org.apache.hadoop.gateway.ha.provider.URLManagerLoader;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.TreeSet;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Simple unit tests for SOLRZookeeperURLManager.
- * 
- * @see SOLRZookeeperURLManager
- */
-public class SOLRZookeeperURLManagerTest {
-
-  private TestingCluster cluster;
-  private SOLRZookeeperURLManager manager;
-
-  @Before
-  public void setup() throws Exception {
-    cluster = new TestingCluster(3);
-    cluster.start();
-
-    CuratorFramework zooKeeperClient =
-        CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
-            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
-
-    zooKeeperClient.start();
-    zooKeeperClient.create().forPath("/live_nodes");
-    zooKeeperClient.create().forPath("/live_nodes/host1:8983_solr");
-    zooKeeperClient.create().forPath("/live_nodes/host2:8983_solr");
-    zooKeeperClient.create().forPath("/live_nodes/host3:8983_solr");
-    zooKeeperClient.close();
-    manager = new SOLRZookeeperURLManager();
-    HaServiceConfig config = new DefaultHaServiceConfig("SOLR");
-    config.setEnabled(true);
-    config.setZookeeperEnsemble(cluster.getConnectString());
-    manager.setConfig(config);
-  }
-
-  @After
-  public void teardown() throws IOException {
-    cluster.stop();
-  }
-
-  @Test
-  public void testURLs() throws Exception {
-    List<String> urls = manager.getURLs();
-    Assert.assertNotNull(urls);
-
-    // Order of URLS is not deterministic out of Zookeeper
-    // So we just check for expected values
-    
-    TreeSet<String> expected = new TreeSet<String>();
-
-    expected.add("http://host1:8983/solr");
-    expected.add("http://host2:8983/solr");
-    expected.add("http://host3:8983/solr");
-    
-    for(String url : urls)
-    {
-    	assertTrue(expected.contains(url));
-    	expected.remove(url);
-    }
-    
-    assertEquals(0,expected.size());
-    
-    // Unable to test markFailed because the SOLRZookeeperURLManager always does a refresh on Zookeeper contents.
-  }
-
-  @Test
-  public void testSOLRZookeeperURLManagerLoading() {
-    HaServiceConfig config = new DefaultHaServiceConfig("SOLR");
-    config.setEnabled(true);
-    config.setZookeeperEnsemble(cluster.getConnectString());
-    URLManager manager = URLManagerLoader.loadURLManager(config);
-    Assert.assertNotNull(manager);
-    Assert.assertTrue(manager instanceof SOLRZookeeperURLManager);
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java
new file mode 100644
index 0000000..b08576d
--- /dev/null
+++ b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/HBaseZookeeperURLManagerTest.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import java.io.IOException;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.TestingCluster;
+import org.apache.knox.gateway.ha.provider.HaServiceConfig;
+import org.apache.knox.gateway.ha.provider.URLManager;
+import org.apache.knox.gateway.ha.provider.URLManagerLoader;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Simple unit tests for HBaseZookeeperURLManager.
+ * 
+ * @see HBaseZookeeperURLManager
+ */
+public class HBaseZookeeperURLManagerTest {
+
+  private TestingCluster cluster;
+
+  @Before
+  public void setup() throws Exception {
+    cluster = new TestingCluster(3);
+    cluster.start();
+
+    CuratorFramework zooKeeperClient =
+        CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
+            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
+
+    zooKeeperClient.start();
+    zooKeeperClient.create().forPath("/hbase-unsecure");
+    zooKeeperClient.create().forPath("/hbase-unsecure/rs");
+    zooKeeperClient.close();
+  }
+
+  @After
+  public void teardown() throws IOException {
+    cluster.stop();
+  }
+
+  @Test
+  public void testHBaseZookeeperURLManagerLoading() {
+    HaServiceConfig config = new DefaultHaServiceConfig("WEBHBASE");
+    config.setEnabled(true);
+    config.setZookeeperEnsemble(cluster.getConnectString());
+    URLManager manager = URLManagerLoader.loadURLManager(config);
+    Assert.assertNotNull(manager);
+    Assert.assertTrue(manager instanceof HBaseZookeeperURLManager);
+  }
+}
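
The Curator bootstrap in setup() is repeated almost verbatim across the ZooKeeper URL manager tests in this change. A minimal sketch of a shared helper, assuming nothing beyond the Curator calls already used above (the ZkTestSupport name and ensureZNodes method are illustrative, not part of this patch):

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    final class ZkTestSupport {
      // Creates each given path in the test ensemble, parents first, then
      // closes the client; callers pass cluster.getConnectString().
      static void ensureZNodes(String connectString, String... paths) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(connectString)
            .retryPolicy(new ExponentialBackoffRetry(1000, 3))
            .build();
        client.start();
        try {
          for (String path : paths) {
            client.create().forPath(path);
          }
        } finally {
          client.close();
        }
      }
    }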

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java
new file mode 100644
index 0000000..1c42ccc
--- /dev/null
+++ b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/KafkaZookeeperURLManagerTest.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import java.io.IOException;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.TestingCluster;
+import org.apache.knox.gateway.ha.provider.HaServiceConfig;
+import org.apache.knox.gateway.ha.provider.URLManager;
+import org.apache.knox.gateway.ha.provider.URLManagerLoader;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Simple unit tests for KafkaZookeeperURLManager.
+ * 
+ * @see KafkaZookeeperURLManager
+ */
+public class KafkaZookeeperURLManagerTest {
+  private TestingCluster cluster;
+
+  @Before
+  public void setup() throws Exception {
+    cluster = new TestingCluster(3);
+    cluster.start();
+
+    CuratorFramework zooKeeperClient =
+        CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
+            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
+
+    zooKeeperClient.start();
+    zooKeeperClient.create().forPath("/brokers");
+    zooKeeperClient.create().forPath("/brokers/ids");
+    zooKeeperClient.close();
+  }
+
+  @After
+  public void teardown() throws IOException {
+    cluster.stop();
+  }
+
+  @Test
+  public void testKafkaZookeeperURLManagerLoading() {
+    HaServiceConfig config = new DefaultHaServiceConfig("KAFKA");
+    config.setEnabled(true);
+    config.setZookeeperEnsemble(cluster.getConnectString());
+    URLManager manager = URLManagerLoader.loadURLManager(config);
+    Assert.assertNotNull(manager);
+    Assert.assertTrue(manager instanceof KafkaZookeeperURLManager);
+  }
+}
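
Note that the Kafka setup only creates the bare /brokers/ids parent, so there are no URLs to assert on, which is why only manager loading is tested here. Assuming KafkaZookeeperURLManager reads Kafka's standard broker registration JSON (an assumption; the parsing lives in the manager itself), a broker entry could be seeded inside setup() like this:

    // Kafka registers each broker as a JSON child of /brokers/ids.
    String brokerJson = "{\"host\":\"host1\",\"port\":9092,\"version\":3}";
    zooKeeperClient.create().forPath("/brokers/ids/0",
        brokerJson.getBytes(java.nio.charset.StandardCharsets.UTF_8));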

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java
new file mode 100644
index 0000000..ccbfae1
--- /dev/null
+++ b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/SOLRZookeeperURLManagerTest.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.ha.provider.impl;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.TestingCluster;
+import org.apache.knox.gateway.ha.provider.HaServiceConfig;
+import org.apache.knox.gateway.ha.provider.URLManager;
+import org.apache.knox.gateway.ha.provider.URLManagerLoader;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.TreeSet;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Simple unit tests for SOLRZookeeperURLManager.
+ * 
+ * @see SOLRZookeeperURLManager
+ */
+public class SOLRZookeeperURLManagerTest {
+
+  private TestingCluster cluster;
+  private SOLRZookeeperURLManager manager;
+
+  @Before
+  public void setup() throws Exception {
+    cluster = new TestingCluster(3);
+    cluster.start();
+
+    CuratorFramework zooKeeperClient =
+        CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
+            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
+
+    zooKeeperClient.start();
+    zooKeeperClient.create().forPath("/live_nodes");
+    zooKeeperClient.create().forPath("/live_nodes/host1:8983_solr");
+    zooKeeperClient.create().forPath("/live_nodes/host2:8983_solr");
+    zooKeeperClient.create().forPath("/live_nodes/host3:8983_solr");
+    zooKeeperClient.close();
+    manager = new SOLRZookeeperURLManager();
+    HaServiceConfig config = new DefaultHaServiceConfig("SOLR");
+    config.setEnabled(true);
+    config.setZookeeperEnsemble(cluster.getConnectString());
+    manager.setConfig(config);
+  }
+
+  @After
+  public void teardown() throws IOException {
+    cluster.stop();
+  }
+
+  @Test
+  public void testURLs() throws Exception {
+    List<String> urls = manager.getURLs();
+    Assert.assertNotNull(urls);
+
+    // The order of URLs returned from ZooKeeper is not deterministic,
+    // so we just check for the expected values.
+
+    TreeSet<String> expected = new TreeSet<>();
+
+    expected.add("http://host1:8983/solr");
+    expected.add("http://host2:8983/solr");
+    expected.add("http://host3:8983/solr");
+
+    for (String url : urls) {
+      assertTrue(expected.contains(url));
+      expected.remove(url);
+    }
+
+    assertEquals(0, expected.size());
+
+    // markFailed cannot be tested here because SOLRZookeeperURLManager
+    // always refreshes from the ZooKeeper contents.
+  }
+
+  @Test
+  public void testSOLRZookeeperURLManagerLoading() {
+    HaServiceConfig config = new DefaultHaServiceConfig("SOLR");
+    config.setEnabled(true);
+    config.setZookeeperEnsemble(cluster.getConnectString());
+    URLManager manager = URLManagerLoader.loadURLManager(config);
+    Assert.assertNotNull(manager);
+    Assert.assertTrue(manager instanceof SOLRZookeeperURLManager);
+  }
+}
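
Since the expected set in testURLs is consumed destructively, the same check can also be written as a single set equality, which produces a clearer failure message; a minimal alternative for the body of that test:

    // ZooKeeper returns children in no particular order, so compare as sets.
    TreeSet<String> expected = new TreeSet<>();
    expected.add("http://host1:8983/solr");
    expected.add("http://host2:8983/solr");
    expected.add("http://host3:8983/solr");
    assertEquals(expected, new TreeSet<>(manager.getURLs()));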

http://git-wip-us.apache.org/repos/asf/knox/blob/9577842b/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
----------------------------------------------------------------------
diff --git a/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor b/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
index d6b9608..76328d9 100644
--- a/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
+++ b/gateway-provider-rewrite-func-hostmap-static/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
@@ -16,7 +16,4 @@
 # limitations under the License.
 ##########################################################################
 
-org.apache.knox.gateway.ha.provider.impl.HS2ZookeeperURLManager
-org.apache.knox.gateway.ha.provider.impl.SOLRZookeeperURLManager
-org.apache.knox.gateway.ha.provider.impl.KafkaZookeeperURLManager
-org.apache.knox.gateway.ha.provider.impl.HBaseZookeeperURLManager
\ No newline at end of file
+org.apache.knox.gateway.hostmap.impl.HostmapDeploymentContributor
\ No newline at end of file
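
The removed entries are HA URL managers, not deployment contributors; they are presumably registered under the URLManager descriptor instead, which is how URLManagerLoader finds them in the tests above. Descriptor files like this one are consumed through the standard JDK ServiceLoader mechanism, where each non-comment line must name an implementation of the interface the file is named after; a minimal illustration:

    import java.util.ServiceLoader;
    import org.apache.knox.gateway.deploy.ProviderDeploymentContributor;

    final class ContributorListing {
      // ServiceLoader instantiates one contributor per descriptor line.
      static void listContributors() {
        for (ProviderDeploymentContributor contributor
            : ServiceLoader.load(ProviderDeploymentContributor.class)) {
          System.out.println(contributor.getClass().getName());
        }
      }
    }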


[32/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-spi/src/main/java/org/apache/knox/gateway/config/GatewayConfig.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/config/GatewayConfig.java
index 0ed7556,0000000..882bc71
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/config/GatewayConfig.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/config/GatewayConfig.java
@@@ -1,302 -1,0 +1,352 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.config;
 +
 +import java.net.InetSocketAddress;
 +import java.net.UnknownHostException;
 +import java.util.List;
 +import java.util.Map;
 +
 +public interface GatewayConfig {
 +
 +  // Used as the basis for any home directory that is not specified.
 +  static final String GATEWAY_HOME_VAR = "GATEWAY_HOME";
 +
 +  // Variable name for the location of configuration files edited by users
 +  static final String GATEWAY_CONF_HOME_VAR = "GATEWAY_CONF_HOME";
 +
 +  // Variable name for the location of data files generated by the gateway at runtime.
 +  static final String GATEWAY_DATA_HOME_VAR = "GATEWAY_DATA_HOME";
 +
 +  public static final String GATEWAY_CONFIG_ATTRIBUTE = "org.apache.knox.gateway.config";
 +  public static final String HADOOP_KERBEROS_SECURED = "gateway.hadoop.kerberos.secured";
 +  public static final String KRB5_CONFIG = "java.security.krb5.conf";
 +  public static final String KRB5_DEBUG = "sun.security.krb5.debug";
 +  public static final String KRB5_LOGIN_CONFIG = "java.security.auth.login.config";
 +  public static final String KRB5_USE_SUBJECT_CREDS_ONLY = "javax.security.auth.useSubjectCredsOnly";
 +  public static final String SIGNING_KEYSTORE_NAME = "gateway.signing.keystore.name";
 +  public static final String SIGNING_KEY_ALIAS = "gateway.signing.key.alias";
 +
++  String REMOTE_CONFIG_REGISTRY_TYPE = "type";
++  String REMOTE_CONFIG_REGISTRY_ADDRESS = "address";
++  String REMOTE_CONFIG_REGISTRY_NAMESPACE = "namespace";
++  String REMOTE_CONFIG_REGISTRY_AUTH_TYPE = "authType";
++  String REMOTE_CONFIG_REGISTRY_PRINCIPAL = "principal";
++  String REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS = "credentialAlias";
++  String REMOTE_CONFIG_REGISTRY_KEYTAB = "keytab";
++  String REMOTE_CONFIG_REGISTRY_USE_KEYTAB = "useKeytab";
++  String REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE = "useTicketCache";
++
 +  /**
 +   * The location of the gateway configuration.
 +   * Subdirectories will be: topologies
 +   * @return The location of the gateway configuration.
 +   */
 +  String getGatewayConfDir();
 +
 +  /**
 +   * The location of the gateway runtime generated data.
 +   * Subdirectories will be security, deployments
 +   * @return The location of the gateway runtime generated data.
 +   */
 +  String getGatewayDataDir();
 +
 +  /**
 +   * The location of the gateway service definitions' root directory
 +   * @return The location of the gateway services top level directory.
 +   */
 +  String getGatewayServicesDir();
 +
 +  /**
 +   * The location of the gateway applications' root directory
 +   * @return The location of the gateway applications top level directory.
 +   */
 +  String getGatewayApplicationsDir();
 +
 +  String getHadoopConfDir();
 +
 +  String getGatewayHost();
 +
 +  int getGatewayPort();
 +
 +  String getGatewayPath();
 +
++  String getGatewayProvidersConfigDir();
++
++  String getGatewayDescriptorsDir();
++
 +  String getGatewayTopologyDir();
 +
 +  String getGatewaySecurityDir();
 +
 +  String getGatewayDeploymentDir();
 +
 +  InetSocketAddress getGatewayAddress() throws UnknownHostException;
 +
 +  boolean isSSLEnabled();
 +  
 +  List<String> getExcludedSSLProtocols();
 +
 +  List<String> getIncludedSSLCiphers();
 +
 +  List<String> getExcludedSSLCiphers();
 +
 +  boolean isHadoopKerberosSecured();
 +
 +  String getKerberosConfig();
 +
 +  boolean isKerberosDebugEnabled();
 +
 +  String getKerberosLoginConfig();
 +
 +  String getDefaultTopologyName();
 +
 +  String getDefaultAppRedirectPath();
 +
 +  String getFrontendUrl();
 +
 +  boolean isClientAuthNeeded();
 +
 +  boolean isClientAuthWanted();
 +
 +  String getTruststorePath();
 +
 +  boolean getTrustAllCerts();
 +
 +  String getKeystoreType();
 +
 +  String getTruststoreType();
 +
 +  boolean isXForwardedEnabled();
 +
 +  String getEphemeralDHKeySize();
 +
 +  int getHttpClientMaxConnections();
 +
 +  int getHttpClientConnectionTimeout();
 +
 +  int getHttpClientSocketTimeout();
 +
 +  int getThreadPoolMax();
 +
 +  int getHttpServerRequestBuffer();
 +
 +  int getHttpServerRequestHeaderBuffer();
 +
 +  int getHttpServerResponseBuffer();
 +
 +  int getHttpServerResponseHeaderBuffer();
 +
 +  int getGatewayDeploymentsBackupVersionLimit();
 +
 +  long getGatewayDeploymentsBackupAgeLimit();
 +
 +  long getGatewayIdleTimeout();
 +
 +  String getSigningKeystoreName();
 +
 +  String getSigningKeyAlias();
 +
 +  List<String> getGlobalRulesServices();
 +
 +  /**
 +   * Returns true if the websocket feature is enabled; false otherwise.
 +   * Default is false.
 +   * @since 0.10
 +   * @return
 +   */
 +  boolean isWebsocketEnabled();
 +
 +  /**
 +   * Websocket connection max text message size.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketMaxTextMessageSize();
 +
 +  /**
 +   * Websocket connection max binary message size.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketMaxBinaryMessageSize();
 +
 +  /**
 +   * Websocket connection max text message buffer size.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketMaxTextMessageBufferSize();
 +
 +  /**
 +   * Websocket connection max binary message buffer size.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketMaxBinaryMessageBufferSize();
 +
 +  /**
 +   * Websocket connection input buffer size.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketInputBufferSize();
 +
 +  /**
 +   * Websocket connection async write timeout.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketAsyncWriteTimeout();
 +
 +  /**
 +   * Websocket connection idle timeout.
 +   * @since 0.10
 +   * @return
 +   */
 +  int getWebsocketIdleTimeout();
 +
 +  boolean isMetricsEnabled();
 +
 +  boolean isJmxMetricsReportingEnabled();
 +
 +  boolean isGraphiteMetricsReportingEnabled();
 +
 +  String getGraphiteHost();
 +
 +  int getGraphitePort();
 +
 +  int getGraphiteReportingFrequency();
 +
 +  /**
 +   * List of MIME Type to be compressed.
 +   * @since 0.12
 +   */
 +  List<String> getMimeTypesToCompress();
 +
 +  /**
 +   * Enable cookie scoping to gateway path
 +   *
 +   * @since 0.13
 +   */
 +  boolean isCookieScopingToPathEnabled();
 +
 +  /**
 +   * Configured name of the HTTP Header that is expected
 +   * to be set by a proxy in front of the gateway.
 +   * @return
 +   */
 +  String getHeaderNameForRemoteAddress();
 +
 +  /**
 +   * Configured Algorithm name to be used by the CryptoService
 +   * and MasterService implementations
 +   * @return
 +   */
 +  String getAlgorithm();
 +
 +  /**
 +   * Configured Algorithm name to be used by the CryptoService
 +   * for password based encryption
 +   * @return
 +   */
 +  String getPBEAlgorithm();
 +
 +  /**
 +   * Configured Transformation name to be used by the CryptoService
 +   * and MasterService implementations
 +   * @return
 +   */
 +  String getTransformation();
 +
 +  /**
 +   * Configured SaltSize to be used by the CryptoService
 +   * and MasterService implementations
 +   * @return
 +   */
 +  String getSaltSize();
 +
 +  /**
 +   * Configured IterationCount to be used by the CryptoService
 +   * and MasterService implementations
 +   * @return
 +   */
 +  String getIterationCount();
 +
 +  /**
 +   * Configured KeyLength to be used by the CryptoService
 +   * and MasterService implementations
 +   * @return
 +   */
 +  String getKeyLength();
 +
 +  /**
 +   * Map of Topology names and their ports.
 +   *
 +   * @return
 +   */
 +  Map<String, Integer> getGatewayPortMappings();
 +
 +  /**
 +   * Is the Port Mapping feature on?
 +   * @return
 +   */
 +  boolean isGatewayPortMappingEnabled();
 +
 +  /**
 +   * Is the Server header suppressed
 +   * @return
 +   */
 +  boolean isGatewayServerHeaderEnabled();
++  
++  /**
++   *
++   * @param type The type of cluster configuration monitor for which the interval should be returned.
++   *
++   * @return The polling interval configuration value, or -1 if it has not been configured.
++   */
++  int getClusterMonitorPollingInterval(String type);
++  
++  /**
++   *
++   * @param type The type of cluster configuration monitor for which the enabled status should be returned.
++   *
++   * @return The enabled status of the specified type of cluster configuration monitor.
++   */
++  boolean isClusterMonitorEnabled(String type);
++  
++  /**
++   * @return The list of the names of any remote registry configurations defined herein.
++   */
++  List<String> getRemoteRegistryConfigurationNames();
++
++  /**
++   *
++   * @param name The name of the remote registry configuration
++   *
++   * @return The configuration associated with the specified name.
++   */
++  String getRemoteRegistryConfiguration(String name);
++
++  /**
++   *
++   * @return The name of a remote configuration registry client
++   */
++  String getRemoteConfigurationMonitorClientName();
++
 +}
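
The new REMOTE_CONFIG_REGISTRY_* constants name the individual fields of a remote registry configuration. Assuming the string returned by getRemoteRegistryConfiguration(name) is a semicolon-delimited list of key=value pairs (an assumption based on the constant names; the authoritative format is defined by the implementation), a sketch of pulling it apart:

    import java.util.HashMap;
    import java.util.Map;

    final class RegistryConfigParser {
      // Hypothetical: splits "type=ZooKeeper;address=host:2181;authType=digest"
      // into a map keyed by the REMOTE_CONFIG_REGISTRY_* constant values.
      static Map<String, String> parse(String config) {
        Map<String, String> fields = new HashMap<>();
        for (String pair : config.split(";")) {
          int eq = pair.indexOf('=');
          if (eq > 0) {
            fields.put(pair.substring(0, eq).trim(), pair.substring(eq + 1).trim());
          }
        }
        return fields;
      }
    }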

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-spi/src/main/java/org/apache/knox/gateway/services/GatewayServices.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/services/GatewayServices.java
index 4a30800,0000000..8912c98
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/services/GatewayServices.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/services/GatewayServices.java
@@@ -1,46 -1,0 +1,50 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services;
 +
 +import java.util.Collection;
 +
 +import org.apache.knox.gateway.deploy.ProviderDeploymentContributor;
 +
 +public interface GatewayServices extends Service,
 +    ProviderDeploymentContributor {
 +
 +  public static final String GATEWAY_CLUSTER_ATTRIBUTE = "org.apache.knox.gateway.gateway.cluster";
 +  public static final String GATEWAY_SERVICES_ATTRIBUTE = "org.apache.knox.gateway.gateway.services";
 +
 +  public static final String SSL_SERVICE = "SSLService";
 +  public static final String CRYPTO_SERVICE = "CryptoService";
 +  public static final String ALIAS_SERVICE = "AliasService";
 +  public static final String KEYSTORE_SERVICE = "KeystoreService";
 +  public static final String TOKEN_SERVICE = "TokenService";
 +  public static final String SERVICE_REGISTRY_SERVICE = "ServiceRegistryService";
 +  public static final String HOST_MAPPING_SERVICE = "HostMappingService";
 +  public static final String SERVER_INFO_SERVICE = "ServerInfoService";
 +  public static final String TOPOLOGY_SERVICE = "TopologyService";
 +  public static final String SERVICE_DEFINITION_REGISTRY = "ServiceDefinitionRegistry";
 +  public static final String METRICS_SERVICE = "MetricsService";
 +
++  String REMOTE_REGISTRY_CLIENT_SERVICE = "RemoteConfigRegistryClientService";
++
++  String CLUSTER_CONFIGURATION_MONITOR_SERVICE = "ClusterConfigurationMonitorService";
++
 +  public abstract Collection<String> getServiceNames();
 +
 +  public abstract <T> T getService( String serviceName );
 +
 +}
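
Consumers resolve the two new services the same way as the existing ones, by key through getService. A minimal usage sketch (Object is used as the reference type here because the concrete service interfaces are introduced elsewhere in this change):

    import javax.servlet.ServletContext;
    import org.apache.knox.gateway.services.GatewayServices;

    final class ServiceLookup {
      static void lookupNewServices(ServletContext context) {
        GatewayServices services = (GatewayServices)
            context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
        // Keys added by this change; implementations register at gateway startup.
        Object remoteRegistryClients =
            services.getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
        Object clusterMonitors =
            services.getService(GatewayServices.CLUSTER_CONFIGURATION_MONITOR_SERVICE);
      }
    }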

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestConfig.java
----------------------------------------------------------------------
diff --cc gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestConfig.java
index 8abf5aa,0000000..79a9292
mode 100644,000000..100644
--- a/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestConfig.java
+++ b/gateway-test-release-utils/src/main/java/org/apache/knox/gateway/GatewayTestConfig.java
@@@ -1,617 -1,0 +1,653 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import org.apache.commons.lang.StringUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +
 +import java.io.File;
 +import java.net.InetSocketAddress;
 +import java.net.UnknownHostException;
 +import java.util.ArrayList;
++import java.util.Collections;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ConcurrentHashMap;
 +
 +public class GatewayTestConfig extends Configuration implements GatewayConfig {
 +
 +  /* Websocket defaults */
 +  public static final boolean DEFAULT_WEBSOCKET_FEATURE_ENABLED = false;
 +  public static final int DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE = Integer.MAX_VALUE;
 +  public static final int DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE = Integer.MAX_VALUE;
 +  public static final int DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE = 32768;
 +  public static final int DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE = 32768;
 +  public static final int DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE = 4096;
 +  public static final int DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT = 60000;
 +  public static final int DEFAULT_WEBSOCKET_IDLE_TIMEOUT = 300000;
 +
 +  private String gatewayHomeDir = "gateway-home";
 +  private String hadoopConfDir = "hadoop";
 +  private String gatewayHost = "localhost";
 +  private int gatewayPort = 0;
 +  private String gatewayPath = "gateway";
 +  private boolean hadoopKerberosSecured = false;
 +  private String kerberosConfig = "/etc/knox/conf/krb5.conf";
 +  private boolean kerberosDebugEnabled = false;
 +  private String kerberosLoginConfig = "/etc/knox/conf/krb5JAASLogin.conf";
 +  private String frontendUrl = null;
 +  private boolean xForwardedEnabled = true;
 +  private String gatewayApplicationsDir = null;
 +  private String gatewayServicesDir;
 +  private String defaultTopologyName = "default";
 +  private List<String> includedSSLCiphers = null;
 +  private List<String> excludedSSLCiphers = null;
 +  private boolean sslEnabled = false;
 +  private String truststoreType = "jks";
 +  private String keystoreType = "jks";
 +  private boolean isTopologyPortMappingEnabled = true;
 +  private ConcurrentHashMap<String, Integer> topologyPortMapping = new ConcurrentHashMap<>();
 +  private int backupVersionLimit = -1;
 +  private long backupAgeLimit = -1;
 +
 +  public void setGatewayHomeDir( String gatewayHomeDir ) {
 +    this.gatewayHomeDir = gatewayHomeDir;
 +  }
 +
 +  public String getGatewayHomeDir() {
 +    return this.gatewayHomeDir;
 +  }
 +
 +  @Override
 +  public String getGatewayConfDir() {
 +    return gatewayHomeDir;
 +  }
 +
 +  @Override
 +  public String getGatewayDataDir() {
 +    return gatewayHomeDir;
 +  }
 +
 +  @Override
 +  public String getGatewaySecurityDir() {
 +    return gatewayHomeDir + "/security";
 +  }
 +
 +  @Override
 +  public String getGatewayTopologyDir() {
 +    return gatewayHomeDir + "/topologies";
 +  }
 +
 +  @Override
 +  public String getGatewayDeploymentDir() {
 +    return gatewayHomeDir + "/deployments";
 +  }
 +
 +//  public void setDeploymentDir( String clusterConfDir ) {
 +//    this.deployDir = clusterConfDir;
 +//  }
 +
 +  @Override
 +  public String getHadoopConfDir() {
 +    return hadoopConfDir;
 +  }
 +
 +//  public void setHadoopConfDir( String hadoopConfDir ) {
 +//    this.hadoopConfDir = hadoopConfDir;
 +//  }
 +
 +  @Override
 +  public String getGatewayHost() {
 +    return gatewayHost;
 +  }
 +
 +//  public void setGatewayHost( String gatewayHost ) {
 +//    this.gatewayHost = gatewayHost;
 +//  }
 +
 +  @Override
 +  public int getGatewayPort() {
 +    return gatewayPort;
 +  }
 +
 +//  public void setGatewayPort( int gatewayPort ) {
 +//    this.gatewayPort = gatewayPort;
 +//  }
 +
 +  @Override
 +  public String getGatewayPath() {
 +    return gatewayPath;
 +  }
 +
 +  public void setGatewayPath( String gatewayPath ) {
 +    this.gatewayPath = gatewayPath;
 +  }
 +
 +  @Override
 +  public InetSocketAddress getGatewayAddress() throws UnknownHostException {
 +    return new InetSocketAddress( getGatewayHost(), getGatewayPort() );
 +  }
 +
 +
 +  public long getGatewayIdleTimeout() {
 +    return 0L;
 +  }
 +
 +  @Override
 +  public boolean isSSLEnabled() {
 +    return sslEnabled;
 +  }
 +
 +  public void setSSLEnabled( boolean sslEnabled ) {
 +    this.sslEnabled = sslEnabled;
 +  }
 +
 +  @Override
 +  public boolean isHadoopKerberosSecured() {
 +    return hadoopKerberosSecured;
 +  }
 +
 +  public void setHadoopKerberosSecured(boolean hadoopKerberosSecured) {
 +    this.hadoopKerberosSecured = hadoopKerberosSecured;
 +  }
 +
 +  @Override
 +  public String getKerberosConfig() {
 +    return kerberosConfig;
 +  }
 +
 +  public void setKerberosConfig(String kerberosConfig) {
 +    this.kerberosConfig = kerberosConfig;
 +  }
 +
 +  @Override
 +  public boolean isKerberosDebugEnabled() {
 +    return kerberosDebugEnabled;
 +  }
 +
 +  public void setKerberosDebugEnabled(boolean kerberosDebugEnabled) {
 +    this.kerberosDebugEnabled = kerberosDebugEnabled;
 +  }
 +
 +  @Override
 +  public String getKerberosLoginConfig() {
 +    return kerberosLoginConfig;
 +  }
 +
 +  @Override
 +  public String getDefaultTopologyName() {
 +    return defaultTopologyName;
 +  }
 +
 +  public void setDefaultTopologyName( String defaultTopologyName ) {
 +    this.defaultTopologyName = defaultTopologyName;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getDefaultAppRedirectPath()
 +   */
 +  @Override
 +  public String getDefaultAppRedirectPath() {
 +
 +    if(StringUtils.isBlank(this.defaultTopologyName)) {
 +      return "/gateway/sandbox";
 +    } else {
 +      return "/gateway/"+this.defaultTopologyName;
 +    }
 +
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getFrontendUrl()
 +   */
 +  @Override
 +  public String getFrontendUrl() { return frontendUrl; }
 +
 +  public void setFrontendUrl( String frontendUrl ) {
 +    this.frontendUrl = frontendUrl;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getExcludedSSLProtocols()
 +   */
 +  @Override
 +  public List getExcludedSSLProtocols() {
 +    List<String> protocols = new ArrayList<String>();
 +    protocols.add("SSLv3");
 +    return protocols;
 +  }
 +
 +  @Override
 +  public List getIncludedSSLCiphers() {
 +    return includedSSLCiphers;
 +  }
 +
 +  public void setIncludedSSLCiphers( List<String> list ) {
 +    includedSSLCiphers = list;
 +  }
 +
 +  @Override
 +  public List getExcludedSSLCiphers() {
 +    return excludedSSLCiphers;
 +  }
 +
 +  public void setExcludedSSLCiphers( List<String> list ) {
 +    excludedSSLCiphers = list;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#isClientAuthNeeded()
 +   */
 +  @Override
 +  public boolean isClientAuthNeeded() {
 +    // TODO Auto-generated method stub
 +    return false;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getTruststorePath() {
 +    // TODO Auto-generated method stub
 +    return null;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTrustAllCerts()
 +   */
 +  @Override
 +  public boolean getTrustAllCerts() {
 +    // TODO Auto-generated method stub
 +    return false;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststoreType()
 +   */
 +  @Override
 +  public String getTruststoreType() {
 +    return truststoreType;
 +  }
 +
 +  public void setTruststoreType( String truststoreType ) {
 +    this.truststoreType = truststoreType;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getKeystoreType()
 +   */
 +  @Override
 +  public String getKeystoreType() {
 +    return keystoreType;
 +  }
 +
 +  public void setKeystoreType( String keystoreType ) {
 +    this.keystoreType = keystoreType;
 +  }
 +
 +  public void setKerberosLoginConfig(String kerberosLoginConfig) {
 +   this.kerberosLoginConfig = kerberosLoginConfig;
 +  }
 +
 +   @Override
 +   public String getGatewayServicesDir() {
 +    if( gatewayServicesDir != null ) {
 +      return gatewayServicesDir;
 +    } else {
 +      File targetDir = new File( System.getProperty( "user.dir" ), "target/services" );
 +      return targetDir.getPath();
 +    }
 +  }
 +
 +  public void setGatewayServicesDir( String gatewayServicesDir ) {
 +    this.gatewayServicesDir = gatewayServicesDir;
 +  }
 +
 +  @Override
 +  public String getGatewayApplicationsDir() {
 +    if( gatewayApplicationsDir != null ) {
 +      return gatewayApplicationsDir;
 +    } else {
 +      return getGatewayConfDir() + "/applications";
 +    }
 +  }
 +
 +  public void setGatewayApplicationsDir( String gatewayApplicationsDir ) {
 +    this.gatewayApplicationsDir = gatewayApplicationsDir;
 +   }
 +
 +  @Override
 +  public boolean isXForwardedEnabled() {
 +    return xForwardedEnabled;
 +  }
 +
 +  public void setXForwardedEnabled(boolean enabled) {
 +    xForwardedEnabled = enabled;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getEphemeralDHKeySize()
 +   */
 +  @Override
 +  public String getEphemeralDHKeySize() {
 +    return "2048";
 +  }
 +
 +  @Override
 +  public int getHttpClientMaxConnections() {
 +    return 16;
 +  }
 +
 +  @Override
 +  public int getHttpClientConnectionTimeout() {
 +    return -1;
 +  }
 +
 +  @Override
 +  public int getHttpClientSocketTimeout() {
 +    return -1;
 +  }
 +
 +  @Override
 +  public int getThreadPoolMax() {
-     return 16;
++    return 254;
 +  }
 +
 +  @Override
 +  public int getHttpServerRequestBuffer() {
 +    return 16*1024;
 +  }
 +
 +  @Override
 +  public int getHttpServerRequestHeaderBuffer() {
 +    return 8*1024;
 +  }
 +
 +  @Override
 +  public int getHttpServerResponseBuffer() {
 +    return 32*1024;
 +  }
 +
 +  @Override
 +  public int getHttpServerResponseHeaderBuffer() {
 +    return 8*1024;
 +  }
 +
 +  public void setGatewayDeploymentsBackupVersionLimit( int newBackupVersionLimit ) {
 +    backupVersionLimit = newBackupVersionLimit;
 +  }
 +
 +  public int getGatewayDeploymentsBackupVersionLimit() {
 +    return backupVersionLimit;
 +  }
 +
 +  public void setTopologyPortMapping(ConcurrentHashMap<String, Integer> topologyPortMapping) {
 +    this.topologyPortMapping = topologyPortMapping;
 +  }
 +
 +  public void setGatewayPortMappingEnabled(
 +      boolean topologyPortMappingEnabled) {
 +    isTopologyPortMappingEnabled = topologyPortMappingEnabled;
 +  }
 +
 +  @Override
 +  public long getGatewayDeploymentsBackupAgeLimit() {
 +    return backupAgeLimit;
 +  }
 +
 +  public void setGatewayDeploymentsBackupAgeLimit( long newBackupAgeLimit ) {
 +    backupAgeLimit = newBackupAgeLimit;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getSigningKeystoreName()
 +   */
 +  @Override
 +  public String getSigningKeystoreName() {
 +    return null;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getSigningKeyAlias()
 +   */
 +  @Override
 +  public String getSigningKeyAlias() {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<String> getGlobalRulesServices() {
 +    ArrayList<String> services = new ArrayList<>();
 +    services.add("WEBHDFS");
 +    services.add("HBASE");
 +    services.add("HIVE");
 +    services.add("OOZIE");
 +    services.add("RESOURCEMANAGER");
 +    services.add("STORM");
 +    return services;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#isWebsocketEnabled()
 +   */
 +  @Override
 +  public boolean isWebsocketEnabled() {
 +    return DEFAULT_WEBSOCKET_FEATURE_ENABLED;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketMaxTextMessageSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxTextMessageSize() {
 +    return DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketMaxBinaryMessageSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxBinaryMessageSize() {
 +    return DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketMaxTextMessageBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxTextMessageBufferSize() {
 +    return DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketMaxBinaryMessageBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxBinaryMessageBufferSize() {
 +    return DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketInputBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketInputBufferSize() {
 +    return DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketAsyncWriteTimeout()
 +   */
 +  @Override
 +  public int getWebsocketAsyncWriteTimeout() {
 +    return DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getWebsocketIdleTimeout()
 +   */
 +  @Override
 +  public int getWebsocketIdleTimeout() {
 +    return DEFAULT_WEBSOCKET_IDLE_TIMEOUT;
 +  }
 +
 +  @Override
 +  public boolean isMetricsEnabled() {
 +    return false;
 +  }
 +
 +  @Override
 +  public boolean isJmxMetricsReportingEnabled() {
 +    return false;
 +  }
 +
 +  @Override
 +  public boolean isGraphiteMetricsReportingEnabled() {
 +    return false;
 +  }
 +
 +  @Override
 +  public String getGraphiteHost() {
 +    return null;
 +  }
 +
 +  @Override
 +  public int getGraphitePort() {
 +    return 0;
 +  }
 +
 +  @Override
 +  public int getGraphiteReportingFrequency() {
 +    return 0;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getMimeTypesToCompress()
 +   */
 +  @Override
 +  public List<String> getMimeTypesToCompress() {
 +    return new ArrayList<String>();
 +  }
 +
 +  @Override
 +  public  boolean isCookieScopingToPathEnabled() {
 +    return false;
 +  }
 +
 +  @Override
 +  public String getHeaderNameForRemoteAddress() {
 +    return "X-Forwarded-For";
 +  }
 +
 +  @Override
 +  public String getAlgorithm() {
 +    return null;
 +  }
 +
 +  @Override
 +  public String getPBEAlgorithm() {
 +    return null;
 +  }
 +
 +  @Override
 +  public String getTransformation() {
 +    return null;
 +  }
 +
 +  @Override
 +  public String getSaltSize() {
 +    return null;
 +  }
 +
 +  @Override
 +  public String getIterationCount() {
 +    return null;
 +  }
 +
 +  @Override
 +  public String getKeyLength() {
 +    return null;
 +  }
 +
 +  /**
 +   * Map of Topology names and their ports.
 +   *
 +   * @return
 +   */
 +  @Override
 +  public Map<String, Integer> getGatewayPortMappings() {
 +    return topologyPortMapping;
 +  }
 +
 +  /**
 +   * Is the Port Mapping feature on?
 +   *
 +   * @return
 +   */
 +  @Override
 +  public boolean isGatewayPortMappingEnabled() {
 +    return isTopologyPortMappingEnabled;
 +  }
 +
 +  @Override
 +  public boolean isGatewayServerHeaderEnabled() {
 +    return false;
 +  }
 +
 +  @Override
 +  public boolean isClientAuthWanted() {
 +    return false;
 +  }
++
++  @Override
++  public String getGatewayProvidersConfigDir() {
++    return null;
++  }
++
++  @Override
++  public String getGatewayDescriptorsDir() {
++    return null;
++  }
++
++  @Override
++  public List<String> getRemoteRegistryConfigurationNames() {
++    return Collections.emptyList();
++  }
++
++  @Override
++  public String getRemoteRegistryConfiguration(String s) {
++    return null;
++  }
++
++  @Override
++  public String getRemoteConfigurationMonitorClientName() {
++    return null;
++  }
++
++  @Override
++  public int getClusterMonitorPollingInterval(String type) {
++    return 600;
++  }
++
++  @Override
++  public boolean isClusterMonitorEnabled(String type) {
++    return false;
++  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
----------------------------------------------------------------------
diff --cc gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
index 5437ce1,0000000..e5ed5c9
mode 100644,000000..100644
--- a/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
@@@ -1,216 -1,0 +1,216 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.test;
 +
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.InputStreamReader;
 +import java.io.Reader;
 +import java.io.StringWriter;
 +import java.net.HttpURLConnection;
 +import java.net.InetSocketAddress;
 +import java.net.ServerSocket;
 +import java.net.Socket;
 +import java.net.URL;
 +import java.nio.ByteBuffer;
 +import java.util.Properties;
 +import java.util.UUID;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.IOUtils;
 +import org.apache.log4j.Logger;
 +import org.apache.velocity.Template;
 +import org.apache.velocity.VelocityContext;
 +import org.apache.velocity.app.VelocityEngine;
 +import org.apache.velocity.runtime.RuntimeConstants;
 +import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
 +import org.eclipse.jetty.http.HttpTester;
 +import org.eclipse.jetty.servlet.ServletTester;
 +
 +public class TestUtils {
 +
 +  private static Logger LOG = Logger.getLogger(TestUtils.class);
 +
 +  public static final long SHORT_TIMEOUT = 1000L;
-   public static final long MEDIUM_TIMEOUT = 20 * 1000L;
++  public static final long MEDIUM_TIMEOUT = 30 * 1000L;
 +  public static final long LONG_TIMEOUT = 60 * 1000L;
 +
 +  public static String getResourceName( Class clazz, String name ) {
 +    name = clazz.getName().replaceAll( "\\.", "/" ) + "/" + name;
 +    return name;
 +  }
 +
 +  public static URL getResourceUrl( Class clazz, String name ) throws FileNotFoundException {
 +    name = getResourceName( clazz, name );
 +    URL url = ClassLoader.getSystemResource( name );
 +    if( url == null ) {
 +      throw new FileNotFoundException( name );
 +    }
 +    return url;
 +  }
 +
 +  public static URL getResourceUrl( String name ) throws FileNotFoundException {
 +    URL url = ClassLoader.getSystemResource( name );
 +    if( url == null ) {
 +      throw new FileNotFoundException( name );
 +    }
 +    return url;
 +  }
 +
 +  public static InputStream getResourceStream( String name ) throws IOException {
 +    URL url = ClassLoader.getSystemResource( name );
 +    InputStream stream = url.openStream();
 +    return stream;
 +  }
 +
 +  public static InputStream getResourceStream( Class clazz, String name ) throws IOException {
 +    URL url = getResourceUrl( clazz, name );
 +    InputStream stream = url.openStream();
 +    return stream;
 +  }
 +
 +  public static Reader getResourceReader( String name, String charset ) throws IOException {
 +    return new InputStreamReader( getResourceStream( name ), charset );
 +  }
 +
 +  public static Reader getResourceReader( Class clazz, String name, String charset ) throws IOException {
 +    return new InputStreamReader( getResourceStream( clazz, name ), charset );
 +  }
 +
 +  public static String getResourceString( Class clazz, String name, String charset ) throws IOException {
 +    return IOUtils.toString( getResourceReader( clazz, name, charset ) );
 +  }
 +
 +  public static File createTempDir( String prefix ) throws IOException {
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File tempDir = new File( targetDir, prefix + UUID.randomUUID() );
 +    FileUtils.forceMkdir( tempDir );
 +    return tempDir;
 +  }
 +
 +  public static void LOG_ENTER() {
 +    StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
 +    System.out.flush();
 +    System.out.println( String.format( "Running %s#%s", caller.getClassName(), caller.getMethodName() ) );
 +    System.out.flush();
 +  }
 +
 +  public static void LOG_EXIT() {
 +    StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
 +    System.out.flush();
 +    System.out.println( String.format( "Exiting %s#%s", caller.getClassName(), caller.getMethodName() ) );
 +    System.out.flush();
 +  }
 +
 +  public static void awaitPortOpen( InetSocketAddress address, int timeout, int delay ) throws InterruptedException {
 +    long maxTime = System.currentTimeMillis() + timeout;
 +    do {
 +      try {
 +        Socket socket = new Socket();
 +        socket.connect( address, delay );
 +        socket.close();
 +        return;
 +      } catch ( IOException e ) {
 +        //e.printStackTrace();
 +      }
 +    } while( System.currentTimeMillis() < maxTime );
 +    throw new IllegalStateException( "Timed out " + timeout + " waiting for port " + address );
 +  }
 +
 +  public static void awaitNon404HttpStatus( URL url, int timeout, int delay ) throws InterruptedException {
 +    long maxTime = System.currentTimeMillis() + timeout;
 +    do {
 +      Thread.sleep( delay );
 +      HttpURLConnection conn = null;
 +      try {
 +        conn = (HttpURLConnection)url.openConnection();
 +        conn.getInputStream().close();
 +        return;
 +      } catch ( IOException e ) {
 +        //e.printStackTrace();
 +        try {
 +          if( conn != null && conn.getResponseCode() != 404 ) {
 +            return;
 +          }
 +        } catch ( IOException ee ) {
 +          //ee.printStackTrace();
 +        }
 +      }
 +    } while( System.currentTimeMillis() < maxTime );
 +    throw new IllegalStateException( "Timed out " + timeout + " waiting for URL " + url );
 +  }
 +
 +  public static String merge( String resource, Properties properties ) {
 +    ClasspathResourceLoader loader = new ClasspathResourceLoader();
 +    loader.getResourceStream( resource );
 +
 +    VelocityEngine engine = new VelocityEngine();
 +    Properties config = new Properties();
 +    config.setProperty( RuntimeConstants.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem" );
 +    config.setProperty( RuntimeConstants.RESOURCE_LOADER, "classpath" );
 +    config.setProperty( "classpath.resource.loader.class", ClasspathResourceLoader.class.getName() );
 +    engine.init( config );
 +
 +    VelocityContext context = new VelocityContext( properties );
 +    Template template = engine.getTemplate( resource );
 +    StringWriter writer = new StringWriter();
 +    template.merge( context, writer );
 +    return writer.toString();
 +  }
 +
 +  public static String merge( Class base, String resource, Properties properties ) {
 +    String baseResource = base.getName().replaceAll( "\\.", "/" );
 +    String fullResource = baseResource + "/" + resource;
 +    return merge( fullResource, properties );
 +  }
 +
 +  public static int findFreePort() throws IOException {
 +    ServerSocket socket = new ServerSocket(0);
 +    int port = socket.getLocalPort();
 +    socket.close();
 +    return port;
 +  }
 +
 +  public static void waitUntilNextSecond() {
 +    long before = System.currentTimeMillis();
 +    long wait;
 +    while( ( wait = ( 1000 - ( System.currentTimeMillis() - before ) ) ) > 0 ) {
 +      try {
 +        Thread.sleep( wait );
 +      } catch( InterruptedException e ) {
 +        // Ignore.
 +      }
 +    }
 +  }
 +
 +  public static HttpTester.Response execute( ServletTester server, HttpTester.Request request ) throws Exception {
 +    LOG.debug( "execute: request=" + request );
 +    ByteBuffer requestBuffer = request.generate();
 +    LOG.trace( "execute: requestBuffer=[" + new String(requestBuffer.array(),0,requestBuffer.limit()) + "]" );
 +    ByteBuffer responseBuffer = server.getResponses( requestBuffer, 30, TimeUnit.SECONDS );
 +    HttpTester.Response response = HttpTester.parseResponse( responseBuffer );
 +    LOG.trace( "execute: responseBuffer=[" + new String(responseBuffer.array(),0,responseBuffer.limit()) + "]" );
 +    LOG.debug( "execute: response=" + response );
 +    return response;
 +  }
 +
 +
 +}
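
The port helpers above compose naturally in tests that boot a real server: findFreePort reserves a port number and awaitPortOpen blocks until something is listening on it. A minimal usage sketch:

    import java.net.InetSocketAddress;
    import org.apache.knox.test.TestUtils;

    final class PortUsageExample {
      static void startServerAndWait() throws Exception {
        int port = TestUtils.findFreePort();
        // (start the server under test on 'port' here)
        TestUtils.awaitPortOpen(new InetSocketAddress("localhost", port),
            (int) TestUtils.MEDIUM_TIMEOUT, 100);
      }
    }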


[26/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
----------------------------------------------------------------------
diff --cc gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
index c9f262b,0000000..8208e4f
mode 100644,000000..100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
@@@ -1,658 -1,0 +1,663 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.deploy;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.net.URL;
 +import java.util.Arrays;
 +import java.util.Enumeration;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.UUID;
 +import javax.xml.parsers.ParserConfigurationException;
 +import javax.xml.transform.TransformerException;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathExpressionException;
 +import javax.xml.xpath.XPathFactory;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.GatewayTestConfig;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.filter.XForwardedHeaderFilter;
 +import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter;
 +import org.apache.knox.gateway.services.DefaultGatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import org.apache.knox.test.TestUtils;
 +import org.apache.knox.test.log.NoOpAppender;
 +import org.apache.log4j.Appender;
 +import org.jboss.shrinkwrap.api.Archive;
 +import org.jboss.shrinkwrap.api.ArchivePath;
 +import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
 +import org.jboss.shrinkwrap.api.spec.WebArchive;
 +import org.junit.Test;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.Node;
 +import org.xml.sax.SAXException;
 +
 +import static org.apache.knox.test.TestUtils.LOG_ENTER;
 +import static org.apache.knox.test.TestUtils.LOG_EXIT;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.hamcrest.CoreMatchers.nullValue;
 +import static org.hamcrest.MatcherAssert.assertThat;
 +import static org.hamcrest.core.IsEqual.equalTo;
 +import static org.hamcrest.core.IsNot.not;
 +import static org.hamcrest.xml.HasXPath.hasXPath;
 +import static org.junit.Assert.fail;
 +
 +public class DeploymentFactoryFuncTest {
 +
 +  private static final long SHORT_TIMEOUT = 1000L;
 +  private static final long MEDIUM_TIMEOUT = 5 * SHORT_TIMEOUT;
 +  private static final long LONG_TIMEOUT = 10 * MEDIUM_TIMEOUT;
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testGenericProviderDeploymentContributor() throws ParserConfigurationException, SAXException, IOException, TransformerException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +
 +    ((GatewayTestConfig) config).setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +
 +    File deployDir = new File( config.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
 +    //    ((GatewayTestConfig) config).setDeploymentDir( "clusters" );
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/test-service-url" );
 +    topology.addService( service );
 +
 +    Provider provider = new Provider();
 +    provider.setRole( "federation" );
 +    provider.setName( "HeaderPreAuth" );
 +    provider.setEnabled( true );
 +    Param param = new Param();
 +    param.setName( "filter" );
 +    param.setValue( "org.opensource.ExistingFilter" );
 +    provider.addParam( param );
 +    param = new Param();
 +    param.setName( "test-param-name" );
 +    param.setValue( "test-param-value" );
 +    provider.addParam( param );
 +    topology.addProvider( provider );
 +
 +    EnterpriseArchive war = DeploymentFactory.createDeployment( config, topology );
 +
 +    Document gateway = XmlUtils.readXml( war.get( "%2F/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +    //dump( gateway );
 +
 +    //by default the first filter will be the X-Forwarded header filter
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/role", equalTo( "xforwardedheaders" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/name", equalTo( "XForwardedHeaderFilter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/class", equalTo( "org.apache.knox.gateway.filter.XForwardedHeaderFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/role", equalTo( "federation" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/name", equalTo( "HeaderPreAuth" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/class", equalTo( "org.apache.knox.gateway.preauth.filter.HeaderPreAuthFederationFilter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[1]/name", equalTo( "filter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[1]/value", equalTo( "org.opensource.ExistingFilter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[2]/name", equalTo( "test-param-name" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[2]/value", equalTo( "test-param-value" ) ) );
++
++    // Verify that the missing identity-assertion provider is added automatically, since it isn't explicitly declared above.
++    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/role", equalTo( "identity-assertion" ) ) );
++    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/name", equalTo( "Default" ) ) );
++
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = LONG_TIMEOUT )
 +  public void testInvalidGenericProviderDeploymentContributor() throws ParserConfigurationException, SAXException, IOException, TransformerException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +    File deployDir = new File( config.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/test-service-url" );
 +    topology.addService( service );
 +
 +    Provider provider = new Provider();
 +    provider.setRole( "authentication" );
 +    provider.setName( "generic" );
 +    provider.setEnabled( true );
 +    Param param;
 +    // The required "filter" param is intentionally omitted so the provider is invalid:
 +    //param.setName( "filter" );
 +    //param.setValue( "org.opensource.ExistingFilter" );
 +    //provider.addParam( param );
 +    param = new Param();
 +    param.setName( "test-param-name" );
 +    param.setValue( "test-param-value" );
 +    provider.addParam( param );
 +    topology.addProvider( provider );
 +
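 +    // Silence the expected error logging while deployment of the invalid topology fails.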
 +    Enumeration<Appender> appenders = NoOpAppender.setUp();
 +    try {
 +      DeploymentFactory.createDeployment( config, topology );
 +      fail( "Should have thrown DeploymentException" );
 +    } catch ( DeploymentException e ) {
 +      // Expected.
 +    } finally {
 +      NoOpAppender.tearDown( appenders );
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testSimpleTopology() throws IOException, SAXException, ParserConfigurationException, URISyntaxException, TransformerException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    //Testing without x-forwarded headers filter
 +    ((GatewayTestConfig)config).setXForwardedEnabled(false);
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +    File deployDir = new File( config.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
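 +    // Assemble a topology with Shiro authentication, Default identity-assertion and
 +    // AclsAuthz authorization; the generated filter chain order is asserted below.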
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/webhdfs" );
 +    topology.addService( service );
 +    Provider provider = new Provider();
 +    provider.setRole( "authentication" );
 +    provider.setName( "ShiroProvider" );
 +    provider.setEnabled( true );
 +    Param param = new Param();
 +    param.setName( "contextConfigLocation" );
 +    param.setValue( "classpath:app-context-security.xml" );
 +    provider.addParam( param );
 +    topology.addProvider( provider );
 +    Provider asserter = new Provider();
 +    asserter.setRole( "identity-assertion" );
 +    asserter.setName("Default");
 +    asserter.setEnabled( true );
 +    topology.addProvider( asserter );
 +    Provider authorizer = new Provider();
 +    authorizer.setRole( "authorization" );
 +    authorizer.setName("AclsAuthz");
 +    authorizer.setEnabled( true );
 +    topology.addProvider( authorizer );
 +
 +    EnterpriseArchive war = DeploymentFactory.createDeployment( config, topology );
 +    //    File dir = new File( System.getProperty( "user.dir" ) );
 +    //    File file = war.as( ExplodedExporter.class ).exportExploded( dir, "test-cluster.war" );
 +
 +    Document web = XmlUtils.readXml( war.get( "%2F/WEB-INF/web.xml" ).getAsset().openStream() );
 +    assertThat( web, hasXPath( "/web-app" ) );
 +    assertThat( web, hasXPath( "/web-app/servlet" ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/servlet-name" ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/servlet-name", equalTo( "test-cluster-knox-gateway-servlet" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/servlet-class", equalTo( "org.apache.knox.gateway.GatewayServlet" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/init-param/param-name", equalTo( "gatewayDescriptorLocation" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/init-param/param-value", equalTo( "/WEB-INF/gateway.xml" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet-mapping/servlet-name", equalTo( "test-cluster-knox-gateway-servlet" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet-mapping/url-pattern", equalTo( "/*" ) ) );
 +
 +    Document gateway = XmlUtils.readXml( war.get( "%2F/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/pattern", equalTo( "/webhdfs/v1/?**" ) ) );
 +    //assertThat( gateway, hasXPath( "/gateway/resource[1]/target", equalTo( "http://localhost:50070/webhdfs/v1/?{**}" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/class", equalTo( "org.apache.knox.gateway.filter.ResponseCookieFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/class", equalTo( "org.apache.shiro.web.servlet.ShiroFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[3]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[3]/class", equalTo( "org.apache.knox.gateway.filter.ShiroSubjectIdentityAdapter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/role", equalTo( "rewrite" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/class", equalTo( "org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[5]/role", equalTo( "identity-assertion" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[5]/class", equalTo( "org.apache.knox.gateway.identityasserter.filter.IdentityAsserterFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[6]/role", equalTo( "authorization" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[6]/name", equalTo( "AclsAuthz" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[6]/class", equalTo( "org.apache.knox.gateway.filter.AclsAuthorizationFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[7]/role", equalTo( "dispatch" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[7]/name", equalTo( "webhdfs" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[7]/class", equalTo( "org.apache.knox.gateway.dispatch.GatewayDispatchFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/pattern", equalTo( "/webhdfs/v1/**?**" ) ) );
 +    //assertThat( gateway, hasXPath( "/gateway/resource[2]/target", equalTo( "http://localhost:50070/webhdfs/v1/{path=**}?{**}" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[1]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[1]/class", equalTo( "org.apache.knox.gateway.filter.ResponseCookieFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[2]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[2]/class", equalTo( "org.apache.shiro.web.servlet.ShiroFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[3]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[3]/class", equalTo( "org.apache.knox.gateway.filter.ShiroSubjectIdentityAdapter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[4]/role", equalTo( "rewrite" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[4]/class", equalTo( "org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[5]/role", equalTo( "identity-assertion" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[5]/class", equalTo( "org.apache.knox.gateway.identityasserter.filter.IdentityAsserterFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[6]/role", equalTo( "authorization" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[6]/name", equalTo( "AclsAuthz" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[6]/class", equalTo( "org.apache.knox.gateway.filter.AclsAuthorizationFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[7]/role", equalTo( "dispatch" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[7]/name", equalTo( "webhdfs" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[7]/class", equalTo( "org.apache.knox.gateway.dispatch.GatewayDispatchFilter" ) ) );
 +
 +    LOG_EXIT();
 +  }
 +
 +
 +  @Test( timeout = LONG_TIMEOUT )
 +  public void testWebXmlGeneration() throws IOException, SAXException, ParserConfigurationException, URISyntaxException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName("test-cluster");
 +    Service service = new Service();
 +    service.setRole("WEBHDFS");
 +    service.addUrl("http://localhost:50070/webhdfs");
 +    topology.addService(service);
 +    Provider provider = new Provider();
 +    provider.setRole("authentication");
 +    provider.setName("ShiroProvider");
 +    provider.setEnabled(true);
 +    Param param = new Param();
 +    param.setName("contextConfigLocation");
 +    param.setValue("classpath:app-context-security.xml");
 +    provider.addParam(param);
 +    topology.addProvider(provider);
 +    Provider asserter = new Provider();
 +    asserter.setRole("identity-assertion");
 +    asserter.setName("Default");
 +    asserter.setEnabled(true);
 +    topology.addProvider(asserter);
 +    Provider authorizer = new Provider();
 +    authorizer.setRole("authorization");
 +    authorizer.setName("AclsAuthz");
 +    authorizer.setEnabled(true);
 +    topology.addProvider(authorizer);
 +    Provider ha = new Provider();
 +    ha.setRole("ha");
 +    ha.setName("HaProvider");
 +    ha.setEnabled(true);
 +    topology.addProvider(ha);
 +
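 +    // Deploy the same topology repeatedly to verify that web.xml generation is
 +    // deterministic, in particular the ordering of the generated listener classes.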
 +    for (int i = 0; i < 10; i++) {
 +      createAndTestDeployment(config, topology);
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  private void createAndTestDeployment(GatewayConfig config, Topology topology) throws IOException, SAXException, ParserConfigurationException {
 +
 +    EnterpriseArchive war = DeploymentFactory.createDeployment(config, topology);
 +    //      File dir = new File( System.getProperty( "user.dir" ) );
 +    //      File file = war.as( ExplodedExporter.class ).exportExploded( dir, "test-cluster.war" );
 +
 +    Document web = XmlUtils.readXml(war.get("%2F/WEB-INF/web.xml").getAsset().openStream());
 +    assertThat(web, hasXPath("/web-app/servlet/servlet-class", equalTo("org.apache.knox.gateway.GatewayServlet")));
 +    assertThat(web, hasXPath("/web-app/servlet/init-param/param-name", equalTo("gatewayDescriptorLocation")));
 +    assertThat(web, hasXPath("/web-app/servlet/init-param/param-value", equalTo("/WEB-INF/gateway.xml")));
 +    assertThat(web, hasXPath("/web-app/servlet-mapping/servlet-name", equalTo("test-cluster-knox-gateway-servlet")));
 +    assertThat(web, hasXPath("/web-app/servlet-mapping/url-pattern", equalTo("/*")));
 +    // Verify the order of the generated listener classes.
 +    assertThat(web, hasXPath("/web-app/listener[2]/listener-class", equalTo("org.apache.knox.gateway.services.GatewayServicesContextListener")));
 +    assertThat(web, hasXPath("/web-app/listener[3]/listener-class", equalTo("org.apache.knox.gateway.services.GatewayMetricsServletContextListener")));
 +    assertThat(web, hasXPath("/web-app/listener[4]/listener-class", equalTo("org.apache.knox.gateway.ha.provider" +
 +        ".HaServletContextListener")));
 +    assertThat(web, hasXPath("/web-app/listener[5]/listener-class", equalTo("org.apache.knox.gateway.filter" +
 +        ".rewrite.api.UrlRewriteServletContextListener")));
 +  }
 +
 +  @Test( timeout = LONG_TIMEOUT )
 +  public void testDeploymentWithServiceParams() throws Exception {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Service service;
 +    Param param;
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +
 +    service = new Service();
 +    service.setRole( "HIVE" );
 +    service.setUrls( Arrays.asList( new String[]{ "http://hive-host:50001/" } ) );
 +    param = new Param();
 +    param.setName( "someparam" );
 +    param.setValue( "somevalue" );
 +    service.addParam( param );
 +    topology.addService( service );
 +
 +    service = new Service();
 +    service.setRole( "WEBHBASE" );
 +    service.setUrls( Arrays.asList( new String[]{ "http://hbase-host:50002/" } ) );
 +    param = new Param();
 +    param.setName( "replayBufferSize" );
 +    param.setValue( "33" );
 +    service.addParam( param );
 +    topology.addService( service );
 +
 +    service = new Service();
 +    service.setRole( "OOZIE" );
 +    service.setUrls( Arrays.asList( new String[]{ "http://hbase-host:50003/" } ) );
 +    param = new Param();
 +    param.setName( "otherparam" );
 +    param.setValue( "65" );
 +    service.addParam( param );
 +    topology.addService( service );
 +
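 +    // Each service-level param should surface as a param on that service's dispatch filter.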
 +    EnterpriseArchive war = DeploymentFactory.createDeployment( config, topology );
 +    Document doc = XmlUtils.readXml( war.get( "%2F/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +    //    dump( doc );
 +
 +    Node resourceNode, filterNode, paramNode;
 +    String value;
 +
 +    resourceNode = node( doc, "gateway/resource[role/text()='HIVE']" );
 +    assertThat( resourceNode, is(not(nullValue())));
 +    filterNode = node( resourceNode, "filter[role/text()='dispatch']" );
 +    assertThat( filterNode, is(not(nullValue())));
 +    paramNode = node( filterNode, "param[name/text()='someparam']" );
 +    value = value( paramNode, "value/text()" );
 +    assertThat( value, is( "somevalue" ) ) ;
 +
 +    resourceNode = node( doc, "gateway/resource[role/text()='WEBHBASE']" );
 +    assertThat( resourceNode, is(not(nullValue())));
 +    filterNode = node( resourceNode, "filter[role/text()='dispatch']" );
 +    assertThat( filterNode, is(not(nullValue())));
 +    paramNode = node( filterNode, "param[name/text()='replayBufferSize']" );
 +    value = value( paramNode, "value/text()" );
 +    assertThat( value, is( "33" ) ) ;
 +
 +    resourceNode = node( doc, "gateway/resource[role/text()='OOZIE']" );
 +    assertThat( resourceNode, is(not(nullValue())));
 +    filterNode = node( resourceNode, "filter[role/text()='dispatch']" );
 +    assertThat( filterNode, is(not(nullValue())));
 +    paramNode = node( filterNode, "param[name/text()='otherparam']" );
 +    value = value( paramNode, "value/text()" );
 +    assertThat( value, is( "65" ) ) ;
 +
 +    FileUtils.deleteQuietly( deployDir );
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testDeploymentWithApplication() throws Exception {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +    URL serviceUrl = TestUtils.getResourceUrl( DeploymentFactoryFuncTest.class, "test-apps/minimal-test-app/service.xml" );
 +    File serviceFile = new File( serviceUrl.toURI() );
 +    File appsDir = serviceFile.getParentFile().getParentFile();
 +    ((GatewayTestConfig)config).setGatewayApplicationsDir(appsDir.getAbsolutePath());
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-topology" );
 +
 +    Application app;
 +
 +    app = new Application();
 +    app.setName( "minimal-test-app" );
 +    app.addUrl( "/minimal-test-app-path" );
 +    topology.addApplication( app );
 +
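 +    // The application should be packaged as its own web archive rooted at the configured URL path.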
 +    EnterpriseArchive archive = DeploymentFactory.createDeployment( config, topology );
 +    assertThat( archive, notNullValue() );
 +
 +    Document doc;
 +
 +    doc = XmlUtils.readXml( archive.get( "META-INF/topology.xml" ).getAsset().openStream() );
 +    assertThat( doc, notNullValue() );
 +
 +    doc = XmlUtils.readXml( archive.get( "%2Fminimal-test-app-path/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +    assertThat( doc, notNullValue() );
 +    //dump( doc );
 +    assertThat( doc, hasXPath("/gateway/resource/pattern", equalTo("/**?**")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[1]/role", equalTo("xforwardedheaders")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[1]/name", equalTo("XForwardedHeaderFilter")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[1]/class", equalTo(XForwardedHeaderFilter.class.getName())));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[2]/role", equalTo("rewrite")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[2]/name", equalTo("url-rewrite")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[2]/class", equalTo(UrlRewriteServletFilter.class.getName())));
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testDeploymentWithServicesAndApplications() throws Exception {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +    URL serviceUrl = TestUtils.getResourceUrl( DeploymentFactoryFuncTest.class, "test-apps/minimal-test-app/service.xml" );
 +    File serviceFile = new File( serviceUrl.toURI() );
 +    File appsDir = serviceFile.getParentFile().getParentFile();
 +    ((GatewayTestConfig)config).setGatewayApplicationsDir(appsDir.getAbsolutePath());
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-topology" );
 +
 +    Application app;
 +
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/test-service-url" );
 +    topology.addService( service );
 +
 +    app = new Application();
 +    app.setName( "minimal-test-app" );
 +    app.addUrl( "/minimal-test-app-path-one" );
 +    topology.addApplication( app );
 +
 +    app = new Application();
 +    app.setName( "minimal-test-app" );
 +    app.addUrl( "/minimal-test-app-path-two" );
 +    topology.addApplication( app );
 +
 +    EnterpriseArchive archive = DeploymentFactory.createDeployment( config, topology );
 +    assertThat( archive, notNullValue() );
 +
 +    Document doc;
 +    org.jboss.shrinkwrap.api.Node node;
 +
 +    node = archive.get( "META-INF/topology.xml" );
 +    assertThat( "Find META-INF/topology.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse META-INF/topology.xml", doc, notNullValue() );
 +
 +    node = archive.get( "%2F" );
 +    assertThat( "Find %2F", node, notNullValue() );
 +    node = archive.get( "%2F/WEB-INF/gateway.xml" );
 +    assertThat( "Find %2F/WEB-INF/gateway.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse %2F/WEB-INF/gateway.xml", doc, notNullValue() );
 +
 +    WebArchive war = archive.getAsType( WebArchive.class, "%2Fminimal-test-app-path-one" );
 +    assertThat( "Find %2Fminimal-test-app-path-one", war, notNullValue() );
 +    node = war.get( "/WEB-INF/gateway.xml" );
 +    assertThat( "Find %2Fminimal-test-app-path-one/WEB-INF/gateway.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse %2Fminimal-test-app-path-one/WEB-INF/gateway.xml", doc, notNullValue() );
 +
 +    war = archive.getAsType( WebArchive.class, "%2Fminimal-test-app-path-two" );
 +    assertThat( "Find %2Fminimal-test-app-path-two", war, notNullValue() );
 +    node = war.get( "/WEB-INF/gateway.xml" );
 +    assertThat( "Find %2Fminimal-test-app-path-two/WEB-INF/gateway.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse %2Fminimal-test-app-path-two/WEB-INF/gateway.xml", doc, notNullValue() );
 +
 +    LOG_EXIT();
 +  }
 +
 +  private Node node( Node scope, String expression ) throws XPathExpressionException {
 +    return (Node)XPathFactory.newInstance().newXPath().compile( expression ).evaluate( scope, XPathConstants.NODE );
 +  }
 +
 +  private String value( Node scope, String expression ) throws XPathExpressionException {
 +    return XPathFactory.newInstance().newXPath().compile( expression ).evaluate( scope );
 +  }
 +
 +  private static void dump( org.jboss.shrinkwrap.api.Node node, String prefix ) {
 +    System.out.println( prefix + ": " + node.getPath() );
 +    Set<org.jboss.shrinkwrap.api.Node> children = node.getChildren();
 +    if( children != null && !children.isEmpty() ) {
 +      for( org.jboss.shrinkwrap.api.Node child : children ) {
 +        dump( child, prefix + "    " );
 +      }
 +    }
 +  }
 +
 +  private static void dump( Archive archive ) {
 +    Map<ArchivePath,org.jboss.shrinkwrap.api.Node> content = archive.getContent();
 +    for( Map.Entry<ArchivePath,org.jboss.shrinkwrap.api.Node> entry : content.entrySet() ) {
 +      dump( entry.getValue(), "    " );
 +    }
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/knox-cli-launcher/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/pom.xml
----------------------------------------------------------------------


[36/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
index 38653f4,0000000..c6e373d
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
@@@ -1,818 -1,0 +1,895 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.knox.gateway.services.topology.impl;
 +
 +
 +import org.apache.commons.digester3.Digester;
 +import org.apache.commons.digester3.binder.DigesterLoader;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.GatewayMessages;
++import org.apache.knox.gateway.GatewayServer;
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
++import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
++import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.topology.TopologyService;
++import org.apache.knox.gateway.topology.ClusterConfigurationMonitorService;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.topology.TopologyMonitor;
 +import org.apache.knox.gateway.topology.TopologyProvider;
 +import org.apache.knox.gateway.topology.builder.TopologyBuilder;
++import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
++import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitor;
++import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorFactory;
++import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.topology.xml.AmbariFormatXmlTopologyRules;
 +import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
- import org.apache.knox.gateway.services.security.AliasService;
- import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
 +import org.eclipse.persistence.jaxb.JAXBContextProperties;
 +import org.xml.sax.SAXException;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import static org.apache.commons.digester3.binder.DigesterLoader.newLoader;
 +
 +
 +public class DefaultTopologyService
 +    extends FileAlterationListenerAdaptor
 +    implements TopologyService, TopologyMonitor, TopologyProvider, FileFilter, FileAlterationListener {
 +
 +  private static Auditor auditor = AuditServiceFactory.getAuditService().getAuditor(
 +    AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +    AuditConstants.KNOX_COMPONENT_NAME);
 +
 +  private static final List<String> SUPPORTED_TOPOLOGY_FILE_EXTENSIONS = new ArrayList<String>();
 +  static {
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("xml");
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("conf");
 +  }
 +
 +  private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
 +  private static DigesterLoader digesterLoader = newLoader(new KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules());
 +  private List<FileAlterationMonitor> monitors = new ArrayList<>();
 +  private File topologiesDirectory;
 +  private File sharedProvidersDirectory;
 +  private File descriptorsDirectory;
 +
 +  private DescriptorsMonitor descriptorsMonitor;
 +
 +  private Set<TopologyListener> listeners;
 +  private volatile Map<File, Topology> topologies;
 +  private AliasService aliasService;
 +
++  private RemoteConfigurationMonitor remoteMonitor = null;
 +
 +  private Topology loadTopology(File file) throws IOException, SAXException, URISyntaxException, InterruptedException {
 +    final long TIMEOUT = 250; //ms
 +    final long DELAY = 50; //ms
 +    log.loadingTopologyFile(file.getAbsolutePath());
 +    Topology topology;
 +    long start = System.currentTimeMillis();
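 +    // Retry briefly; the file may still be mid-write when the change notification fires.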
 +    while (true) {
 +      try {
 +        topology = loadTopologyAttempt(file);
 +        break;
 +      } catch (IOException | SAXException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      }
 +    }
 +    return topology;
 +  }
 +
 +  private Topology loadTopologyAttempt(File file) throws IOException, SAXException, URISyntaxException {
 +    Topology topology;
 +    Digester digester = digesterLoader.newDigester();
 +    TopologyBuilder topologyBuilder = digester.parse(FileUtils.openInputStream(file));
 +    if (null == topologyBuilder) {
 +      return null;
 +    }
 +    topology = topologyBuilder.build();
 +    topology.setUri(file.toURI());
 +    topology.setName(FilenameUtils.removeExtension(file.getName()));
 +    topology.setTimestamp(file.lastModified());
 +    return topology;
 +  }
 +
 +  private void redeployTopology(Topology topology) {
 +    File topologyFile = new File(topology.getUri());
 +    try {
 +      TopologyValidator tv = new TopologyValidator(topology);
 +
 +      if(!tv.validateTopology()) {
 +        throw new SAXException(tv.getErrorString());
 +      }
 +
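 +      // Bump the file's last-modified time so the file monitor sees a change and redeploys.
 +      // Retry for up to a second since some filesystems have coarse timestamp resolution.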
 +      long start = System.currentTimeMillis();
 +      long limit = 1000L; // One second.
 +      long elapsed = 1;
 +      while (elapsed <= limit) {
 +        try {
 +          long origTimestamp = topologyFile.lastModified();
 +          long setTimestamp = Math.max(System.currentTimeMillis(), topologyFile.lastModified() + elapsed);
 +          if(topologyFile.setLastModified(setTimestamp)) {
 +            long newTimestamp = topologyFile.lastModified();
 +            if(newTimestamp > origTimestamp) {
 +              break;
 +            } else {
 +              Thread.sleep(10);
 +              elapsed = System.currentTimeMillis() - start;
 +              continue;
 +            }
 +          } else {
 +            auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +                ActionOutcome.FAILURE);
 +            log.failedToRedeployTopology(topology.getName());
 +            break;
 +          }
 +        } catch (InterruptedException e) {
 +          auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +              ActionOutcome.FAILURE);
 +          log.failedToRedeployTopology(topology.getName(), e);
 +          e.printStackTrace();
 +        }
 +      }
 +    } catch (SAXException e) {
 +      auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToRedeployTopology(topology.getName(), e);
 +    }
 +  }
 +
 +  private List<TopologyEvent> createChangeEvents(
 +      Map<File, Topology> oldTopologies,
 +      Map<File, Topology> newTopologies) {
 +    ArrayList<TopologyEvent> events = new ArrayList<TopologyEvent>();
 +    // Go through the old topologies and find anything that was deleted.
 +    for (File file : oldTopologies.keySet()) {
 +      if (!newTopologies.containsKey(file)) {
 +        events.add(new TopologyEvent(TopologyEvent.Type.DELETED, oldTopologies.get(file)));
 +      }
 +    }
 +    // Go through the new topologies and figure out what was updated vs added.
 +    for (File file : newTopologies.keySet()) {
 +      if (oldTopologies.containsKey(file)) {
 +        Topology oldTopology = oldTopologies.get(file);
 +        Topology newTopology = newTopologies.get(file);
 +        if (newTopology.getTimestamp() > oldTopology.getTimestamp()) {
 +          events.add(new TopologyEvent(TopologyEvent.Type.UPDATED, newTopologies.get(file)));
 +        }
 +      } else {
 +        events.add(new TopologyEvent(TopologyEvent.Type.CREATED, newTopologies.get(file)));
 +      }
 +    }
 +    return events;
 +  }
 +
++  private File calculateAbsoluteProvidersConfigDir(GatewayConfig config) {
++    File pcDir = new File(config.getGatewayProvidersConfigDir());
++    return pcDir.getAbsoluteFile();
++  }
++
++  private File calculateAbsoluteDescriptorsDir(GatewayConfig config) {
++    File descDir = new File(config.getGatewayDescriptorsDir());
++    return descDir.getAbsoluteFile();
++  }
++
 +  private File calculateAbsoluteTopologiesDir(GatewayConfig config) {
 +    File topoDir = new File(config.getGatewayTopologyDir());
 +    topoDir = topoDir.getAbsoluteFile();
 +    return topoDir;
 +  }
 +
 +  private File calculateAbsoluteConfigDir(GatewayConfig config) {
-     File configDir = null;
++    File configDir;
 +
 +    String path = config.getGatewayConfDir();
 +    configDir = (path != null) ? new File(path) : (new File(config.getGatewayTopologyDir())).getParentFile();
 +
 +    return configDir.getAbsoluteFile();
 +  }
 +
 +  private void  initListener(FileAlterationMonitor  monitor,
 +                            File                   directory,
 +                            FileFilter             filter,
 +                            FileAlterationListener listener) {
 +    monitors.add(monitor);
 +    FileAlterationObserver observer = new FileAlterationObserver(directory, filter);
 +    observer.addListener(listener);
 +    monitor.addObserver(observer);
 +  }
 +
 +  private void initListener(File directory, FileFilter filter, FileAlterationListener listener) throws IOException, SAXException {
 +    // Increasing the monitoring interval to 5 seconds as profiling has shown
 +    // this is rather expensive in terms of generated garbage objects.
 +    initListener(new FileAlterationMonitor(5000L), directory, filter, listener);
 +  }
 +
 +  private Map<File, Topology> loadTopologies(File directory) {
 +    Map<File, Topology> map = new HashMap<>();
 +    if (directory.isDirectory() && directory.canRead()) {
 +      File[] existingTopologies = directory.listFiles(this);
 +      if (existingTopologies != null) {
 +        for (File file : existingTopologies) {
 +          try {
 +            Topology topology = loadTopology(file);
 +            if (null != topology) {
 +              map.put(file, topology);
 +            } else {
 +              auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +                      ActionOutcome.FAILURE);
 +              log.failedToLoadTopology(file.getAbsolutePath());
 +            }
 +          } catch (Exception e) {
 +            // It may make sense to propagate this; for now, audit and log the failure.
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +                    ActionOutcome.FAILURE);
 +            log.failedToLoadTopology(file.getAbsolutePath(), e);
 +          }
 +        }
 +      }
 +    }
 +    return map;
 +  }
 +
 +  public void setAliasService(AliasService as) {
 +    this.aliasService = as;
 +  }
 +
 +  public void deployTopology(Topology t){
 +
 +    try {
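 +      // Marshal the topology to a temp file using the JAXB binding metadata, then rename it into place.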
 +      File temp = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml.temp");
 +      Package topologyPkg = Topology.class.getPackage();
 +      String pkgName = topologyPkg.getName();
 +      String bindingFile = pkgName.replace(".", "/") + "/topology_binding-xml.xml";
 +
 +      Map<String, Object> properties = new HashMap<>(1);
 +      properties.put(JAXBContextProperties.OXM_METADATA_SOURCE, bindingFile);
 +      JAXBContext jc = JAXBContext.newInstance(pkgName, Topology.class.getClassLoader(), properties);
 +      Marshaller mr = jc.createMarshaller();
 +
 +      mr.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
 +      mr.marshal(t, temp);
 +
 +      File topology = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml");
 +      if(!temp.renameTo(topology)) {
 +        FileUtils.forceDelete(temp);
 +        throw new IOException("Could not rename temp file");
 +      }
 +
 +      // This code will check if the topology is valid, and retrieve the errors if it is not.
 +      TopologyValidator validator = new TopologyValidator( topology.getAbsolutePath() );
 +      if( !validator.validateTopology() ){
 +        throw new SAXException( validator.getErrorString() );
 +      }
 +
 +
 +    } catch (JAXBException e) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), e);
 +    } catch (IOException io) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), io);
 +    } catch (SAXException sx){
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), sx);
 +    }
 +    reloadTopologies();
 +  }
 +
 +  public void redeployTopologies(String topologyName) {
 +
 +    for (Topology topology : getTopologies()) {
 +      if (topologyName == null || topologyName.equals(topology.getName())) {
 +        redeployTopology(topology);
 +      }
 +    }
 +
 +  }
 +
 +  public void reloadTopologies() {
 +    try {
 +      synchronized (this) {
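 +        // Swap in the freshly loaded topologies and notify listeners of the changes.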
 +        Map<File, Topology> oldTopologies = topologies;
 +        Map<File, Topology> newTopologies = loadTopologies(topologiesDirectory);
 +        List<TopologyEvent> events = createChangeEvents(oldTopologies, newTopologies);
 +        topologies = newTopologies;
 +        notifyChangeListeners(events);
 +      }
 +    } catch (Exception e) {
 +      // It may make sense to propagate this; for now, log the failure.
 +      log.failedToReloadTopologies(e);
 +    }
 +  }
 +
 +  public void deleteTopology(Topology t) {
 +    File topoDir = topologiesDirectory;
 +
 +    if(topoDir.isDirectory() && topoDir.canRead()) {
 +      for (File f : listFiles(topoDir)) {
 +        String fName = FilenameUtils.getBaseName(f.getName());
 +        if(fName.equals(t.getName())) {
 +          f.delete();
 +        }
 +      }
 +    }
 +    reloadTopologies();
 +  }
 +
 +  private void notifyChangeListeners(List<TopologyEvent> events) {
 +    for (TopologyListener listener : listeners) {
 +      try {
 +        listener.handleTopologyEvent(events);
 +      } catch (RuntimeException e) {
 +        auditor.audit(Action.LOAD, "Topology_Event", ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +        log.failedToHandleTopologyEvents(e);
 +      }
 +    }
 +  }
 +
 +  public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
 +    File tFile = null;
 +    Map<String, List<String>> urls = new HashMap<>();
 +    if (topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
 +      for (File f : listFiles(topologiesDirectory)) {
 +        if (FilenameUtils.removeExtension(f.getName()).equals(t.getName())) {
 +          tFile = f;
 +        }
 +      }
 +    }
 +    Set<ServiceDefinition> defs;
 +    if(tFile != null) {
 +      defs = ServiceDefinitionsLoader.getServiceDefinitions(new File(config.getGatewayServicesDir()));
 +
 +      for(ServiceDefinition def : defs) {
 +        urls.put(def.getRole(), def.getTestURLs());
 +      }
 +    }
 +    return urls;
 +  }
 +
 +  public Collection<Topology> getTopologies() {
 +    Map<File, Topology> map = topologies;
 +    return Collections.unmodifiableCollection(map.values());
 +  }
 +
 +  @Override
 +  public boolean deployProviderConfiguration(String name, String content) {
 +    return writeConfig(sharedProvidersDirectory, name, content);
 +  }
 +
 +  @Override
 +  public Collection<File> getProviderConfigurations() {
 +    List<File> providerConfigs = new ArrayList<>();
 +    for (File providerConfig : listFiles(sharedProvidersDirectory)) {
 +      if (SharedProviderConfigMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(providerConfig.getName()))) {
 +        providerConfigs.add(providerConfig);
 +      }
 +    }
 +    return providerConfigs;
 +  }
 +
 +  @Override
 +  public boolean deleteProviderConfiguration(String name) {
 +    boolean result = false;
 +
 +    File providerConfig = getExistingFile(sharedProvidersDirectory, name);
 +    if (providerConfig != null) {
 +      List<String> references = descriptorsMonitor.getReferencingDescriptors(providerConfig.getAbsolutePath());
 +      if (references.isEmpty()) {
 +        result = providerConfig.delete();
 +      } else {
 +        log.preventedDeletionOfSharedProviderConfiguration(providerConfig.getAbsolutePath());
 +      }
 +    } else {
 +      result = true; // If it already does NOT exist, then the delete effectively succeeded
 +    }
 +
 +    return result;
 +  }
 +
 +  @Override
 +  public boolean deployDescriptor(String name, String content) {
 +    return writeConfig(descriptorsDirectory, name, content);
 +  }
 +
 +  @Override
 +  public Collection<File> getDescriptors() {
 +    List<File> descriptors = new ArrayList<>();
 +    for (File descriptor : listFiles(descriptorsDirectory)) {
 +      if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
 +        descriptors.add(descriptor);
 +      }
 +    }
 +    return descriptors;
 +  }
 +
 +  @Override
 +  public boolean deleteDescriptor(String name) {
 +    File descriptor = getExistingFile(descriptorsDirectory, name);
 +    return (descriptor == null) || descriptor.delete();
 +  }
 +
 +  @Override
 +  public void addTopologyChangeListener(TopologyListener listener) {
 +    listeners.add(listener);
 +  }
 +
 +  @Override
 +  public void startMonitor() throws Exception {
++    // Start the local configuration monitors
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.start();
 +    }
++
++    // Start the remote configuration monitor, if it has been initialized
++    if (remoteMonitor != null) {
++      try {
++        remoteMonitor.start();
++      } catch (Exception e) {
++        log.remoteConfigurationMonitorStartFailure(remoteMonitor.getClass().getTypeName(), e.getLocalizedMessage(), e);
++      }
++    }
 +  }
 +
 +  @Override
 +  public void stopMonitor() throws Exception {
++    // Stop the local configuration monitors
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.stop();
 +    }
++
++    // Stop the remote configuration monitor, if it has been initialized
++    if (remoteMonitor != null) {
++      remoteMonitor.stop();
++    }
 +  }
 +
 +  @Override
 +  public boolean accept(File file) {
 +    boolean accept = false;
 +    if (!file.isDirectory() && file.canRead()) {
 +      String extension = FilenameUtils.getExtension(file.getName());
 +      if (SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains(extension)) {
 +        accept = true;
 +      }
 +    }
 +    return accept;
 +  }
 +
 +  @Override
 +  public void onFileCreate(File file) {
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileDelete(java.io.File file) {
 +    // For full topology descriptors, we need to make sure to delete any corresponding simple descriptors to prevent
 +    // unintended subsequent generation of the topology descriptor
 +    for (String ext : DescriptorsMonitor.SUPPORTED_EXTENSIONS) {
 +      File simpleDesc =
 +              new File(descriptorsDirectory, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +      if (simpleDesc.exists()) {
 +        log.deletingDescriptorForTopologyDeletion(simpleDesc.getName(), file.getName());
 +        simpleDesc.delete();
 +      }
 +    }
 +
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileChange(File file) {
 +    reloadTopologies();
 +  }
 +
 +  @Override
 +  public void stop() {
 +
 +  }
 +
 +  @Override
 +  public void start() {
- 
++    // Register a cluster configuration monitor listener for change notifications
++    ClusterConfigurationMonitorService ccms =
++                  GatewayServer.getGatewayServices().getService(GatewayServices.CLUSTER_CONFIGURATION_MONITOR_SERVICE);
++    ccms.addListener(new TopologyDiscoveryTrigger(this));
 +  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
 +
 +    try {
-       listeners = new HashSet<>();
++      listeners  = new HashSet<>();
 +      topologies = new HashMap<>();
 +
 +      topologiesDirectory = calculateAbsoluteTopologiesDir(config);
 +
 +      File configDirectory = calculateAbsoluteConfigDir(config);
 +      descriptorsDirectory = new File(configDirectory, "descriptors");
 +      sharedProvidersDirectory = new File(configDirectory, "shared-providers");
 +
 +      // Add support for conf/topologies
 +      initListener(topologiesDirectory, this, this);
 +
 +      // Add support for conf/descriptors
 +      descriptorsMonitor = new DescriptorsMonitor(topologiesDirectory, aliasService);
 +      initListener(descriptorsDirectory,
 +                   descriptorsMonitor,
 +                   descriptorsMonitor);
 +      log.monitoringDescriptorChangesInDirectory(descriptorsDirectory.getAbsolutePath());
 +
 +      // Add support for conf/shared-providers
 +      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(descriptorsMonitor, descriptorsDirectory);
 +      initListener(sharedProvidersDirectory, spm, spm);
 +      log.monitoringProviderConfigChangesInDirectory(sharedProvidersDirectory.getAbsolutePath());
 +
 +      // For all the descriptors currently in the descriptors dir at start-up time, trigger topology generation.
 +      // This happens prior to the start-up loading of the topologies.
 +      String[] descriptorFilenames =  descriptorsDirectory.list();
 +      if (descriptorFilenames != null) {
-           for (String descriptorFilename : descriptorFilenames) {
-               if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
-                   descriptorsMonitor.onFileChange(new File(descriptorsDirectory, descriptorFilename));
-               }
++        for (String descriptorFilename : descriptorFilenames) {
++          if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
++            // If there isn't a corresponding topology file, or if the descriptor has been modified since the
++            // corresponding topology file was generated, then trigger generation of one
++            File matchingTopologyFile = getExistingFile(topologiesDirectory, FilenameUtils.getBaseName(descriptorFilename));
++            if (matchingTopologyFile == null ||
++                    matchingTopologyFile.lastModified() < (new File(descriptorsDirectory, descriptorFilename)).lastModified()) {
++              descriptorsMonitor.onFileChange(new File(descriptorsDirectory, descriptorFilename));
++            }
 +          }
++        }
 +      }
 +
++      // Initialize the remote configuration monitor, if it has been configured
++      remoteMonitor = RemoteConfigurationMonitorFactory.get(config);
++
 +    } catch (IOException | SAXException io) {
 +      throw new ServiceLifecycleException(io.getMessage());
 +    }
 +  }
 +
- 
 +  /**
 +   * Utility method for listing the files in the specified directory.
 +   * This method is "nicer" than File#listFiles() because it will not return null.
 +   *
 +   * @param directory The directory whose files should be returned.
 +   *
 +   * @return A List of the Files in the directory.
 +   */
 +  private static List<File> listFiles(File directory) {
-     List<File> result = null;
++    List<File> result;
 +    File[] files = directory.listFiles();
 +    if (files != null) {
 +      result = Arrays.asList(files);
 +    } else {
 +      result = Collections.emptyList();
 +    }
 +    return result;
 +  }
 +
 +  /**
 +   * Search for a file in the specified directory whose base name (filename without extension) matches the
 +   * specified basename.
 +   *
 +   * @param directory The directory in which to search.
 +   * @param basename  The basename of interest.
 +   *
 +   * @return The matching File, or null if there is no match.
 +   */
 +  private static File getExistingFile(File directory, String basename) {
 +    File match = null;
 +    for (File file : listFiles(directory)) {
 +      if (FilenameUtils.getBaseName(file.getName()).equals(basename)) {
 +        match = file;
 +        break;
 +      }
 +    }
 +    return match;
 +  }
 +
 +  /**
 +   * Write the specified content to a file.
 +   *
 +   * @param dest    The destination directory.
 +   * @param name    The name of the file.
 +   * @param content The contents of the file.
 +   *
 +   * @return true, if the write succeeds; otherwise, false.
 +   */
 +  private static boolean writeConfig(File dest, String name, String content) {
 +    boolean result = false;
 +
 +    File destFile = new File(dest, name);
 +    try {
 +      FileUtils.writeStringToFile(destFile, content);
 +      log.wroteConfigurationFile(destFile.getAbsolutePath());
 +      result = true;
 +    } catch (IOException e) {
 +      log.failedToWriteConfigurationFile(destFile.getAbsolutePath(), e);
 +    }
 +
 +    return result;
 +  }
 +
 +
 +  /**
 +   * Change handler for simple descriptors
 +   */
 +  public static class DescriptorsMonitor extends FileAlterationListenerAdaptor
 +                                          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<String>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("json");
 +      SUPPORTED_EXTENSIONS.add("yml");
 +      SUPPORTED_EXTENSIONS.add("yaml");
 +    }
 +
 +    private File topologiesDir;
 +
 +    private AliasService aliasService;
 +
 +    private Map<String, List<String>> providerConfigReferences = new HashMap<>();
 +
 +
 +    static boolean isDescriptorFile(String filename) {
 +      return SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(filename));
 +    }
 +
 +    public DescriptorsMonitor(File topologiesDir, AliasService aliasService) {
 +      this.topologiesDir  = topologiesDir;
 +      this.aliasService   = aliasService;
 +    }
 +
 +    List<String> getReferencingDescriptors(String providerConfigPath) {
 +      List<String> result = providerConfigReferences.get(FilenameUtils.normalize(providerConfigPath));
 +      if (result == null) {
 +        result = Collections.emptyList();
 +      }
 +      return result;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      // For simple descriptors, we need to make sure to delete any corresponding full topology descriptors to trigger undeployment
 +      for (String ext : DefaultTopologyService.SUPPORTED_TOPOLOGY_FILE_EXTENSIONS) {
 +        File topologyFile =
 +                new File(topologiesDir, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +        if (topologyFile.exists()) {
 +          log.deletingTopologyForDescriptorDeletion(topologyFile.getName(), file.getName());
 +          topologyFile.delete();
 +        }
 +      }
 +
 +      String normalizedFilePath = FilenameUtils.normalize(file.getAbsolutePath());
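 +      // Locate the provider configuration (if any) that this descriptor referenced.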
 +      String reference = null;
 +      for (Map.Entry<String, List<String>> entry : providerConfigReferences.entrySet()) {
 +        if (entry.getValue().contains(normalizedFilePath)) {
 +          reference = entry.getKey();
 +          break;
 +        }
 +      }
 +
 +      if (reference != null) {
 +        providerConfigReferences.get(reference).remove(normalizedFilePath);
 +        log.removedProviderConfigurationReference(normalizedFilePath, reference);
 +      }
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      try {
 +        // When a simple descriptor has been created or modified, generate the new topology descriptor
 +        Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
 +        log.generatedTopologyForDescriptorChange(result.get("topology").getName(), file.getName());
 +
 +        // Add the provider config reference relationship for handling updates to the provider config
 +        String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
 +        if (!providerConfigReferences.containsKey(providerConfig)) {
 +          providerConfigReferences.put(providerConfig, new ArrayList<String>());
 +        }
 +        List<String> refs = providerConfigReferences.get(providerConfig);
 +        String descriptorName = FilenameUtils.normalize(file.getAbsolutePath());
 +        if (!refs.contains(descriptorName)) {
 +          // If the descriptor previously referenced another provider config, remove that stale reference
 +          for (List<String> descs : providerConfigReferences.values()) {
 +            descs.remove(descriptorName);
 +          }
 +
 +          // Add the current reference relationship
 +          refs.add(descriptorName);
 +          log.addedProviderConfigurationReference(descriptorName, providerConfig);
 +        }
 +      } catch (Exception e) {
 +        log.simpleDescriptorHandlingError(file.getName(), e);
 +      }
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
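 +
 +  // Illustrative wiring of the listener above with the commons-io monitoring API (a sketch only;
 +  // the directory names and polling interval here are assumptions, not part of this class):
 +  //
 +  //   DescriptorsMonitor dm = new DescriptorsMonitor(topologiesDir, aliasService);
 +  //   FileAlterationObserver observer = new FileAlterationObserver(descriptorsDir, dm);
 +  //   observer.addListener(dm);
 +  //   FileAlterationMonitor monitor = new FileAlterationMonitor(5000);
 +  //   monitor.addObserver(observer);
 +  //   monitor.start();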
 +
 +  /**
 +   * Change handler for shared provider configurations
 +   */
 +  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor
 +          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("xml");
 +    }
 +
 +    private DescriptorsMonitor descriptorsMonitor;
 +    private File descriptorsDir;
 +
 +
 +    SharedProviderConfigMonitor(DescriptorsMonitor descMonitor, File descriptorsDir) {
 +      this.descriptorsMonitor = descMonitor;
 +      this.descriptorsDir     = descriptorsDir;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      // For shared provider configuration, we need to update any simple descriptors that reference it
 +      for (File descriptor : getReferencingDescriptors(file)) {
 +        descriptor.setLastModified(System.currentTimeMillis());
 +      }
 +    }
 +
 +    private List<File> getReferencingDescriptors(File sharedProviderConfig) {
 +      List<File> references = new ArrayList<>();
 +
 +      // The DescriptorsMonitor already tracks which descriptors reference each provider configuration,
 +      // so simply map the normalized descriptor paths it reports back to File objects.
 +      String providerConfigPath = FilenameUtils.normalize(sharedProviderConfig.getAbsolutePath());
 +      for (String reference : descriptorsMonitor.getReferencingDescriptors(providerConfigPath)) {
 +        references.add(new File(reference));
 +      }
 +
 +      return references;
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
++  /**
++   * Listener for Ambari config change events, which will trigger re-generation (including re-discovery) of the
++   * affected topologies.
++   */
++  private static class TopologyDiscoveryTrigger implements ClusterConfigurationMonitor.ConfigurationChangeListener {
++
++    private TopologyService topologyService = null;
++
++    TopologyDiscoveryTrigger(TopologyService topologyService) {
++      this.topologyService = topologyService;
++    }
++
++    @Override
++    public void onConfigurationChange(String source, String clusterName) {
++      log.noticedClusterConfigurationChange(source, clusterName);
++      try {
++        // Identify any descriptors associated with the cluster configuration change
++        for (File descriptor : topologyService.getDescriptors()) {
++          String descriptorContent = FileUtils.readFileToString(descriptor);
++          if (descriptorContent.contains(source)) {
++            if (descriptorContent.contains(clusterName)) {
++              log.triggeringTopologyRegeneration(source, clusterName, descriptor.getAbsolutePath());
++              // 'Touch' the descriptor to trigger re-generation of the associated topology
++              descriptor.setLastModified(System.currentTimeMillis());
++            }
++          }
++        }
++      } catch (Exception e) {
++        log.errorRespondingToConfigChange(source, clusterName, e);
++      }
++    }
++  }
++
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactory.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactory.java
index 254dca1,0000000..4def2b7
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactory.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactory.java
@@@ -1,71 -1,0 +1,71 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import com.fasterxml.jackson.databind.ObjectMapper;
 +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
 +import org.apache.commons.io.FilenameUtils;
 +
 +import java.io.File;
 +import java.io.IOException;
 +
 +
 +public class SimpleDescriptorFactory {
 +
 +    /**
 +     * Create a SimpleDescriptor from the specified file.
 +     *
 +     * @param path The path to the file.
 +     * @return A SimpleDescriptor based on the contents of the file.
 +     *
 +     * @throws IOException if an error occurs while reading or parsing the descriptor file.
 +     */
 +    public static SimpleDescriptor parse(String path) throws IOException {
 +        SimpleDescriptor sd;
 +
 +        if (path.endsWith(".json")) {
 +            sd = parseJSON(path);
-         } else if (path.endsWith(".yml")) {
++        } else if (path.endsWith(".yml") || path.endsWith(".yaml")) {
 +            sd = parseYAML(path);
 +        } else {
 +            throw new IllegalArgumentException("Unsupported simple descriptor format: " + FilenameUtils.getExtension(path));
 +        }
 +
 +        return sd;
 +    }
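 +
 +    // Illustrative usage (a sketch; the descriptor path is an assumption):
 +    //   SimpleDescriptor descriptor = SimpleDescriptorFactory.parse("conf/descriptors/sandbox.json");
 +    //   String cluster = descriptor.getClusterName(); // e.g., the cluster whose services will be discovered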
 +
 +
 +    static SimpleDescriptor parseJSON(String path) throws IOException {
 +        final ObjectMapper mapper = new ObjectMapper();
 +        SimpleDescriptorImpl sd = mapper.readValue(new File(path), SimpleDescriptorImpl.class);
 +        if (sd != null) {
 +            sd.setName(FilenameUtils.getBaseName(path));
 +        }
 +        return sd;
 +    }
 +
 +
 +    static SimpleDescriptor parseYAML(String path) throws IOException {
 +        final ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
 +        SimpleDescriptorImpl sd = mapper.readValue(new File(path), SimpleDescriptorImpl.class);
 +        if (sd != null) {
 +            sd.setName(FilenameUtils.getBaseName(path));
 +        }
 +        return sd;
 +    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
index 2e3214d,0000000..30786dc
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
@@@ -1,316 -1,0 +1,382 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
- import org.apache.knox.gateway.i18n.messages.MessagesFactory;
- import org.apache.knox.gateway.services.Service;
- import org.apache.knox.gateway.topology.discovery.DefaultServiceDiscoveryConfig;
- import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
- import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileWriter;
 +import java.io.InputStreamReader;
 +import java.io.IOException;
 +
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
++import org.apache.knox.gateway.GatewayServer;
++import org.apache.knox.gateway.i18n.messages.MessagesFactory;
++import org.apache.knox.gateway.services.GatewayServices;
++import org.apache.knox.gateway.services.Service;
++import org.apache.knox.gateway.services.security.AliasService;
++import org.apache.knox.gateway.services.security.KeystoreService;
++import org.apache.knox.gateway.services.security.MasterService;
++import org.apache.knox.gateway.topology.discovery.DefaultServiceDiscoveryConfig;
++import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
++import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
 +
 +
 +/**
 + * Processes simple topology descriptors, producing full topology files, which can subsequently be deployed to the
 + * gateway.
 + */
 +public class SimpleDescriptorHandler {
 +
 +    private static final Service[] NO_GATEWAY_SERVICES = new Service[]{};
 +
 +    private static final SimpleDescriptorMessages log = MessagesFactory.get(SimpleDescriptorMessages.class);
 +
++    private static Map<String, ServiceDiscovery> discoveryInstances = new HashMap<>();
++
 +    public static Map<String, File> handle(File desc) throws IOException {
 +        return handle(desc, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, Service...gatewayServices) throws IOException {
 +        return handle(desc, desc.getParentFile(), gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory) throws IOException {
 +        return handle(desc, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory, Service...gatewayServices) throws IOException {
 +        return handle(SimpleDescriptorFactory.parse(desc.getAbsolutePath()), desc.getParentFile(), destDirectory, gatewayServices);
 +    }
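 +
 +    // Illustrative usage (a sketch; the descriptor path is an assumption):
 +    //   Map<String, File> generated = SimpleDescriptorHandler.handle(new File("conf/descriptors/sandbox.json"));
 +    //   File topology    = generated.get("topology");  // the generated topology XML
 +    //   File providerRef = generated.get("reference"); // the resolved provider configuration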
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory) {
 +        return handle(desc, srcDirectory, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory, Service...gatewayServices) {
 +        Map<String, File> result = new HashMap<>();
 +
 +        File topologyDescriptor = null;
 +
 +        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
 +        sdc.setUser(desc.getDiscoveryUser());
 +        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
 +
 +        // Use the discovery type from the descriptor. If it's unspecified, employ the default type.
 +        String discoveryType = desc.getDiscoveryType();
 +        if (discoveryType == null) {
 +            discoveryType = "AMBARI";
 +        }
 +
-         ServiceDiscovery sd = ServiceDiscoveryFactory.get(discoveryType, gatewayServices);
++        // Use the cached discovery object for the required type, if it has already been loaded
++        ServiceDiscovery sd = discoveryInstances.get(discoveryType);
++        if (sd == null) {
++            sd = ServiceDiscoveryFactory.get(discoveryType, gatewayServices);
++            discoveryInstances.put(discoveryType, sd);
++        }
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, desc.getClusterName());
 +
 +        List<String> validServiceNames = new ArrayList<>();
 +
 +        Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, List<String>>        serviceURLs   = new HashMap<>();
 +
 +        if (cluster != null) {
 +            for (SimpleDescriptor.Service descService : desc.getServices()) {
 +                String serviceName = descService.getName();
 +
 +                List<String> descServiceURLs = descService.getURLs();
 +                if (descServiceURLs == null || descServiceURLs.isEmpty()) {
 +                    descServiceURLs = cluster.getServiceURLs(serviceName);
 +                }
 +
 +                // Validate the discovered service URLs
 +                List<String> validURLs = new ArrayList<>();
 +                if (descServiceURLs != null && !descServiceURLs.isEmpty()) {
 +                    // Validate the URL(s)
 +                    for (String descServiceURL : descServiceURLs) {
 +                        if (validateURL(serviceName, descServiceURL)) {
 +                            validURLs.add(descServiceURL);
 +                        }
 +                    }
 +
 +                    if (!validURLs.isEmpty()) {
 +                        validServiceNames.add(serviceName);
 +                    }
 +                }
 +
 +                // If there is at least one valid URL associated with the service, then add it to the map
 +                if (!validURLs.isEmpty()) {
 +                    serviceURLs.put(serviceName, validURLs);
 +                } else {
 +                    log.failedToDiscoverClusterServiceURLs(serviceName, cluster.getName());
 +                }
 +
 +                // Service params
 +                if (descService.getParams() != null) {
 +                    serviceParams.put(serviceName, descService.getParams());
 +                    if (!validServiceNames.contains(serviceName)) {
 +                        validServiceNames.add(serviceName);
 +                    }
 +                }
 +            }
 +        } else {
 +            log.failedToDiscoverClusterServices(desc.getClusterName());
 +        }
 +
++        // Provision the query param encryption password here, rather than relying on the random password generated
++        // when the topology is deployed. This is to support Knox HA deployments, where multiple Knox instances are
++        // generating topologies based on a shared remote descriptor, and they must all be able to encrypt/decrypt
++        // query params with the same credentials. (KNOX-1136)
++        if (!provisionQueryParamEncryptionCredential(desc.getName())) {
++            log.unableCreatePasswordForEncryption(desc.getName());
++        }
++
 +        BufferedWriter fw = null;
 +        File providerConfig;
 +        try {
 +            // Verify that the referenced provider configuration exists before attempting to read it
 +            providerConfig = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
 +            if (providerConfig == null) {
 +                log.failedToResolveProviderConfigRef(desc.getProviderConfig());
 +                throw new IllegalArgumentException("Unresolved provider configuration reference: " +
 +                                                   desc.getProviderConfig() + " ; Topology update aborted!");
 +            }
 +            result.put("reference", providerConfig);
 +
 +            // TODO: Should the contents of the provider config be validated before incorporating it into the topology?
 +
 +            String topologyFilename = desc.getName();
 +            if (topologyFilename == null) {
 +                topologyFilename = desc.getClusterName();
 +            }
 +            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
 +
 +            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
 +
 +            fw.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
 +
 +            fw.write("<!--==============================================-->\n");
 +            fw.write("<!-- DO NOT EDIT. This is an auto-generated file. -->\n");
 +            fw.write("<!--==============================================-->\n");
 +
 +            fw.write("<topology>\n");
 +
 +            // KNOX-1105 Indicate that this topology was auto-generated
 +            fw.write("    <generated>true</generated>\n");
 +
 +            // Copy the externalized provider configuration content into the topology descriptor in-line,
 +            // ensuring the reader is closed even if the copy fails
 +            InputStreamReader policyReader = new InputStreamReader(new FileInputStream(providerConfig));
 +            try {
 +                char[] buffer = new char[1024];
 +                int count;
 +                while ((count = policyReader.read(buffer)) > 0) {
 +                    fw.write(buffer, 0, count);
 +                }
 +            } finally {
 +                policyReader.close();
 +            }
 +
 +            // Services
 +            // Sort the service names to write the services alphabetically
 +            List<String> serviceNames = new ArrayList<>(validServiceNames);
 +            Collections.sort(serviceNames);
 +
 +            // Write the service declarations
 +            for (String serviceName : serviceNames) {
++                fw.write("\n");
 +                fw.write("    <service>\n");
 +                fw.write("        <role>" + serviceName + "</role>\n");
 +
 +                // URLs
 +                List<String> urls = serviceURLs.get(serviceName);
 +                if (urls != null) {
 +                    for (String url : urls) {
 +                        fw.write("        <url>" + url + "</url>\n");
 +                    }
 +                }
 +
 +                // Params
 +                Map<String, String> svcParams = serviceParams.get(serviceName);
 +                if (svcParams != null) {
 +                    for (String paramName : svcParams.keySet()) {
 +                        fw.write("        <param>\n");
 +                        fw.write("            <name>" + paramName + "</name>\n");
 +                        fw.write("            <value>" + svcParams.get(paramName) + "</value>\n");
 +                        fw.write("        </param>\n");
 +                    }
 +                }
 +
 +                fw.write("    </service>\n");
 +            }
 +
 +            // Applications
 +            List<SimpleDescriptor.Application> apps = desc.getApplications();
 +            if (apps != null) {
 +                for (SimpleDescriptor.Application app : apps) {
 +                    fw.write("    <application>\n");
 +                    fw.write("        <name>" + app.getName() + "</name>\n");
 +
 +                    // URLs
 +                    List<String> urls = app.getURLs();
 +                    if (urls != null) {
 +                        for (String url : urls) {
 +                            fw.write("        <url>" + url + "</url>\n");
 +                        }
 +                    }
 +
 +                    // Params
 +                    Map<String, String> appParams = app.getParams();
 +                    if (appParams != null) {
 +                        for (String paramName : appParams.keySet()) {
 +                            fw.write("        <param>\n");
 +                            fw.write("            <name>" + paramName + "</name>\n");
 +                            fw.write("            <value>" + appParams.get(paramName) + "</value>\n");
 +                            fw.write("        </param>\n");
 +                        }
 +                    }
 +
 +                    fw.write("    </application>\n");
 +                }
 +            }
 +
 +            fw.write("</topology>\n");
 +
 +            fw.flush();
 +        } catch (IOException e) {
 +            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
 +            topologyDescriptor.delete();
 +        } finally {
 +            if (fw != null) {
 +                try {
 +                    fw.close();
 +                } catch (IOException e) {
 +                    // ignore
 +                }
 +            }
 +        }
 +
 +        result.put("topology", topologyDescriptor);
 +        return result;
 +    }
 +
 +
++    /**
++     * KNOX-1136
++     *
++     * Provision the query string encryption password before topology deployment, which would otherwise
++     * generate a random one; this keeps the credential consistent across Knox instances sharing a descriptor.
++     *
++     * @param topologyName The name of the topology for which the credential will be provisioned.
++     *
++     * @return true if the credential was successfully provisioned; otherwise, false.
++     */
++    private static boolean provisionQueryParamEncryptionCredential(String topologyName) {
++        boolean result = false;
++
++        try {
++            GatewayServices services = GatewayServer.getGatewayServices();
++            if (services != null) {
++                MasterService ms = services.getService("MasterService");
++                if (ms != null) {
++                    KeystoreService ks = services.getService(GatewayServices.KEYSTORE_SERVICE);
++                    if (ks != null) {
++                        if (!ks.isCredentialStoreForClusterAvailable(topologyName)) {
++                            ks.createCredentialStoreForCluster(topologyName);
++                        }
++
++                        // If the credential store existed, or it was just successfully created
++                        if (ks.getCredentialStoreForCluster(topologyName) != null) {
++                            AliasService aliasService = services.getService(GatewayServices.ALIAS_SERVICE);
++                            if (aliasService != null) {
++                                // Derive and set the query param encryption password
++                                String queryEncryptionPass = new String(ms.getMasterSecret()) + topologyName;
++                                aliasService.addAliasForCluster(topologyName, "encryptQueryString", queryEncryptionPass);
++                                result = true;
++                            }
++                        }
++                    }
++                }
++            }
++        } catch (Exception e) {
++            log.exceptionCreatingPasswordForEncryption(topologyName, e);
++        }
++
++        return result;
++    }
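++
++    // Resulting behavior, sketched (assumes two Knox HA instances sharing a master secret and the same
++    // remote descriptor, e.g. for a topology named "sandbox"):
++    //   instance A alias "encryptQueryString" = masterSecret + "sandbox"
++    //   instance B alias "encryptQueryString" = masterSecret + "sandbox"   (identical)
++    // so query params encrypted by one instance can be decrypted by the other (KNOX-1136).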
++
++
 +    private static boolean validateURL(String serviceName, String url) {
 +        boolean result = false;
 +
 +        if (url != null && !url.isEmpty()) {
 +            try {
 +                new URI(url);
 +                result = true;
 +            } catch (URISyntaxException e) {
 +                log.serviceURLValidationFailed(serviceName, url, e);
 +            }
 +        }
 +
 +        return result;
 +    }
 +
 +
 +    private static File resolveProviderConfigurationReference(String reference, File srcDirectory) {
 +        File providerConfig;
 +
 +        // If the reference includes a path
 +        if (reference.contains(File.separator)) {
 +            // Check if it's an absolute path
 +            providerConfig = new File(reference);
 +            if (!providerConfig.exists()) {
 +                // If it's not an absolute path, try treating it as a relative path
 +                providerConfig = new File(srcDirectory, reference);
 +                if (!providerConfig.exists()) {
 +                    providerConfig = null;
 +                }
 +            }
 +        } else { // No file path, just a name
 +            // Check if it's co-located with the referencing descriptor
 +            providerConfig = new File(srcDirectory, reference);
 +            if (!providerConfig.exists()) {
 +                // Check the shared-providers config location
 +                File sharedProvidersDir = new File(srcDirectory, "../shared-providers");
 +                if (sharedProvidersDir.exists()) {
 +                    providerConfig = new File(sharedProvidersDir, reference);
 +                    if (!providerConfig.exists()) {
 +                        // Check if it's a valid name without the extension
 +                        providerConfig = new File(sharedProvidersDir, reference + ".xml");
 +                        if (!providerConfig.exists()) {
 +                            providerConfig = null;
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        return providerConfig;
 +    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
index 07c4350,0000000..28962f9
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
@@@ -1,50 -1,0 +1,59 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.gateway.topology.simple")
 +public interface SimpleDescriptorMessages {
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "Service discovery for cluster {0} failed.")
 +    void failedToDiscoverClusterServices(final String cluster);
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "No valid URLs were discovered for {0} in the {1} cluster.")
 +    void failedToDiscoverClusterServiceURLs(final String serviceName, final String clusterName);
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "Failed to resolve the referenced provider configuration {0}.")
 +    void failedToResolveProviderConfigRef(final String providerConfigRef);
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "URL validation failed for {0} URL {1} : {2}")
 +    void serviceURLValidationFailed(final String serviceName,
 +                                    final String url,
 +                                    @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +    @Message(level = MessageLevel.ERROR,
 +            text = "Error generating topology {0} from simple descriptor: {1}")
 +    void failedToGenerateTopologyFromSimpleDescriptor(final String topologyFile,
 +                                                      @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
++    @Message( level = MessageLevel.ERROR,
++              text = "Error creating a password for query string encryption for {0}: {1}" )
++    void exceptionCreatingPasswordForEncryption(String topologyName,
++                                                @StackTrace( level = MessageLevel.DEBUG) Exception e);
++
++    @Message( level = MessageLevel.ERROR,
++            text = "Failed to create a password for query string encryption for {0}." )
++    void unableCreatePasswordForEncryption(String topologyName);
++
 +}


[45/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java b/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java
deleted file mode 100644
index 0bfc39a..0000000
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.service.config.remote.config.RemoteConfigurationRegistriesAccessor;
-import org.apache.hadoop.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-import org.apache.hadoop.gateway.services.security.AliasService;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.attribute.PosixFilePermission;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Function;
-
-/**
- * An implementation of RemoteConfigurationRegistryClientService intended to be used for testing without having to
- * connect to an actual remote configuration registry.
- */
-public class LocalFileSystemRemoteConfigurationRegistryClientService implements RemoteConfigurationRegistryClientService {
-
-    public static final String TYPE = "LocalFileSystem";
-
-    private Map<String, RemoteConfigurationRegistryClient> clients = new HashMap<>();
-
-
-    @Override
-    public void setAliasService(AliasService aliasService) {
-        // N/A
-    }
-
-    @Override
-    public RemoteConfigurationRegistryClient get(String name) {
-        return clients.get(name);
-    }
-
-    @Override
-    public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
-        List<RemoteConfigurationRegistryConfig> registryConfigurations =
-                                        RemoteConfigurationRegistriesAccessor.getRemoteRegistryConfigurations(config);
-        for (RemoteConfigurationRegistryConfig registryConfig : registryConfigurations) {
-            if (TYPE.equalsIgnoreCase(registryConfig.getRegistryType())) {
-                RemoteConfigurationRegistryClient registryClient = createClient(registryConfig);
-                clients.put(registryConfig.getName(), registryClient);
-            }
-        }
-    }
-
-    @Override
-    public void start() throws ServiceLifecycleException {
-
-    }
-
-    @Override
-    public void stop() throws ServiceLifecycleException {
-
-    }
-
-
-    private RemoteConfigurationRegistryClient createClient(RemoteConfigurationRegistryConfig config) {
-        String rootDir = config.getConnectionString();
-
-        return new RemoteConfigurationRegistryClient() {
-            private File root = new File(rootDir);
-
-            @Override
-            public String getAddress() {
-                return root.getAbsolutePath();
-            }
-
-            @Override
-            public boolean entryExists(String path) {
-                return (new File(root, path)).exists();
-            }
-
-            @Override
-            public List<EntryACL> getACL(String path) {
-                List<EntryACL> result = new ArrayList<>();
-
-                Path resolved = Paths.get(rootDir, path);
-                try {
-                    Map<String, List<String>> collected = new HashMap<>();
-
-                    Set<PosixFilePermission> perms = Files.getPosixFilePermissions(resolved);
-                    for (PosixFilePermission perm : perms) {
-                        String[] parsed = perm.toString().split("_");
-                        collected.computeIfAbsent(parsed[0].toLowerCase(), s -> new ArrayList<>()).add(parsed[1].toLowerCase());
-                    }
-
-                    for (String id : collected.keySet()) {
-                        EntryACL acl = new EntryACL() {
-                            @Override
-                            public String getId() {
-                                return id;
-                            }
-
-                            @Override
-                            public String getType() {
-                                return "fs";
-                            }
-
-                            @Override
-                            public Object getPermissions() {
-                                return collected.get(id).toString();
-                            }
-
-                            @Override
-                            public boolean canRead() {
-                                return true;
-                            }
-
-                            @Override
-                            public boolean canWrite() {
-                                return true;
-                            }
-                        };
-                        result.add(acl);
-                    }
-                } catch (IOException e) {
-                    e.printStackTrace();
-                }
-                return result;
-            }
-
-            @Override
-            public List<String> listChildEntries(String path) {
-                List<String> result = new ArrayList<>();
-
-                File entry = new File(root, path);
-                if (entry.exists() && entry.isDirectory()) {
-                    String[] list = entry.list();
-                    if (list != null) {
-                        result.addAll(Arrays.asList(entry.list()));
-                    }
-                }
-
-                return result;
-            }
-
-            @Override
-            public String getEntryData(String path) {
-                return getEntryData(path, "UTF-8");
-            }
-
-            @Override
-            public String getEntryData(String path, String encoding) {
-                String result = null;
-                File entry = new File(root, path);
-                if (entry.isFile() && entry.exists()) {
-                    try {
-                        result = FileUtils.readFileToString(entry, encoding);
-                    } catch (IOException e) {
-                        e.printStackTrace();
-                    }
-                }
-                return result;
-            }
-
-            @Override
-            public void createEntry(String path) {
-                createEntry(path, "");
-            }
-
-            @Override
-            public void createEntry(String path, String data) {
-                createEntry(path, data, "UTF-8");
-            }
-
-            @Override
-            public void createEntry(String path, String data, String encoding) {
-                File entry = new File(root, path);
-                if (!entry.exists()) {
-                    if (data != null) {
-                        try {
-                            FileUtils.writeStringToFile(entry, data, encoding);
-                        } catch (IOException e) {
-                            e.printStackTrace();
-                        }
-                    }
-                }
-            }
-
-            @Override
-            public int setEntryData(String path, String data) {
-                setEntryData(path, data, "UTF-8");
-                return 0;
-            }
-
-            @Override
-            public int setEntryData(String path, String data, String encoding) {
-                File entry = new File(root, path);
-                if (entry.exists()) {
-                    try {
-                        FileUtils.writeStringToFile(entry, data, encoding);
-                    } catch (IOException e) {
-                        e.printStackTrace();
-                    }
-                }
-                return 0;
-            }
-
-            @Override
-            public boolean isAuthenticationConfigured() {
-                return false;
-            }
-
-            @Override
-            public void setACL(String path, List<EntryACL> acls) {
-                //
-            }
-
-            @Override
-            public void deleteEntry(String path) {
-                File entry = new File(root, path);
-                if (entry.exists()) {
-                    entry.delete();
-                }
-            }
-
-            @Override
-            public void addChildEntryListener(String path, ChildEntryListener listener) throws Exception {
-                // N/A
-            }
-
-            @Override
-            public void addEntryListener(String path, EntryListener listener) throws Exception {
-                // N/A
-            }
-
-            @Override
-            public void removeEntryListener(String path) throws Exception {
-                // N/A
-            }
-        };
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java b/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java
deleted file mode 100644
index 42e79c1..0000000
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote;
-
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-public class LocalFileSystemRemoteConfigurationRegistryClientServiceProvider implements RemoteConfigurationRegistryClientServiceProvider {
-
-    @Override
-    public String getType() {
-        return LocalFileSystemRemoteConfigurationRegistryClientService.TYPE;
-    }
-
-    @Override
-    public RemoteConfigurationRegistryClientService newInstance() {
-        return new LocalFileSystemRemoteConfigurationRegistryClientService();
-    }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java b/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
deleted file mode 100644
index 1c4ed6e..0000000
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.InstanceSpec;
-import org.apache.curator.test.TestingCluster;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.service.config.remote.zk.ZooKeeperClientService;
-import org.apache.hadoop.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.easymock.EasyMock;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test the ZooKeeperConfigMonitor WITHOUT SASL configured or znode ACLs applied.
- * The implementation of the monitor is the same regardless, since the ACLs are defined by the ZooKeeper znode
- * creator, and the SASL config is purely JAAS (and external to the implementation).
- */
-public class ZooKeeperConfigurationMonitorTest {
-
-    private static final String PATH_KNOX = "/knox";
-    private static final String PATH_KNOX_CONFIG = PATH_KNOX + "/config";
-    private static final String PATH_KNOX_PROVIDERS = PATH_KNOX_CONFIG + "/shared-providers";
-    private static final String PATH_KNOX_DESCRIPTORS = PATH_KNOX_CONFIG + "/descriptors";
-
-    private static File testTmp;
-    private static File providersDir;
-    private static File descriptorsDir;
-
-    private static TestingCluster zkCluster;
-
-    private static CuratorFramework client;
-
-    private GatewayConfig gc;
-
-
-    @BeforeClass
-    public static void setupSuite() throws Exception {
-        testTmp = TestUtils.createTempDir(ZooKeeperConfigurationMonitorTest.class.getName());
-        File confDir   = TestUtils.createTempDir(testTmp + "/conf");
-        providersDir   = TestUtils.createTempDir(confDir + "/shared-providers");
-        descriptorsDir = TestUtils.createTempDir(confDir + "/descriptors");
-
-        configureAndStartZKCluster();
-    }
-
-    private static void configureAndStartZKCluster() throws Exception {
-        // Configure security for the ZK cluster instances
-        Map<String, Object> customInstanceSpecProps = new HashMap<>();
-        customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
-        customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
-
-        // Define the test cluster
-        List<InstanceSpec> instanceSpecs = new ArrayList<>();
-        for (int i = 0 ; i < 3 ; i++) {
-            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
-            instanceSpecs.add(is);
-        }
-        zkCluster = new TestingCluster(instanceSpecs);
-
-        // Start the cluster
-        zkCluster.start();
-
-        // Create the client for the test cluster
-        client = CuratorFrameworkFactory.builder()
-                                        .connectString(zkCluster.getConnectString())
-                                        .retryPolicy(new ExponentialBackoffRetry(100, 3))
-                                        .build();
-        assertNotNull(client);
-        client.start();
-
-        // Create the knox config paths with an ACL for the sasl user configured for the client
-        List<ACL> acls = new ArrayList<>();
-        acls.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
-
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
-        assertNotNull("Failed to create node:" + PATH_KNOX_DESCRIPTORS,
-                client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
-        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
-        assertNotNull("Failed to create node:" + PATH_KNOX_PROVIDERS,
-                client.checkExists().forPath(PATH_KNOX_PROVIDERS));
-    }
-
-    @AfterClass
-    public static void tearDownSuite() throws Exception {
-        // Clean up the ZK nodes, and close the client
-        if (client != null) {
-            client.delete().deletingChildrenIfNeeded().forPath(PATH_KNOX);
-            client.close();
-        }
-
-        // Shutdown the ZK cluster
-        zkCluster.close();
-
-        // Delete the working dir
-        testTmp.delete();
-    }
-
-    @Test
-    public void testZooKeeperConfigMonitor() throws Exception {
-        String configMonitorName = "remoteConfigMonitorClient";
-
-        // Setup the base GatewayConfig mock
-        gc = EasyMock.createNiceMock(GatewayConfig.class);
-        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
-        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
-                .andReturn(Collections.singletonList(configMonitorName))
-                .anyTimes();
-        final String registryConfig =
-                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
-                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
-        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
-                .andReturn(registryConfig)
-                .anyTimes();
-        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
-        EasyMock.replay(gc);
-
-        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
-        EasyMock.replay(aliasService);
-
-        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
-        clientService.setAliasService(aliasService);
-        clientService.init(gc, Collections.emptyMap());
-        clientService.start();
-
-        DefaultRemoteConfigurationMonitor cm = new DefaultRemoteConfigurationMonitor(gc, clientService);
-
-        try {
-            cm.start();
-        } catch (Exception e) {
-            fail("Failed to start monitor: " + e.getMessage());
-        }
-
-        try {
-            final String pc_one_znode = getProviderPath("providers-config1.xml");
-            final File pc_one         = new File(providersDir, "providers-config1.xml");
-            final String pc_two_znode = getProviderPath("providers-config2.xml");
-            final File pc_two         = new File(providersDir, "providers-config2.xml");
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(pc_one_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(pc_one.exists());
-            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_one));
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(getProviderPath("providers-config2.xml"), TEST_PROVIDERS_CONFIG_2.getBytes());
-            Thread.sleep(100);
-            assertTrue(pc_two.exists());
-            assertEquals(TEST_PROVIDERS_CONFIG_2, FileUtils.readFileToString(pc_two));
-
-            client.setData().forPath(pc_two_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(pc_two.exists());
-            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_two));
-
-            client.delete().forPath(pc_two_znode);
-            Thread.sleep(100);
-            assertFalse(pc_two.exists());
-
-            client.delete().forPath(pc_one_znode);
-            Thread.sleep(100);
-            assertFalse(pc_one.exists());
-
-            final String desc_one_znode   = getDescriptorPath("test1.json");
-            final String desc_two_znode   = getDescriptorPath("test2.json");
-            final String desc_three_znode = getDescriptorPath("test3.json");
-            final File desc_one           = new File(descriptorsDir, "test1.json");
-            final File desc_two           = new File(descriptorsDir, "test2.json");
-            final File desc_three         = new File(descriptorsDir, "test3.json");
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_one_znode, TEST_DESCRIPTOR_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_one.exists());
-            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_one));
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_two_znode, TEST_DESCRIPTOR_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_two.exists());
-            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_two));
-
-            client.setData().forPath(desc_two_znode, TEST_DESCRIPTOR_2.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_two.exists());
-            assertEquals(TEST_DESCRIPTOR_2, FileUtils.readFileToString(desc_two));
-
-            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_three_znode, TEST_DESCRIPTOR_1.getBytes());
-            Thread.sleep(100);
-            assertTrue(desc_three.exists());
-            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_three));
-
-            client.delete().forPath(desc_two_znode);
-            Thread.sleep(100);
-            assertFalse("Expected test2.json to have been deleted.", desc_two.exists());
-
-            client.delete().forPath(desc_three_znode);
-            Thread.sleep(100);
-            assertFalse(desc_three.exists());
-
-            client.delete().forPath(desc_one_znode);
-            Thread.sleep(100);
-            assertFalse(desc_one.exists());
-        } finally {
-            cm.stop();
-        }
-    }
-
-    private static String getDescriptorPath(String descriptorName) {
-        return PATH_KNOX_DESCRIPTORS + "/" + descriptorName;
-    }
-
-    private static String getProviderPath(String providerConfigName) {
-        return PATH_KNOX_PROVIDERS + "/" + providerConfigName;
-    }
-
-
-    private static final String TEST_PROVIDERS_CONFIG_1 =
-            "<gateway>\n" +
-            "    <provider>\n" +
-            "        <role>identity-assertion</role>\n" +
-            "        <name>Default</name>\n" +
-            "        <enabled>true</enabled>\n" +
-            "    </provider>\n" +
-            "    <provider>\n" +
-            "        <role>hostmap</role>\n" +
-            "        <name>static</name>\n" +
-            "        <enabled>true</enabled>\n" +
-            "        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
-            "    </provider>\n" +
-            "</gateway>\n";
-
-    private static final String TEST_PROVIDERS_CONFIG_2 =
-            "<gateway>\n" +
-            "    <provider>\n" +
-            "        <role>authentication</role>\n" +
-            "        <name>ShiroProvider</name>\n" +
-            "        <enabled>true</enabled>\n" +
-            "        <param>\n" +
-            "            <name>sessionTimeout</name>\n" +
-            "            <value>30</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>main.ldapRealm</name>\n" +
-            "            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>main.ldapContextFactory</name>\n" +
-            "            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>main.ldapRealm.contextFactory</name>\n" +
-            "            <value>$ldapContextFactory</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>main.ldapRealm.userDnTemplate</name>\n" +
-            "            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>main.ldapRealm.contextFactory.url</name>\n" +
-            "            <value>ldap://localhost:33389</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
-            "            <value>simple</value>\n" +
-            "        </param>\n" +
-            "        <param>\n" +
-            "            <name>urls./**</name>\n" +
-            "            <value>authcBasic</value>\n" +
-            "        </param>\n" +
-            "    </provider>\n" +
-            "</gateway>\n";
-
-    private static final String TEST_DESCRIPTOR_1 =
-            "{\n" +
-            "  \"discovery-type\":\"AMBARI\",\n" +
-            "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
-            "  \"discovery-user\":\"maria_dev\",\n" +
-            "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
-            "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
-            "  \"cluster\":\"Sandbox\",\n" +
-            "  \"services\":[\n" +
-            "    {\"name\":\"NODEUI\"},\n" +
-            "    {\"name\":\"YARNUI\"},\n" +
-            "    {\"name\":\"HDFSUI\"},\n" +
-            "    {\"name\":\"OOZIEUI\"},\n" +
-            "    {\"name\":\"HBASEUI\"},\n" +
-            "    {\"name\":\"NAMENODE\"},\n" +
-            "    {\"name\":\"JOBTRACKER\"},\n" +
-            "    {\"name\":\"WEBHDFS\"},\n" +
-            "    {\"name\":\"WEBHCAT\"},\n" +
-            "    {\"name\":\"OOZIE\"},\n" +
-            "    {\"name\":\"WEBHBASE\"},\n" +
-            "    {\"name\":\"RESOURCEMANAGER\"},\n" +
-            "    {\"name\":\"AMBARI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]},\n" +
-            "    {\"name\":\"AMBARIUI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]}\n" +
-            "  ]\n" +
-            "}\n";
-
-    private static final String TEST_DESCRIPTOR_2 =
-            "{\n" +
-            "  \"discovery-type\":\"AMBARI\",\n" +
-            "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
-            "  \"discovery-user\":\"maria_dev\",\n" +
-            "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
-            "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
-            "  \"cluster\":\"Sandbox\",\n" +
-            "  \"services\":[\n" +
-            "    {\"name\":\"NAMENODE\"},\n" +
-            "    {\"name\":\"JOBTRACKER\"},\n" +
-            "    {\"name\":\"WEBHDFS\"},\n" +
-            "    {\"name\":\"WEBHCAT\"},\n" +
-            "    {\"name\":\"OOZIE\"},\n" +
-            "    {\"name\":\"WEBHBASE\"},\n" +
-            "    {\"name\":\"RESOURCEMANAGER\"}\n" +
-            "  ]\n" +
-            "}\n";
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java b/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java
new file mode 100644
index 0000000..3bf7d2e
--- /dev/null
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientService.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.service.config.remote.config.RemoteConfigurationRegistriesAccessor;
+import org.apache.knox.gateway.services.ServiceLifecycleException;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+import org.apache.knox.gateway.services.security.AliasService;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+
+/**
+ * An implementation of RemoteConfigurationRegistryClientService intended to be used for testing without having to
+ * connect to an actual remote configuration registry.
+ */
+public class LocalFileSystemRemoteConfigurationRegistryClientService implements RemoteConfigurationRegistryClientService {
+
+    public static final String TYPE = "LocalFileSystem";
+
+    private Map<String, RemoteConfigurationRegistryClient> clients = new HashMap<>();
+
+
+    @Override
+    public void setAliasService(AliasService aliasService) {
+        // N/A
+    }
+
+    @Override
+    public RemoteConfigurationRegistryClient get(String name) {
+        return clients.get(name);
+    }
+
+    @Override
+    public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
+        List<RemoteConfigurationRegistryConfig> registryConfigurations =
+                                        RemoteConfigurationRegistriesAccessor.getRemoteRegistryConfigurations(config);
+        for (RemoteConfigurationRegistryConfig registryConfig : registryConfigurations) {
+            if (TYPE.equalsIgnoreCase(registryConfig.getRegistryType())) {
+                RemoteConfigurationRegistryClient registryClient = createClient(registryConfig);
+                clients.put(registryConfig.getName(), registryClient);
+            }
+        }
+    }
+
+    @Override
+    public void start() throws ServiceLifecycleException {
+
+    }
+
+    @Override
+    public void stop() throws ServiceLifecycleException {
+
+    }
+
+
+    private RemoteConfigurationRegistryClient createClient(RemoteConfigurationRegistryConfig config) {
+        String rootDir = config.getConnectionString();
+
+        return new RemoteConfigurationRegistryClient() {
+            private File root = new File(rootDir);
+
+            @Override
+            public String getAddress() {
+                return root.getAbsolutePath();
+            }
+
+            @Override
+            public boolean entryExists(String path) {
+                return (new File(root, path)).exists();
+            }
+
+            @Override
+            public List<EntryACL> getACL(String path) {
+                List<EntryACL> result = new ArrayList<>();
+
+                Path resolved = Paths.get(rootDir, path);
+                try {
+                    Map<String, List<String>> collected = new HashMap<>();
+
+                    Set<PosixFilePermission> perms = Files.getPosixFilePermissions(resolved);
+                    for (PosixFilePermission perm : perms) {
+                        String[] parsed = perm.toString().split("_");
+                        collected.computeIfAbsent(parsed[0].toLowerCase(), s -> new ArrayList<>()).add(parsed[1].toLowerCase());
+                    }
+
+                    for (String id : collected.keySet()) {
+                        EntryACL acl = new EntryACL() {
+                            @Override
+                            public String getId() {
+                                return id;
+                            }
+
+                            @Override
+                            public String getType() {
+                                return "fs";
+                            }
+
+                            @Override
+                            public Object getPermissions() {
+                                return collected.get(id).toString();
+                            }
+
+                            @Override
+                            public boolean canRead() {
+                                return true;
+                            }
+
+                            @Override
+                            public boolean canWrite() {
+                                return true;
+                            }
+                        };
+                        result.add(acl);
+                    }
+                } catch (IOException e) {
+                    e.printStackTrace();
+                }
+                return result;
+            }
+
+            @Override
+            public List<String> listChildEntries(String path) {
+                List<String> result = new ArrayList<>();
+
+                File entry = new File(root, path);
+                if (entry.exists() && entry.isDirectory()) {
+                    String[] list = entry.list();
+                    if (list != null) {
+                        result.addAll(Arrays.asList(list));
+                    }
+                }
+
+                return result;
+            }
+
+            @Override
+            public String getEntryData(String path) {
+                return getEntryData(path, "UTF-8");
+            }
+
+            @Override
+            public String getEntryData(String path, String encoding) {
+                String result = null;
+                File entry = new File(root, path);
+                if (entry.isFile() && entry.exists()) {
+                    try {
+                        result = FileUtils.readFileToString(entry, encoding);
+                    } catch (IOException e) {
+                        e.printStackTrace();
+                    }
+                }
+                return result;
+            }
+
+            @Override
+            public void createEntry(String path) {
+                createEntry(path, "");
+            }
+
+            @Override
+            public void createEntry(String path, String data) {
+                createEntry(path, data, "UTF-8");
+            }
+
+            @Override
+            public void createEntry(String path, String data, String encoding) {
+                File entry = new File(root, path);
+                if (!entry.exists()) {
+                    if (data != null) {
+                        try {
+                            FileUtils.writeStringToFile(entry, data, encoding);
+                        } catch (IOException e) {
+                            e.printStackTrace();
+                        }
+                    }
+                }
+            }
+
+            @Override
+            public int setEntryData(String path, String data) {
+                setEntryData(path, data, "UTF-8");
+                return 0;
+            }
+
+            @Override
+            public int setEntryData(String path, String data, String encoding) {
+                File entry = new File(root, path);
+                if (entry.exists()) {
+                    try {
+                        FileUtils.writeStringToFile(entry, data, encoding);
+                    } catch (IOException e) {
+                        e.printStackTrace();
+                    }
+                }
+                return 0;
+            }
+
+            @Override
+            public boolean isAuthenticationConfigured() {
+                return false;
+            }
+
+            @Override
+            public void setACL(String path, List<EntryACL> acls) {
+                //
+            }
+
+            @Override
+            public void deleteEntry(String path) {
+                File entry = new File(root, path);
+                if (entry.exists()) {
+                    entry.delete();
+                }
+            }
+
+            @Override
+            public void addChildEntryListener(String path, ChildEntryListener listener) throws Exception {
+                // N/A
+            }
+
+            @Override
+            public void addEntryListener(String path, EntryListener listener) throws Exception {
+                // N/A
+            }
+
+            @Override
+            public void removeEntryListener(String path) throws Exception {
+                // N/A
+            }
+        };
+    }
+
+}
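For illustration, a minimal sketch of how a test might exercise this implementation directly; gatewayConfig is a placeholder assumed to define a LocalFileSystem-type registry named "myLocalRegistry" whose connection string is a local directory (this wiring is not part of the patch):

    LocalFileSystemRemoteConfigurationRegistryClientService service =
            new LocalFileSystemRemoteConfigurationRegistryClientService();
    service.setAliasService(null); // ignored by this implementation
    service.init(gatewayConfig, java.util.Collections.emptyMap());
    service.start();
    RemoteConfigurationRegistryClient client = service.get("myLocalRegistry");
    client.createEntry("knox/config/descriptors/test.json", "{}");
    assert client.entryExists("knox/config/descriptors/test.json");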

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java b/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java
new file mode 100644
index 0000000..3b96068
--- /dev/null
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/service/config/remote/LocalFileSystemRemoteConfigurationRegistryClientServiceProvider.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote;
+
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+public class LocalFileSystemRemoteConfigurationRegistryClientServiceProvider implements RemoteConfigurationRegistryClientServiceProvider {
+
+    @Override
+    public String getType() {
+        return LocalFileSystemRemoteConfigurationRegistryClientService.TYPE;
+    }
+
+    @Override
+    public RemoteConfigurationRegistryClientService newInstance() {
+        return new LocalFileSystemRemoteConfigurationRegistryClientService();
+    }
+}
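The ServiceLoader-based factory can only discover this provider if it is registered as a service; the test-resources registration file added later in this patch consists of the single line:

    org.apache.knox.gateway.service.config.remote.LocalFileSystemRemoteConfigurationRegistryClientServiceProvider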

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
new file mode 100644
index 0000000..75cd5d0
--- /dev/null
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.InstanceSpec;
+import org.apache.curator.test.TestingCluster;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientService;
+import org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.test.TestUtils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.easymock.EasyMock;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test the ZooKeeperConfigurationMonitor WITHOUT SASL configured or znode ACLs applied.
+ * The implementation of the monitor is the same regardless, since the ACLs are defined by the ZooKeeper znode
+ * creator, and the SASL config is purely JAAS (and external to the implementation).
+ */
+public class ZooKeeperConfigurationMonitorTest {
+
+    private static final String PATH_KNOX = "/knox";
+    private static final String PATH_KNOX_CONFIG = PATH_KNOX + "/config";
+    private static final String PATH_KNOX_PROVIDERS = PATH_KNOX_CONFIG + "/shared-providers";
+    private static final String PATH_KNOX_DESCRIPTORS = PATH_KNOX_CONFIG + "/descriptors";
+
+    private static File testTmp;
+    private static File providersDir;
+    private static File descriptorsDir;
+
+    private static TestingCluster zkCluster;
+
+    private static CuratorFramework client;
+
+    private GatewayConfig gc;
+
+
+    @BeforeClass
+    public static void setupSuite() throws Exception {
+        testTmp = TestUtils.createTempDir(ZooKeeperConfigurationMonitorTest.class.getName());
+        File confDir   = TestUtils.createTempDir(testTmp + "/conf");
+        providersDir   = TestUtils.createTempDir(confDir + "/shared-providers");
+        descriptorsDir = TestUtils.createTempDir(confDir + "/descriptors");
+
+        configureAndStartZKCluster();
+    }
+
+    private static void configureAndStartZKCluster() throws Exception {
+        // Configure security for the ZK cluster instances
+        Map<String, Object> customInstanceSpecProps = new HashMap<>();
+        customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
+        customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
+
+        // Define the test cluster
+        List<InstanceSpec> instanceSpecs = new ArrayList<>();
+        for (int i = 0 ; i < 3 ; i++) {
+            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
+            instanceSpecs.add(is);
+        }
+        zkCluster = new TestingCluster(instanceSpecs);
+
+        // Start the cluster
+        zkCluster.start();
+
+        // Create the client for the test cluster
+        client = CuratorFrameworkFactory.builder()
+                                        .connectString(zkCluster.getConnectString())
+                                        .retryPolicy(new ExponentialBackoffRetry(100, 3))
+                                        .build();
+        assertNotNull(client);
+        client.start();
+
+        // Create the knox config paths with an open ACL, since this test does not apply znode ACLs for a SASL user
+        List<ACL> acls = new ArrayList<>();
+        acls.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
+
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
+        assertNotNull("Failed to create node: " + PATH_KNOX_DESCRIPTORS,
+                client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
+        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
+        assertNotNull("Failed to create node: " + PATH_KNOX_PROVIDERS,
+                client.checkExists().forPath(PATH_KNOX_PROVIDERS));
+    }
+
+    @AfterClass
+    public static void tearDownSuite() throws Exception {
+        // Clean up the ZK nodes, and close the client
+        if (client != null) {
+            client.delete().deletingChildrenIfNeeded().forPath(PATH_KNOX);
+            client.close();
+        }
+
+        // Shutdown the ZK cluster
+        zkCluster.close();
+
+        // Delete the working dir
+        FileUtils.deleteQuietly(testTmp);
+    }
+
+    @Test
+    public void testZooKeeperConfigMonitor() throws Exception {
+        String configMonitorName = "remoteConfigMonitorClient";
+
+        // Setup the base GatewayConfig mock
+        gc = EasyMock.createNiceMock(GatewayConfig.class);
+        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
+        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
+                .andReturn(Collections.singletonList(configMonitorName))
+                .anyTimes();
+        final String registryConfig =
+                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
+        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
+                .andReturn(registryConfig)
+                .anyTimes();
+        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
+        EasyMock.replay(gc);
+
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.replay(aliasService);
+
+        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
+        clientService.setAliasService(aliasService);
+        clientService.init(gc, Collections.emptyMap());
+        clientService.start();
+
+        DefaultRemoteConfigurationMonitor cm = new DefaultRemoteConfigurationMonitor(gc, clientService);
+
+        try {
+            cm.start();
+        } catch (Exception e) {
+            fail("Failed to start monitor: " + e.getMessage());
+        }
+
+        try {
+            final String pc_one_znode = getProviderPath("providers-config1.xml");
+            final File pc_one         = new File(providersDir, "providers-config1.xml");
+            final String pc_two_znode = getProviderPath("providers-config2.xml");
+            final File pc_two         = new File(providersDir, "providers-config2.xml");
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(pc_one_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(pc_one.exists());
+            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_one));
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(pc_two_znode, TEST_PROVIDERS_CONFIG_2.getBytes());
+            Thread.sleep(100);
+            assertTrue(pc_two.exists());
+            assertEquals(TEST_PROVIDERS_CONFIG_2, FileUtils.readFileToString(pc_two));
+
+            client.setData().forPath(pc_two_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(pc_two.exists());
+            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_two));
+
+            client.delete().forPath(pc_two_znode);
+            Thread.sleep(100);
+            assertFalse(pc_two.exists());
+
+            client.delete().forPath(pc_one_znode);
+            Thread.sleep(100);
+            assertFalse(pc_one.exists());
+
+            final String desc_one_znode   = getDescriptorPath("test1.json");
+            final String desc_two_znode   = getDescriptorPath("test2.json");
+            final String desc_three_znode = getDescriptorPath("test3.json");
+            final File desc_one           = new File(descriptorsDir, "test1.json");
+            final File desc_two           = new File(descriptorsDir, "test2.json");
+            final File desc_three         = new File(descriptorsDir, "test3.json");
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_one_znode, TEST_DESCRIPTOR_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_one.exists());
+            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_one));
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_two_znode, TEST_DESCRIPTOR_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_two.exists());
+            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_two));
+
+            client.setData().forPath(desc_two_znode, TEST_DESCRIPTOR_2.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_two.exists());
+            assertEquals(TEST_DESCRIPTOR_2, FileUtils.readFileToString(desc_two));
+
+            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_three_znode, TEST_DESCRIPTOR_1.getBytes());
+            Thread.sleep(100);
+            assertTrue(desc_three.exists());
+            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_three));
+
+            client.delete().forPath(desc_two_znode);
+            Thread.sleep(100);
+            assertFalse("Expected test2.json to have been deleted.", desc_two.exists());
+
+            client.delete().forPath(desc_three_znode);
+            Thread.sleep(100);
+            assertFalse(desc_three.exists());
+
+            client.delete().forPath(desc_one_znode);
+            Thread.sleep(100);
+            assertFalse(desc_one.exists());
+        } finally {
+            cm.stop();
+        }
+    }
+
+    private static String getDescriptorPath(String descriptorName) {
+        return PATH_KNOX_DESCRIPTORS + "/" + descriptorName;
+    }
+
+    private static String getProviderPath(String providerConfigName) {
+        return PATH_KNOX_PROVIDERS + "/" + providerConfigName;
+    }
+
+
+    private static final String TEST_PROVIDERS_CONFIG_1 =
+            "<gateway>\n" +
+            "    <provider>\n" +
+            "        <role>identity-assertion</role>\n" +
+            "        <name>Default</name>\n" +
+            "        <enabled>true</enabled>\n" +
+            "    </provider>\n" +
+            "    <provider>\n" +
+            "        <role>hostmap</role>\n" +
+            "        <name>static</name>\n" +
+            "        <enabled>true</enabled>\n" +
+            "        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
+            "    </provider>\n" +
+            "</gateway>\n";
+
+    private static final String TEST_PROVIDERS_CONFIG_2 =
+            "<gateway>\n" +
+            "    <provider>\n" +
+            "        <role>authentication</role>\n" +
+            "        <name>ShiroProvider</name>\n" +
+            "        <enabled>true</enabled>\n" +
+            "        <param>\n" +
+            "            <name>sessionTimeout</name>\n" +
+            "            <value>30</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>main.ldapRealm</name>\n" +
+            "            <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>main.ldapContextFactory</name>\n" +
+            "            <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>main.ldapRealm.contextFactory</name>\n" +
+            "            <value>$ldapContextFactory</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>main.ldapRealm.userDnTemplate</name>\n" +
+            "            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>main.ldapRealm.contextFactory.url</name>\n" +
+            "            <value>ldap://localhost:33389</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
+            "            <value>simple</value>\n" +
+            "        </param>\n" +
+            "        <param>\n" +
+            "            <name>urls./**</name>\n" +
+            "            <value>authcBasic</value>\n" +
+            "        </param>\n" +
+            "    </provider>\n" +
+            "</gateway>\n";
+
+    private static final String TEST_DESCRIPTOR_1 =
+            "{\n" +
+            "  \"discovery-type\":\"AMBARI\",\n" +
+            "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
+            "  \"discovery-user\":\"maria_dev\",\n" +
+            "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
+            "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
+            "  \"cluster\":\"Sandbox\",\n" +
+            "  \"services\":[\n" +
+            "    {\"name\":\"NODEUI\"},\n" +
+            "    {\"name\":\"YARNUI\"},\n" +
+            "    {\"name\":\"HDFSUI\"},\n" +
+            "    {\"name\":\"OOZIEUI\"},\n" +
+            "    {\"name\":\"HBASEUI\"},\n" +
+            "    {\"name\":\"NAMENODE\"},\n" +
+            "    {\"name\":\"JOBTRACKER\"},\n" +
+            "    {\"name\":\"WEBHDFS\"},\n" +
+            "    {\"name\":\"WEBHCAT\"},\n" +
+            "    {\"name\":\"OOZIE\"},\n" +
+            "    {\"name\":\"WEBHBASE\"},\n" +
+            "    {\"name\":\"RESOURCEMANAGER\"},\n" +
+            "    {\"name\":\"AMBARI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]},\n" +
+            "    {\"name\":\"AMBARIUI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]}\n" +
+            "  ]\n" +
+            "}\n";
+
+    private static final String TEST_DESCRIPTOR_2 =
+            "{\n" +
+            "  \"discovery-type\":\"AMBARI\",\n" +
+            "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
+            "  \"discovery-user\":\"maria_dev\",\n" +
+            "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
+            "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
+            "  \"cluster\":\"Sandbox\",\n" +
+            "  \"services\":[\n" +
+            "    {\"name\":\"NAMENODE\"},\n" +
+            "    {\"name\":\"JOBTRACKER\"},\n" +
+            "    {\"name\":\"WEBHDFS\"},\n" +
+            "    {\"name\":\"WEBHCAT\"},\n" +
+            "    {\"name\":\"OOZIE\"},\n" +
+            "    {\"name\":\"WEBHBASE\"},\n" +
+            "    {\"name\":\"RESOURCEMANAGER\"}\n" +
+            "  ]\n" +
+            "}\n";
+
+}
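The fixed Thread.sleep(100) calls above assume the monitor mirrors each znode change to disk within 100ms, which can be flaky on slow machines. A possible alternative (a sketch, not part of the patch) is a small polling helper:

    // Hypothetical helper: poll for a condition with a timeout instead of sleeping a fixed interval.
    private static void waitFor(java.util.function.BooleanSupplier condition, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean() && System.currentTimeMillis() < deadline) {
            Thread.sleep(25);
        }
    }

    // e.g. waitFor(pc_one::exists, 2000); in place of Thread.sleep(100); assertTrue(pc_one.exists());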

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java b/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
index 902327c..b768937 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
@@ -20,7 +20,7 @@ package org.apache.knox.gateway.util;
 import com.mycila.xmltool.XMLDoc;
 import com.mycila.xmltool.XMLTag;
 import org.apache.commons.io.FileUtils;
-import org.apache.knox.conf.Configuration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 import org.apache.knox.gateway.services.GatewayServices;
 import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider b/gateway-server/src/test/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
deleted file mode 100644
index ffd9284..0000000
--- a/gateway-server/src/test/resources/META-INF/services/org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
+++ /dev/null
@@ -1,19 +0,0 @@
-##########################################################################
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-
-org.apache.hadoop.gateway.service.config.remote.LocalFileSystemRemoteConfigurationRegistryClientServiceProvider

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-server/src/test/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider b/gateway-server/src/test/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
new file mode 100644
index 0000000..46dbdf2
--- /dev/null
+++ b/gateway-server/src/test/resources/META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
@@ -0,0 +1,19 @@
+##########################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+org.apache.knox.gateway.service.config.remote.LocalFileSystemRemoteConfigurationRegistryClientServiceProvider

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
----------------------------------------------------------------------
diff --git a/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml b/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationMessages.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationMessages.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationMessages.java
deleted file mode 100644
index 7cd1324..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationMessages.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote;
-
-import org.apache.hadoop.gateway.i18n.messages.Message;
-import org.apache.hadoop.gateway.i18n.messages.MessageLevel;
-import org.apache.hadoop.gateway.i18n.messages.Messages;
-import org.apache.hadoop.gateway.i18n.messages.StackTrace;
-
-
-/**
- *
- */
-@Messages(logger="org.apache.hadoop.gateway.service.config.remote")
-public interface RemoteConfigurationMessages {
-
-    @Message(level = MessageLevel.WARN,
-             text = "Multiple remote configuration registries are not currently supported if any of them requires authentication.")
-    void multipleRemoteRegistryConfigurations();
-
-    @Message(level = MessageLevel.ERROR, text = "Failed to resolve the credential alias {0}")
-    void unresolvedCredentialAlias(final String alias);
-
-    @Message(level = MessageLevel.ERROR, text = "An error occurred interacting with the remote configuration registry : {0}")
-    void errorInteractingWithRemoteConfigRegistry(@StackTrace(level = MessageLevel.DEBUG) Exception e);
-
-    @Message(level = MessageLevel.ERROR, text = "An error occurred handling the ACL for remote configuration {0} : {1}")
-    void errorHandlingRemoteConfigACL(final String path,
-                                      @StackTrace(level = MessageLevel.DEBUG) Exception e);
-
-    @Message(level = MessageLevel.ERROR, text = "An error occurred setting the ACL for remote configuration {0} : {1}")
-    void errorSettingEntryACL(final String path,
-                              @StackTrace(level = MessageLevel.DEBUG) Exception e);
-
-}
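For context, Knox message interfaces like the one above are obtained through the i18n MessagesFactory; a usage sketch (assuming the MessagesFactory API from the same i18n package, which this patch does not show):

    import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;

    private static final RemoteConfigurationMessages LOG =
            MessagesFactory.get(RemoteConfigurationMessages.class);
    // ...
    LOG.unresolvedCredentialAlias(aliasName);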

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java
deleted file mode 100644
index cd58e22..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-import java.util.ServiceLoader;
-
-public class RemoteConfigurationRegistryClientServiceFactory {
-
-    public static RemoteConfigurationRegistryClientService newInstance(GatewayConfig config) {
-        RemoteConfigurationRegistryClientService rcs = null;
-
-        ServiceLoader<RemoteConfigurationRegistryClientServiceProvider> providers =
-                                             ServiceLoader.load(RemoteConfigurationRegistryClientServiceProvider.class);
-        for (RemoteConfigurationRegistryClientServiceProvider provider : providers) {
-            rcs = provider.newInstance();
-            if (rcs != null) {
-                break;
-            }
-        }
-
-        return rcs;
-    }
-
-}
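A caller-side sketch of the factory above (names like gatewayConfig and aliasService are placeholders; error handling omitted):

    RemoteConfigurationRegistryClientService clientService =
            RemoteConfigurationRegistryClientServiceFactory.newInstance(gatewayConfig);
    if (clientService != null) {
        clientService.setAliasService(aliasService);
        clientService.init(gatewayConfig, java.util.Collections.emptyMap());
        clientService.start();
    }

Note that the loop returns the first provider that yields a non-null service regardless of registry type, so callers needing a particular implementation depend on ServiceLoader ordering.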

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java
deleted file mode 100644
index ddfc392..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote;
-
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-public interface RemoteConfigurationRegistryClientServiceProvider {
-
-    String getType();
-
-    RemoteConfigurationRegistryClientService newInstance();
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java
deleted file mode 100644
index 6409250..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote;
-
-public interface RemoteConfigurationRegistryConfig {
-
-    String getName();
-
-    String getRegistryType();
-
-    String getConnectionString();
-
-    String getNamespace();
-
-    boolean isSecureRegistry();
-
-    String getAuthType(); // digest, kerberos, etc...
-
-    String getPrincipal();
-
-    String getCredentialAlias();
-
-    String getKeytab();
-
-    boolean isUseTicketCache();
-
-    boolean isUseKeyTab();
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java
deleted file mode 100644
index ebcae1b..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A set of RemoteConfigurationRegistry configurations based on a set of property name-value pairs.
- */
-class DefaultRemoteConfigurationRegistries extends RemoteConfigurationRegistries {
-
-    private static final String PROPERTY_DELIM       = ";";
-    private static final String PROPERTY_VALUE_DELIM = "=";
-
-    private List<RemoteConfigurationRegistry> configuredRegistries = new ArrayList<>();
-
-    /**
-     * Derive the remote registry configurations from the specified GatewayConfig.
-     *
-     * @param gc The source GatewayConfig
-     */
-    DefaultRemoteConfigurationRegistries(GatewayConfig gc) {
-        List<String> configRegistryNames = gc.getRemoteRegistryConfigurationNames();
-        for (String configRegistryName : configRegistryNames) {
-            configuredRegistries.add(extractConfigForRegistry(gc, configRegistryName));
-        }
-    }
-
-    /**
-     * Extract the configuration for the specified registry configuration name.
-     *
-     * @param gc           The GatewayConfig from which to extract the registry config.
-     * @param registryName The name of the registry config.
-     *
-     * @return The resulting RemoteConfigurationRegistry object, or null.
-     */
-    private static RemoteConfigurationRegistry extractConfigForRegistry(GatewayConfig gc, String registryName) {
-        RemoteConfigurationRegistry result = new RemoteConfigurationRegistry();
-
-        result.setName(registryName);
-
-        Map<String, String> properties = parsePropertyValue(gc.getRemoteRegistryConfiguration(registryName));
-
-        result.setRegistryType(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE));
-        result.setConnectionString(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS));
-        result.setNamespace(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_NAMESPACE));
-        result.setAuthType(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE));
-        result.setPrincipal(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL));
-        result.setCredentialAlias(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS));
-        result.setKeytab(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB));
-        result.setUseKeytab(Boolean.valueOf(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB)));
-        result.setUseTicketCache(Boolean.valueOf(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE)));
-
-        return result;
-    }
-
-    /**
-     * Parse the specified registry config properties String.
-     *
-     * @param value The property value content from GatewayConfig.
-     *
-     * @return A Map of the parsed properties and their respective values.
-     */
-    private static Map<String, String> parsePropertyValue(final String value) {
-        Map<String, String> result = new HashMap<>();
-
-        if (value != null) {
-            String[] props = value.split(PROPERTY_DELIM);
-            for (String prop : props) {
-                String[] split = prop.split(PROPERTY_VALUE_DELIM);
-                String propName  = split[0];
-                String propValue = (split.length > 1) ? split[1] : null;
-                result.put(propName, propValue);
-            }
-        }
-
-        return result;
-    }
-
-    @Override
-    List<RemoteConfigurationRegistry> getRegistryConfigurations() {
-        return configuredRegistries;
-    }
-
-}
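For illustration, the delimited form parsed above matches the registry definition built in the ZooKeeper test earlier in this patch (the literal values of the GatewayConfig constants are not shown here, so they are referenced symbolically):

    String definition =
            GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=ZooKeeper;" +
            GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=host1:2181,host2:2181,host3:2181";
    // parsePropertyValue(definition) yields one map entry per ';'-separated name=value pair.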

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistries.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistries.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistries.java
deleted file mode 100644
index fa045c0..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistries.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.ArrayList;
-import java.util.List;
-
-@XmlRootElement(name="remote-configuration-registries")
-class RemoteConfigurationRegistries {
-
-    private List<RemoteConfigurationRegistry> registryConfigurations = new ArrayList<>();
-
-    @XmlElement(name="remote-configuration-registry")
-    List<RemoteConfigurationRegistry> getRegistryConfigurations() {
-        return registryConfigurations;
-    }
-}
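Given the JAXB bindings above, the external registries file consumed by the accessor below would have this shape (the per-registry element contents depend on the RemoteConfigurationRegistry bean, which this patch does not show):

    <remote-configuration-registries>
      <remote-configuration-registry>
        <!-- per-registry fields: name, type, address, etc. -->
      </remote-configuration-registry>
    </remote-configuration-registries>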

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java
deleted file mode 100644
index 9fed589..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
-public class RemoteConfigurationRegistriesAccessor {
-
-    // System property for specifying a reference to an XML configuration external to the gateway config
-    private static final String XML_CONFIG_REFERENCE_SYSTEM_PROPERTY_NAME =
-                                                                "org.apache.knox.gateway.remote.registry.config.file";
-
-
-    public static List<RemoteConfigurationRegistryConfig> getRemoteRegistryConfigurations(GatewayConfig gatewayConfig) {
-        List<RemoteConfigurationRegistryConfig> result = new ArrayList<>();
-
-        boolean useReferencedFile = false;
-
-        // First check for the system property pointing to a valid XML config for the remote registries
-        String remoteConfigRegistryConfigFilename = System.getProperty(XML_CONFIG_REFERENCE_SYSTEM_PROPERTY_NAME);
-        if (remoteConfigRegistryConfigFilename != null) {
-            File remoteConfigRegistryConfigFile = new File(remoteConfigRegistryConfigFilename);
-            if (remoteConfigRegistryConfigFile.exists()) {
-                useReferencedFile = true;
-                // Parse the file, and build the registry config set
-                result.addAll(RemoteConfigurationRegistriesParser.getConfig(remoteConfigRegistryConfigFilename));
-            }
-        }
-
-        // If the system property was not set to a valid reference to another config file, then try to derive the
-        // registry configurations from the gateway config.
-        if (!useReferencedFile) {
-            RemoteConfigurationRegistries remoteConfigRegistries =
-                                                            new DefaultRemoteConfigurationRegistries(gatewayConfig);
-            result.addAll(remoteConfigRegistries.getRegistryConfigurations());
-        }
-
-        return result;
-    }
-
-}

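The accessor removed above implements a two-level precedence: a system property naming an external XML file wins when that file exists, and the gateway configuration is the fallback otherwise. A minimal sketch of that lookup, with a hypothetical property name standing in for the Knox one:

    import java.io.File;

    public class ConfigSourceSketch {
        // Hypothetical stand-in for the system property used by the accessor above.
        static final String OVERRIDE_PROP = "example.remote.registry.config.file";

        // Returns the referenced file when the property points at one that exists,
        // or null to signal the caller to fall back to the embedded configuration.
        static File resolveExternalConfig() {
            String path = System.getProperty(OVERRIDE_PROP);
            if (path != null) {
                File candidate = new File(path);
                if (candidate.exists()) {
                    return candidate;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            File external = resolveExternalConfig();
            System.out.println(external != null
                    ? "Using referenced file: " + external
                    : "Falling back to gateway configuration");
        }
    }

Note that, like the removed code, this silently ignores a dangling reference (property set but file missing); a production variant might log a warning in that case.
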
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java
deleted file mode 100644
index 3ea71ef..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
-class RemoteConfigurationRegistriesParser {
-
-    static List<RemoteConfigurationRegistryConfig> getConfig(String configFilename) {
-        List<RemoteConfigurationRegistryConfig> result = new ArrayList<>();
-
-        File file = new File(configFilename);
-
-        try {
-            JAXBContext jaxbContext = JAXBContext.newInstance(RemoteConfigurationRegistries.class);
-            Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
-            RemoteConfigurationRegistries parsedContent = (RemoteConfigurationRegistries) jaxbUnmarshaller.unmarshal(file);
-            if (parsedContent != null) {
-                result.addAll(parsedContent.getRegistryConfigurations());
-            }
-        } catch (JAXBException e) {
-            e.printStackTrace();
-        }
-
-        return result;
-    }
-}

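The parser above is the inverse direction: JAXB unmarshalling of that XML back into objects. One weakness worth noting is the bare e.printStackTrace(), which silently yields an empty list on malformed input. A standalone sketch (again with a hypothetical Entries type rather than the Knox classes) that propagates the failure instead:

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.JAXBException;
    import javax.xml.bind.Unmarshaller;
    import javax.xml.bind.annotation.XmlElement;
    import javax.xml.bind.annotation.XmlRootElement;
    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.List;

    public class JaxbParseSketch {

        @XmlRootElement(name = "entries")
        public static class Entries {
            private List<String> entries = new ArrayList<>();

            @XmlElement(name = "entry")
            public List<String> getEntries() { return entries; }
        }

        static Entries parse(String xml) {
            try {
                Unmarshaller u = JAXBContext.newInstance(Entries.class).createUnmarshaller();
                return (Entries) u.unmarshal(new StringReader(xml));
            } catch (JAXBException e) {
                // Surface the failure to the caller instead of printing and returning nothing.
                throw new IllegalStateException("Failed to parse registry configuration", e);
            }
        }

        public static void main(String[] args) {
            System.out.println(parse("<entries><entry>one</entry><entry>two</entry></entries>")
                    .getEntries()); // [one, two]
        }
    }
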

[48/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
index 75cd5d0,0000000..2e753f1
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/monitor/ZooKeeperConfigurationMonitorTest.java
@@@ -1,355 -1,0 +1,368 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.monitor;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.curator.framework.CuratorFramework;
 +import org.apache.curator.framework.CuratorFrameworkFactory;
 +import org.apache.curator.retry.ExponentialBackoffRetry;
 +import org.apache.curator.test.InstanceSpec;
 +import org.apache.curator.test.TestingCluster;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientService;
 +import org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.test.TestUtils;
 +import org.apache.zookeeper.CreateMode;
 +import org.apache.zookeeper.ZooDefs;
 +import org.apache.zookeeper.data.ACL;
 +import org.easymock.EasyMock;
 +import org.junit.AfterClass;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import java.io.File;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +/**
 + * Test the ZooKeeperConfigurationMonitor WITHOUT SASL configured or znode ACLs applied.
 + * The implementation of the monitor is the same regardless, since the ACLs are defined by the ZooKeeper znode
 + * creator, and the SASL config is purely JAAS (and external to the implementation).
 + */
 +public class ZooKeeperConfigurationMonitorTest {
 +
 +    private static final String PATH_KNOX = "/knox";
 +    private static final String PATH_KNOX_CONFIG = PATH_KNOX + "/config";
 +    private static final String PATH_KNOX_PROVIDERS = PATH_KNOX_CONFIG + "/shared-providers";
 +    private static final String PATH_KNOX_DESCRIPTORS = PATH_KNOX_CONFIG + "/descriptors";
 +
 +    private static File testTmp;
 +    private static File providersDir;
 +    private static File descriptorsDir;
 +
 +    private static TestingCluster zkCluster;
 +
 +    private static CuratorFramework client;
 +
 +    private GatewayConfig gc;
 +
 +
 +    @BeforeClass
 +    public static void setupSuite() throws Exception {
 +        testTmp = TestUtils.createTempDir(ZooKeeperConfigurationMonitorTest.class.getName());
 +        File confDir   = TestUtils.createTempDir(testTmp + "/conf");
 +        providersDir   = TestUtils.createTempDir(confDir + "/shared-providers");
 +        descriptorsDir = TestUtils.createTempDir(confDir + "/descriptors");
 +
 +        configureAndStartZKCluster();
 +    }
 +
 +    private static void configureAndStartZKCluster() throws Exception {
 +        // Configure security for the ZK cluster instances
 +        Map<String, Object> customInstanceSpecProps = new HashMap<>();
 +        customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
 +        customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
 +
 +        // Define the test cluster
 +        List<InstanceSpec> instanceSpecs = new ArrayList<>();
 +        for (int i = 0 ; i < 3 ; i++) {
 +            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
 +            instanceSpecs.add(is);
 +        }
 +        zkCluster = new TestingCluster(instanceSpecs);
 +
 +        // Start the cluster
 +        zkCluster.start();
 +
 +        // Create the client for the test cluster
 +        client = CuratorFrameworkFactory.builder()
 +                                        .connectString(zkCluster.getConnectString())
 +                                        .retryPolicy(new ExponentialBackoffRetry(100, 3))
 +                                        .build();
 +        assertNotNull(client);
 +        client.start();
 +
 +        // Create the knox config paths with an ACL for the sasl user configured for the client
 +        List<ACL> acls = new ArrayList<>();
 +        acls.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
 +
 +        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_DESCRIPTORS);
 +        assertNotNull("Failed to create node:" + PATH_KNOX_DESCRIPTORS,
-                 client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
++                      client.checkExists().forPath(PATH_KNOX_DESCRIPTORS));
 +        client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).withACL(acls).forPath(PATH_KNOX_PROVIDERS);
 +        assertNotNull("Failed to create node:" + PATH_KNOX_PROVIDERS,
-                 client.checkExists().forPath(PATH_KNOX_PROVIDERS));
++                      client.checkExists().forPath(PATH_KNOX_PROVIDERS));
 +    }
 +
 +    @AfterClass
 +    public static void tearDownSuite() throws Exception {
 +        // Clean up the ZK nodes, and close the client
 +        if (client != null) {
 +            client.delete().deletingChildrenIfNeeded().forPath(PATH_KNOX);
 +            client.close();
 +        }
 +
 +        // Shutdown the ZK cluster
 +        zkCluster.close();
 +
 +        // Delete the working dir
 +        testTmp.delete();
 +    }
 +
 +    @Test
 +    public void testZooKeeperConfigMonitor() throws Exception {
 +        String configMonitorName = "remoteConfigMonitorClient";
 +
 +        // Setup the base GatewayConfig mock
 +        gc = EasyMock.createNiceMock(GatewayConfig.class);
 +        EasyMock.expect(gc.getGatewayProvidersConfigDir()).andReturn(providersDir.getAbsolutePath()).anyTimes();
 +        EasyMock.expect(gc.getGatewayDescriptorsDir()).andReturn(descriptorsDir.getAbsolutePath()).anyTimes();
 +        EasyMock.expect(gc.getRemoteRegistryConfigurationNames())
 +                .andReturn(Collections.singletonList(configMonitorName))
 +                .anyTimes();
 +        final String registryConfig =
 +                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
 +                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
 +        EasyMock.expect(gc.getRemoteRegistryConfiguration(configMonitorName))
 +                .andReturn(registryConfig)
 +                .anyTimes();
 +        EasyMock.expect(gc.getRemoteConfigurationMonitorClientName()).andReturn(configMonitorName).anyTimes();
 +        EasyMock.replay(gc);
 +
 +        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
 +        EasyMock.replay(aliasService);
 +
 +        RemoteConfigurationRegistryClientService clientService = (new ZooKeeperClientServiceProvider()).newInstance();
 +        clientService.setAliasService(aliasService);
 +        clientService.init(gc, Collections.emptyMap());
 +        clientService.start();
 +
 +        DefaultRemoteConfigurationMonitor cm = new DefaultRemoteConfigurationMonitor(gc, clientService);
 +
++        // Create a provider configuration in the test ZK, prior to starting the monitor, to make sure that the monitor
++        // will download existing entries upon starting.
++        final String preExistingProviderConfig = getProviderPath("pre-existing-providers.xml");
++        client.create().withMode(CreateMode.PERSISTENT).forPath(preExistingProviderConfig,
++                                                                TEST_PROVIDERS_CONFIG_1.getBytes());
++        File preExistingProviderConfigLocalFile = new File(providersDir, "pre-existing-providers.xml");
++        assertFalse("This file should not exist locally prior to monitor starting.",
++                    preExistingProviderConfigLocalFile.exists());
++
 +        try {
 +            cm.start();
 +        } catch (Exception e) {
 +            fail("Failed to start monitor: " + e.getMessage());
 +        }
 +
++        assertTrue("This file should exist locally immediately after monitor starting.",
++                    preExistingProviderConfigLocalFile.exists());
++
++
 +        try {
 +            final String pc_one_znode = getProviderPath("providers-config1.xml");
 +            final File pc_one         = new File(providersDir, "providers-config1.xml");
 +            final String pc_two_znode = getProviderPath("providers-config2.xml");
 +            final File pc_two         = new File(providersDir, "providers-config2.xml");
 +
 +            client.create().withMode(CreateMode.PERSISTENT).forPath(pc_one_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(pc_one.exists());
 +            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_one));
 +
 +            client.create().withMode(CreateMode.PERSISTENT).forPath(getProviderPath("providers-config2.xml"), TEST_PROVIDERS_CONFIG_2.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(pc_two.exists());
 +            assertEquals(TEST_PROVIDERS_CONFIG_2, FileUtils.readFileToString(pc_two));
 +
 +            client.setData().forPath(pc_two_znode, TEST_PROVIDERS_CONFIG_1.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(pc_two.exists());
 +            assertEquals(TEST_PROVIDERS_CONFIG_1, FileUtils.readFileToString(pc_two));
 +
 +            client.delete().forPath(pc_two_znode);
 +            Thread.sleep(100);
 +            assertFalse(pc_two.exists());
 +
 +            client.delete().forPath(pc_one_znode);
 +            Thread.sleep(100);
 +            assertFalse(pc_one.exists());
 +
 +            final String desc_one_znode   = getDescriptorPath("test1.json");
 +            final String desc_two_znode   = getDescriptorPath("test2.json");
 +            final String desc_three_znode = getDescriptorPath("test3.json");
 +            final File desc_one           = new File(descriptorsDir, "test1.json");
 +            final File desc_two           = new File(descriptorsDir, "test2.json");
 +            final File desc_three         = new File(descriptorsDir, "test3.json");
 +
 +            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_one_znode, TEST_DESCRIPTOR_1.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(desc_one.exists());
 +            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_one));
 +
 +            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_two_znode, TEST_DESCRIPTOR_1.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(desc_two.exists());
 +            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_two));
 +
 +            client.setData().forPath(desc_two_znode, TEST_DESCRIPTOR_2.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(desc_two.exists());
 +            assertEquals(TEST_DESCRIPTOR_2, FileUtils.readFileToString(desc_two));
 +
 +            client.create().withMode(CreateMode.PERSISTENT).forPath(desc_three_znode, TEST_DESCRIPTOR_1.getBytes());
 +            Thread.sleep(100);
 +            assertTrue(desc_three.exists());
 +            assertEquals(TEST_DESCRIPTOR_1, FileUtils.readFileToString(desc_three));
 +
 +            client.delete().forPath(desc_two_znode);
 +            Thread.sleep(100);
 +            assertFalse("Expected test2.json to have been deleted.", desc_two.exists());
 +
 +            client.delete().forPath(desc_three_znode);
 +            Thread.sleep(100);
 +            assertFalse(desc_three.exists());
 +
 +            client.delete().forPath(desc_one_znode);
 +            Thread.sleep(100);
 +            assertFalse(desc_one.exists());
 +        } finally {
 +            cm.stop();
 +        }
 +    }
 +
 +    private static String getDescriptorPath(String descriptorName) {
 +        return PATH_KNOX_DESCRIPTORS + "/" + descriptorName;
 +    }
 +
 +    private static String getProviderPath(String providerConfigName) {
 +        return PATH_KNOX_PROVIDERS + "/" + providerConfigName;
 +    }
 +
 +
 +    private static final String TEST_PROVIDERS_CONFIG_1 =
 +            "<gateway>\n" +
 +            "    <provider>\n" +
 +            "        <role>identity-assertion</role>\n" +
 +            "        <name>Default</name>\n" +
 +            "        <enabled>true</enabled>\n" +
 +            "    </provider>\n" +
 +            "    <provider>\n" +
 +            "        <role>hostmap</role>\n" +
 +            "        <name>static</name>\n" +
 +            "        <enabled>true</enabled>\n" +
 +            "        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
 +            "    </provider>\n" +
 +            "</gateway>\n";
 +
 +    private static final String TEST_PROVIDERS_CONFIG_2 =
 +            "<gateway>\n" +
 +            "    <provider>\n" +
 +            "        <role>authentication</role>\n" +
 +            "        <name>ShiroProvider</name>\n" +
 +            "        <enabled>true</enabled>\n" +
 +            "        <param>\n" +
 +            "            <name>sessionTimeout</name>\n" +
 +            "            <value>30</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>main.ldapRealm</name>\n" +
 +            "            <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>main.ldapContextFactory</name>\n" +
 +            "            <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>main.ldapRealm.contextFactory</name>\n" +
 +            "            <value>$ldapContextFactory</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>main.ldapRealm.userDnTemplate</name>\n" +
 +            "            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>main.ldapRealm.contextFactory.url</name>\n" +
 +            "            <value>ldap://localhost:33389</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
 +            "            <value>simple</value>\n" +
 +            "        </param>\n" +
 +            "        <param>\n" +
 +            "            <name>urls./**</name>\n" +
 +            "            <value>authcBasic</value>\n" +
 +            "        </param>\n" +
 +            "    </provider>\n" +
 +            "</gateway>\n";
 +
 +    private static final String TEST_DESCRIPTOR_1 =
 +            "{\n" +
 +            "  \"discovery-type\":\"AMBARI\",\n" +
 +            "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
 +            "  \"discovery-user\":\"maria_dev\",\n" +
 +            "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
 +            "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
 +            "  \"cluster\":\"Sandbox\",\n" +
 +            "  \"services\":[\n" +
 +            "    {\"name\":\"NODEUI\"},\n" +
 +            "    {\"name\":\"YARNUI\"},\n" +
 +            "    {\"name\":\"HDFSUI\"},\n" +
 +            "    {\"name\":\"OOZIEUI\"},\n" +
 +            "    {\"name\":\"HBASEUI\"},\n" +
 +            "    {\"name\":\"NAMENODE\"},\n" +
 +            "    {\"name\":\"JOBTRACKER\"},\n" +
 +            "    {\"name\":\"WEBHDFS\"},\n" +
 +            "    {\"name\":\"WEBHCAT\"},\n" +
 +            "    {\"name\":\"OOZIE\"},\n" +
 +            "    {\"name\":\"WEBHBASE\"},\n" +
 +            "    {\"name\":\"RESOURCEMANAGER\"},\n" +
 +            "    {\"name\":\"AMBARI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]},\n" +
 +            "    {\"name\":\"AMBARIUI\", \"urls\":[\"http://c6401.ambari.apache.org:8080\"]}\n" +
 +            "  ]\n" +
 +            "}\n";
 +
 +    private static final String TEST_DESCRIPTOR_2 =
 +            "{\n" +
 +            "  \"discovery-type\":\"AMBARI\",\n" +
 +            "  \"discovery-address\":\"http://sandbox.hortonworks.com:8080\",\n" +
 +            "  \"discovery-user\":\"maria_dev\",\n" +
 +            "  \"discovery-pwd-alias\":\"sandbox.ambari.discovery.password\",\n" +
 +            "  \"provider-config-ref\":\"sandbox-providers.xml\",\n" +
 +            "  \"cluster\":\"Sandbox\",\n" +
 +            "  \"services\":[\n" +
 +            "    {\"name\":\"NAMENODE\"},\n" +
 +            "    {\"name\":\"JOBTRACKER\"},\n" +
 +            "    {\"name\":\"WEBHDFS\"},\n" +
 +            "    {\"name\":\"WEBHCAT\"},\n" +
 +            "    {\"name\":\"OOZIE\"},\n" +
 +            "    {\"name\":\"WEBHBASE\"},\n" +
 +            "    {\"name\":\"RESOURCEMANAGER\"}\n" +
 +            "  ]\n" +
 +            "}\n";
 +
 +}

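For readers new to the Curator test utilities used above: the monitor test stands up a three-node TestingCluster and drives znode changes through a CuratorFramework client. A minimal single-node sketch of the same round trip, using TestingServer from the same curator-test module:

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;
    import org.apache.curator.test.TestingServer;
    import org.apache.zookeeper.CreateMode;

    public class CuratorRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // Single-node in-process ZooKeeper; the test above uses a three-node TestingCluster.
            try (TestingServer zk = new TestingServer()) {
                CuratorFramework client = CuratorFrameworkFactory.builder()
                        .connectString(zk.getConnectString())
                        .retryPolicy(new ExponentialBackoffRetry(100, 3))
                        .build();
                client.start();

                // Write a znode and read it back, mirroring the provider-config round trip above.
                String path = "/knox/config/shared-providers/example.xml";
                client.create().creatingParentsIfNeeded()
                      .withMode(CreateMode.PERSISTENT)
                      .forPath(path, "<gateway/>".getBytes());
                System.out.println(new String(client.getData().forPath(path))); // <gateway/>

                client.close();
            }
        }
    }

The fixed 100 ms sleeps in the test exist, presumably, because the monitor receives znode events asynchronously; a fixed sleep keeps the test simple at the cost of potential flakiness on slow machines.
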
http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
index b768937,0000000..116b8dd
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
@@@ -1,1032 -1,0 +1,1048 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.util;
 +
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.test.TestUtils;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.io.PrintStream;
 +import java.net.URL;
 +import java.util.UUID;
 +
 +import static org.hamcrest.CoreMatchers.containsString;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.not;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +
 +/**
 + * @author larry
 + *
 + */
 +public class KnoxCLITest {
 +  private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
 +  private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
 +
 +  @Before
 +  public void setup() throws Exception {
 +    System.setOut(new PrintStream(outContent));
 +    System.setErr(new PrintStream(errContent));
 +  }
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryClientService() throws Exception {
 +    outContent.reset();
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    Configuration config = new GatewayConfigImpl();
 +    // Configure a client for the test local filesystem registry implementation
 +    config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=/test");
 +    cli.setConf(config);
 +
 +    // This is only to get the gateway services initialized
 +    cli.run(new String[]{"version"});
 +
 +    RemoteConfigurationRegistryClientService service =
 +                                   cli.getGatewayServices().getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
 +    assertNotNull(service);
 +    RemoteConfigurationRegistryClient client = service.get("test_client");
 +    assertNotNull(client);
 +
 +    assertNull(service.get("bogus"));
 +  }
 +
 +  @Test
 +  public void testListRemoteConfigurationRegistryClients() throws Exception {
 +    outContent.reset();
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    String[] args = { "list-registry-clients", "--master","master" };
 +
 +    Configuration config = new GatewayConfigImpl();
 +    cli.setConf(config);
 +
 +    // Test with no registry clients configured
 +    int rc = cli.run(args);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().isEmpty());
 +
 +    // Test with a single client configured
 +    // Configure a client for the test local filesystem registry implementation
 +    config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=/test1");
 +    cli.setConf(config);
 +    outContent.reset();
 +    rc = cli.run(args);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("test_client"));
 +
 +    // Configure another client for the test local filesystem registry implementation
 +    config.set("gateway.remote.config.registry.another_client", "type=LocalFileSystem;address=/test2");
 +    cli.setConf(config);
 +    outContent.reset();
 +    rc = cli.run(args);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("test_client"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("another_client"));
 +  }
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryGetACLs() throws Exception {
 +    outContent.reset();
 +
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +
 +      final String providerConfigName = "my-provider-config.xml";
 +      final String providerConfigContent = "<gateway/>\n";
 +      final File testProviderConfig = new File(testRoot, providerConfigName);
 +      final String[] uploadArgs = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
 +                                   "--registry-client", "test_client",
 +                                   "--master", "master"};
 +      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
 +
 +
 +      final String[] args = {"get-registry-acl", "/knox/config/shared-providers",
 +                             "--registry-client", "test_client",
 +                             "--master", "master"};
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      int rc = cli.run(uploadArgs);
 +      assertEquals(0, rc);
 +
 +      // Run the test command
 +      rc = cli.run(args);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
 +      String result = outContent.toString();
 +      assertEquals(result, 3, result.split("\n").length);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryUploadProviderConfig() throws Exception {
 +    outContent.reset();
 +
 +    final String providerConfigName = "my-provider-config.xml";
 +    final String providerConfigContent = "<gateway/>\n";
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +      final File testProviderConfig = new File(testRoot, providerConfigName);
 +
 +      final String[] args = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
 +                             "--registry-client", "test_client",
 +                             "--master", "master"};
 +
 +      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      // Run the test command
 +      int rc = cli.run(args);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
++
++      outContent.reset();
++      final String[] listArgs = {"list-provider-configs", "--registry-client", "test_client"};
++      cli.run(listArgs);
++      String outStr =  outContent.toString().trim();
++      assertTrue(outStr.startsWith("Provider Configurations"));
++      assertTrue(outStr.endsWith(")\n"+providerConfigName));
++
 +      File registryFile = new File(testRegistry, "knox/config/shared-providers/" + providerConfigName);
 +      assertTrue(registryFile.exists());
 +      assertEquals(FileUtils.readFileToString(registryFile), providerConfigContent);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryUploadProviderConfigWithDestinationOverride() throws Exception {
 +    outContent.reset();
 +
 +    final String providerConfigName = "my-provider-config.xml";
 +    final String entryName = "my-providers.xml";
 +    final String providerConfigContent = "<gateway/>\n";
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +      final File testProviderConfig = new File(testRoot, providerConfigName);
 +
 +      final String[] args = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
 +                             "--entry-name", entryName,
 +                             "--registry-client", "test_client",
 +                             "--master", "master"};
 +
 +      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      // Run the test command
 +      int rc = cli.run(args);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
 +      assertFalse((new File(testRegistry, "knox/config/shared-providers/" + providerConfigName)).exists());
 +      File registryFile = new File(testRegistry, "knox/config/shared-providers/" + entryName);
 +      assertTrue(registryFile.exists());
 +      assertEquals(FileUtils.readFileToString(registryFile), providerConfigContent);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryUploadDescriptor() throws Exception {
 +    outContent.reset();
 +
 +    final String descriptorName = "my-topology.json";
 +    final String descriptorContent = testDescriptorContentJSON;
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +      final File testDescriptor = new File(testRoot, descriptorName);
 +
 +      final String[] args = {"upload-descriptor", testDescriptor.getAbsolutePath(),
 +                             "--registry-client", "test_client",
 +                             "--master", "master"};
 +
 +      FileUtils.writeStringToFile(testDescriptor, descriptorContent);
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      // Run the test command
 +      int rc = cli.run(args);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
++
++      outContent.reset();
++      final String[] listArgs = {"list-descriptors", "--registry-client", "test_client"};
++      cli.run(listArgs);
++      String outStr =  outContent.toString().trim();
++      assertTrue(outStr.startsWith("Descriptors"));
++      assertTrue(outStr.endsWith(")\n"+descriptorName));
++
 +      File registryFile = new File(testRegistry, "knox/config/descriptors/" + descriptorName);
 +      assertTrue(registryFile.exists());
 +      assertEquals(FileUtils.readFileToString(registryFile), descriptorContent);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryUploadDescriptorWithDestinationOverride() throws Exception {
 +    outContent.reset();
 +
 +    final String descriptorName = "my-topology.json";
 +    final String entryName = "different-topology.json";
 +    final String descriptorContent = testDescriptorContentJSON;
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +      final File testDescriptor = new File(testRoot, descriptorName);
 +
 +      final String[] args = {"upload-descriptor", testDescriptor.getAbsolutePath(),
 +                             "--entry-name", entryName,
 +                             "--registry-client", "test_client",
 +                             "--master", "master"};
 +
 +      FileUtils.writeStringToFile(testDescriptor, descriptorContent);
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      // Run the test command
 +      int rc = cli.run(args);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
 +      assertFalse((new File(testRegistry, "knox/config/descriptors/" + descriptorName)).exists());
 +      File registryFile = new File(testRegistry, "knox/config/descriptors/" + entryName);
 +      assertTrue(registryFile.exists());
 +      assertEquals(FileUtils.readFileToString(registryFile), descriptorContent);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryDeleteProviderConfig() throws Exception {
 +    outContent.reset();
 +
 +    // Create a provider config
 +    final String providerConfigName = "my-provider-config.xml";
 +    final String providerConfigContent = "<gateway/>\n";
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +      final File testProviderConfig = new File(testRoot, providerConfigName);
 +
 +      final String[] createArgs = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
 +                                   "--registry-client", "test_client",
 +                                   "--master", "master"};
 +
 +      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      // Run the test command
 +      int rc = cli.run(createArgs);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
 +      File registryFile = new File(testRegistry, "knox/config/shared-providers/" + providerConfigName);
 +      assertTrue(registryFile.exists());
 +
 +      outContent.reset();
 +
 +      // Delete the created provider config
 +      final String[] deleteArgs = {"delete-provider-config", providerConfigName,
 +                                   "--registry-client", "test_client",
 +                                   "--master", "master"};
 +      rc = cli.run(deleteArgs);
 +      assertEquals(0, rc);
 +      assertFalse(registryFile.exists());
 +
 +      // Try to delete a provider config that does not exist
 +      rc = cli.run(new String[]{"delete-provider-config", "imaginary-providers.xml",
 +                                "--registry-client", "test_client",
 +                                "--master", "master"});
 +      assertEquals(0, rc);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +  @Test
 +  public void testRemoteConfigurationRegistryDeleteDescriptor() throws Exception {
 +    outContent.reset();
 +
 +    final String descriptorName = "my-topology.json";
 +    final String descriptorContent = testDescriptorContentJSON;
 +
 +    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
 +    try {
 +      final File testRegistry = new File(testRoot, "registryRoot");
 +      final File testDescriptor = new File(testRoot, descriptorName);
 +
 +      final String[] createArgs = {"upload-descriptor", testDescriptor.getAbsolutePath(),
 +                             "--registry-client", "test_client",
 +                             "--master", "master"};
 +
 +      FileUtils.writeStringToFile(testDescriptor, descriptorContent);
 +
 +      KnoxCLI cli = new KnoxCLI();
 +      Configuration config = new GatewayConfigImpl();
 +      // Configure a client for the test local filesystem registry implementation
 +      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
 +      cli.setConf(config);
 +
 +      // Run the test command
 +      int rc = cli.run(createArgs);
 +
 +      // Validate the result
 +      assertEquals(0, rc);
 +      File registryFile = new File(testRegistry, "knox/config/descriptors/" + descriptorName);
 +      assertTrue(registryFile.exists());
 +
 +      outContent.reset();
 +
 +      // Delete the created provider config
 +      final String[] deleteArgs = {"delete-descriptor", descriptorName,
 +                                   "--registry-client", "test_client",
 +                                   "--master", "master"};
 +      rc = cli.run(deleteArgs);
 +      assertEquals(0, rc);
 +      assertFalse(registryFile.exists());
 +
 +      // Try to delete a descriptor that does not exist
 +      rc = cli.run(new String[]{"delete-descriptor", "bogus.json",
 +                                "--registry-client", "test_client",
 +                                "--master", "master"});
 +      assertEquals(0, rc);
 +    } finally {
 +      FileUtils.forceDelete(testRoot);
 +    }
 +  }
 +
 +  @Test
 +  public void testSuccessfulAliasLifecycle() throws Exception {
 +    outContent.reset();
 +    String[] args1 = {"create-alias", "alias1", "--value", "testvalue1", "--master", "master"};
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "created."));
 +
 +    outContent.reset();
 +    String[] args2 = {"list-alias", "--master", 
 +        "master"};
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +    outContent.reset();
 +    String[] args4 = {"delete-alias", "alias1", "--master", 
 +      "master"};
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "deleted."));
 +
 +    outContent.reset();
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias1"));
 +  }
 +  
 +  @Test
 +  public void testListAndDeleteOfAliasForInvalidClusterName() throws Exception {
 +    outContent.reset();
 +    String[] args1 =
 +        { "create-alias", "alias1", "--cluster", "cluster1", "--value", "testvalue1", "--master",
 +            "master" };
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains(
 +      "alias1 has been successfully " + "created."));
 +
 +    outContent.reset();
 +    String[] args2 = { "list-alias", "--cluster", "Invalidcluster1", "--master", "master" };
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    System.out.println(outContent.toString());
 +    assertTrue(outContent.toString(),
 +      outContent.toString().contains("Invalid cluster name provided: Invalidcluster1"));
 +
 +    outContent.reset();
 +    String[] args4 =
 +        { "delete-alias", "alias1", "--cluster", "Invalidcluster1", "--master", "master" };
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(),
 +      outContent.toString().contains("Invalid cluster name provided: Invalidcluster1"));
 +
 +  }
 +
 +  @Test
 +  public void testDeleteOfNonExistAliasFromUserDefinedCluster() throws Exception {
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    try {
 +      int rc = 0;
 +      outContent.reset();
 +      String[] args1 =
 +          { "create-alias", "alias1", "--cluster", "cluster1", "--value", "testvalue1", "--master",
 +              "master" };
 +      cli.run(args1);
 +
 +      // Delete invalid alias from the cluster
 +      outContent.reset();
 +      String[] args2 = { "delete-alias", "alias2", "--cluster", "cluster1", "--master", "master" };
 +      rc = cli.run(args2);
 +      assertEquals(0, rc);
 +      assertTrue(outContent.toString().contains("No such alias exists in the cluster."));
 +    } finally {
 +      outContent.reset();
 +      String[] args1 = { "delete-alias", "alias1", "--cluster", "cluster1", "--master", "master" };
 +      cli.run(args1);
 +    }
 +  }
 +
 +  @Test
 +  public void testDeleteOfNonExistAliasFromDefaultCluster() throws Exception {
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    try {
 +      int rc = 0;
 +      outContent.reset();
 +      String[] args1 = { "create-alias", "alias1", "--value", "testvalue1", "--master", "master" };
 +      cli.run(args1);
 +
 +      // Delete invalid alias from the cluster
 +      outContent.reset();
 +      String[] args2 = { "delete-alias", "alias2", "--master", "master" };
 +      rc = cli.run(args2);
 +      assertEquals(0, rc);
 +      assertTrue(outContent.toString().contains("No such alias exists in the cluster."));
 +    } finally {
 +      outContent.reset();
 +      String[] args1 = { "delete-alias", "alias1", "--master", "master" };
 +      cli.run(args1);
 +    }
 +  }
 +
 +  @Test
 +  public void testForInvalidArgument() throws Exception {
 +    outContent.reset();
 +    String[] args1 = { "--value", "testvalue1", "--master", "master" };
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    int rc = cli.run(args1);
 +    assertEquals(-2, rc);
 +    assertTrue(outContent.toString().contains("ERROR: Invalid Command"));
 +  }
 +
 +  @Test
 +  public void testListAndDeleteOfAliasForValidClusterName() throws Exception {
 +    outContent.reset();
 +    String[] args1 =
 +        { "create-alias", "alias1", "--cluster", "cluster1", "--value", "testvalue1", "--master",
 +            "master" };
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains(
 +      "alias1 has been successfully " + "created."));
 +
 +    outContent.reset();
 +    String[] args2 = { "list-alias", "--cluster", "cluster1", "--master", "master" };
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    System.out.println(outContent.toString());
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +    outContent.reset();
 +    String[] args4 =
 +        { "delete-alias", "alias1", "--cluster", "cluster1", "--master", "master" };
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains(
 +      "alias1 has been successfully " + "deleted."));
 +
 +    outContent.reset();
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +  }
 +
 +  @Test
 +  public void testGatewayAndClusterStores() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +
 +    outContent.reset();
 +    String[] gwCreateArgs = {"create-alias", "alias1", "--value", "testvalue1", "--master", "master"};
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    rc = cli.run(gwCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "created."));
 +
 +    AliasService as = cli.getGatewayServices().getService(GatewayServices.ALIAS_SERVICE);
 +
 +    outContent.reset();
 +    String[] clusterCreateArgs = {"create-alias", "alias2", "--value", "testvalue1", "--cluster", "test", 
 +        "--master", "master"};
 +    cli = new KnoxCLI();
 +    cli.setConf( config );
 +    rc = cli.run(clusterCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias2 has been successfully " +
 +        "created."));
 +
 +    outContent.reset();
 +    String[] args2 = {"list-alias", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias2"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +    char[] passwordChars = as.getPasswordFromAliasForCluster("test", "alias2");
 +    assertNotNull(passwordChars);
 +    assertTrue(new String(passwordChars), "testvalue1".equals(new String(passwordChars)));
 +
 +    outContent.reset();
 +    String[] args1 = {"list-alias", "--cluster", "test", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias1"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias2"));
 +
 +    outContent.reset();
 +    String[] args4 = {"delete-alias", "alias1", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "deleted."));
 +    
 +    outContent.reset();
 +    String[] args5 = {"delete-alias", "alias2", "--cluster", "test", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args5);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias2 has been successfully " +
 +        "deleted."));
 +  }
 +
 +  private void createTestMaster() throws Exception {
 +    outContent.reset();
 +    String[] args = new String[]{ "create-master", "--master", "master", "--force" };
 +    KnoxCLI cli = new KnoxCLI();
 +    int rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    MasterService ms = cli.getGatewayServices().getService("MasterService");
 +    String master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master, is( "master" ) );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +  }
 +
 +  @Test
 +  public void testCreateSelfSignedCert() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +    createTestMaster();
 +    outContent.reset();
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    String[] gwCreateArgs = {"create-cert", "--hostname", "hostname1", "--master", "master"};
 +    int rc = 0;
 +    rc = cli.run(gwCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity has been successfully " +
 +        "created."));
 +  }
 +
 +  @Test
 +  public void testExportCert() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +    createTestMaster();
 +    outContent.reset();
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    String[] gwCreateArgs = {"create-cert", "--hostname", "hostname1", "--master", "master"};
 +    int rc = 0;
 +    rc = cli.run(gwCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity has been successfully " +
 +        "created."));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs2 = {"export-cert", "--type", "PEM"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs2);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Certificate gateway-identity has been successfully exported to"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity.pem"));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs2_5 = {"export-cert"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs2_5);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Certificate gateway-identity has been successfully exported to"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity.pem"));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs3 = {"export-cert", "--type", "JKS"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs3);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Certificate gateway-identity has been successfully exported to"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-client-trust.jks"));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs4 = {"export-cert", "--type", "invalid"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Invalid type for export file provided."));
 +  }
 +
 +  @Test
 +  public void testCreateMaster() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +    outContent.reset();
 +    String[] args = {"create-master", "--master", "master"};
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    rc = cli.run(args);
 +    assertEquals(0, rc);
 +    MasterService ms = cli.getGatewayServices().getService("MasterService");
 +    // assertTrue(ms.getClass().getName(), ms.getClass().getName().equals("kjdfhgjkhfdgjkh"));
 +    assertTrue( new String( ms.getMasterSecret() ), "master".equals( new String( ms.getMasterSecret() ) ) );
 +    assertTrue(outContent.toString(), outContent.toString().contains("Master secret has been persisted to disk."));
 +  }
 +
 +  @Test
 +  public void testCreateMasterGenerate() throws Exception {
 +    String[] args = {"create-master", "--generate" };
 +    int rc = 0;
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    File masterFile = new File( config.getGatewaySecurityDir(), "master" );
 +
 +    // Need to delete the master file so that the change isn't ignored.
 +    if( masterFile.exists() ) {
 +      assertThat( "Failed to delete existing master file.", masterFile.delete(), is( true ) );
 +    }
 +    outContent.reset();
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(config);
 +    rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    MasterService ms = cli.getGatewayServices().getService("MasterService");
 +    String master = String.copyValueOf( ms.getMasterSecret() );
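 +    // The generated secret is expected to be a UUID string (8-4-4-4-12 hex groups).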
 +    assertThat( master.length(), is( 36 ) );
 +    assertThat( master.indexOf( '-' ), is( 8 ) );
 +    assertThat( master.indexOf( '-', 9 ), is( 13 ) );
 +    assertThat( master.indexOf( '-', 14 ), is( 18 ) );
 +    assertThat( master.indexOf( '-', 19 ), is( 23 ) );
 +    assertThat( UUID.fromString( master ), notNullValue() );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +
 +    // Need to delete the master file so that the change isn't ignored.
 +    if( masterFile.exists() ) {
 +      assertThat( "Failed to delete existing master file.", masterFile.delete(), is( true ) );
 +    }
 +    outContent.reset();
 +    cli = new KnoxCLI();
 +    rc = cli.run(args);
 +    ms = cli.getGatewayServices().getService("MasterService");
 +    String master2 = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master2.length(), is( 36 ) );
 +    assertThat( UUID.fromString( master2 ), notNullValue() );
 +    assertThat( master2, not( is( master ) ) );
 +    assertThat( rc, is( 0 ) );
 +    assertThat(outContent.toString(), containsString("Master secret has been persisted to disk."));
 +  }
 +
 +  @Test
 +  public void testCreateMasterForce() throws Exception {
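 +    // A second create-master without --force must leave the existing secret untouched; --force replaces it.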
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    File masterFile = new File( config.getGatewaySecurityDir(), "master" );
 +
 +    // Need to delete the master file so that the change isn't ignored.
 +    if( masterFile.exists() ) {
 +      assertThat( "Failed to delete existing master file.", masterFile.delete(), is( true ) );
 +    }
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(config);
 +    MasterService ms;
 +    int rc = 0;
 +    outContent.reset();
 +
 +    String[] args = { "create-master", "--master", "test-master-1" };
 +
 +    rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    ms = cli.getGatewayServices().getService("MasterService");
 +    String master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master, is( "test-master-1" ) );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +
 +    outContent.reset();
 +    rc = cli.run(args);
 +    assertThat( rc, is(0 ) );
 +    assertThat( outContent.toString(), containsString( "Master secret is already present on disk." ) );
 +
 +    outContent.reset();
 +    args = new String[]{ "create-master", "--master", "test-master-2", "--force" };
 +    rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    ms = cli.getGatewayServices().getService("MasterService");
 +    master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master, is( "test-master-2" ) );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +  }
 +
 +  @Test
 +  public void testListTopology() throws Exception {
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +    String args[] = {"list-topologies", "--master", "knox"};
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +
 +    cli.run( args );
 +    assertThat(outContent.toString(), containsString("sandbox"));
 +    assertThat(outContent.toString(), containsString("admin"));
 +  }
 +
 +  private class GatewayConfigMock extends GatewayConfigImpl {
 +    private String confDir;
 +    public void setConfDir(String location) {
 +      confDir = location;
 +    }
 +
 +    @Override
 +    public String getGatewayConfDir(){
 +      return confDir;
 +    }
 +  }
 +
 +  private static XMLTag createBadTopology() {
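 +    // Intentionally invalid: non-boolean "enabled" values ("123", "vvv") and an empty param name.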
 +    XMLTag xml = XMLDoc.newDocument(true)
 +        .addRoot( "topology" )
 +        .addTag( "gateway" )
 +
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authentication" )
 +        .addTag( "name" ).addText( "ShiroProvider" )
 +        .addTag( "enabled" ).addText( "123" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "" )
 +        .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +        .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +        .addTag( "value" ).addText( "ldap://localhost:8443" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +        .addTag( "value" ).addText( "simple" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "urls./**" )
 +        .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "identity-assertion" )
 +        .addTag( "enabled" ).addText( "vvv" )
 +        .addTag( "name" ).addText( "Default" ).gotoParent()
 +        .addTag( "provider" )
 +        .gotoRoot()
 +        .addTag( "service" )
 +        .addTag( "role" ).addText( "test-service-role" )
 +        .gotoRoot();
 +    return xml;
 +  }
 +
 +  private static XMLTag createGoodTopology() {
 +    XMLTag xml = XMLDoc.newDocument( true )
 +        .addRoot( "topology" )
 +        .addTag( "gateway" )
 +
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authentication" )
 +        .addTag( "name" ).addText( "ShiroProvider" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm" )
 +        .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +        .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +        .addTag( "value" ).addText( "ldap://localhost:8443").gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +        .addTag( "value" ).addText( "simple" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "urls./**" )
 +        .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "identity-assertion" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "name" ).addText( "Default" ).gotoParent()
 +        .addTag( "provider" )
 +        .gotoRoot()
 +        .addTag( "service" )
 +        .addTag( "role" ).addText( "test-service-role" )
 +        .gotoRoot();
 +    return xml;
 +  }
 +
 +  private File writeTestTopology( String name, XMLTag xml ) throws IOException {
 +    // Create the test topology.
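 +    // The file is written to a temp name and then renamed into place, so readers never see a partial descriptor.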
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +
 +    File tempFile = new File( config.getGatewayTopologyDir(), name + ".xml." + UUID.randomUUID() );
 +    FileOutputStream stream = new FileOutputStream( tempFile );
 +    xml.toStream( stream );
 +    stream.close();
 +    File descriptor = new File( config.getGatewayTopologyDir(), name + ".xml" );
 +    tempFile.renameTo( descriptor );
 +    return descriptor;
 +  }
 +
 +  @Test
 +  public void testValidateTopology() throws Exception {
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +    String args[] = {"validate-topology", "--master", "knox", "--cluster", "sandbox"};
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    cli.run( args );
 +
 +    assertThat(outContent.toString(), containsString(config.getGatewayTopologyDir()));
 +    assertThat(outContent.toString(), containsString("sandbox"));
 +    assertThat(outContent.toString(), containsString("success"));
 +    outContent.reset();
 +
 +    String args2[] = {"validate-topology", "--master", "knox", "--cluster", "NotATopology"};
 +    cli.run(args2);
 +
 +    assertThat(outContent.toString(), containsString("NotATopology"));
 +    assertThat(outContent.toString(), containsString("does not exist"));
 +    outContent.reset();
 +
 +    String args3[] = {"validate-topology", "--master", "knox", "--path", config.getGatewayTopologyDir() + "/admin.xml"};
 +    cli.run(args3);
 +
 +    assertThat(outContent.toString(), containsString("admin"));
 +    assertThat(outContent.toString(), containsString("success"));
 +    outContent.reset();
 +
 +    String args4[] = {"validate-topology", "--master", "knox", "--path", "not/a/path"};
 +    cli.run(args4);
 +    assertThat(outContent.toString(), containsString("does not exist"));
 +    assertThat(outContent.toString(), containsString("not/a/path"));
 +  }
 +
 +  @Test
 +  public void testValidateTopologyOutput() throws Exception {
 +
 +    File bad = writeTestTopology( "test-cluster-bad", createBadTopology() );
 +    File good = writeTestTopology( "test-cluster-good", createGoodTopology() );
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +    String args[] = {"validate-topology", "--master", "knox", "--cluster", "test-cluster-bad"};
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    cli.run( args );
 +
 +    assertThat(outContent.toString(), containsString(config.getGatewayTopologyDir()));
 +    assertThat(outContent.toString(), containsString("test-cluster-bad"));
 +    assertThat(outContent.toString(), containsString("unsuccessful"));
 +    assertThat(outContent.toString(), containsString("Invalid content"));
 +    assertThat(outContent.toString(), containsString("Line"));
 +
 +    outContent.reset();
 +
 +    String args2[] = {"validate-topology", "--master", "knox", "--cluster", "test-cluster-good"};
 +
 +    cli.run(args2);
 +
 +    assertThat(outContent.toString(), containsString(config.getGatewayTopologyDir()));
 +    assertThat(outContent.toString(), containsString("success"));
 +    assertThat(outContent.toString(), containsString("test-cluster-good"));
 +  }
 +
 +  private static final String testDescriptorContentJSON = "{\n" +
 +                                                          "  \"discovery-address\":\"http://localhost:8080\",\n" +
 +                                                          "  \"discovery-user\":\"maria_dev\",\n" +
 +                                                          "  \"discovery-pwd-alias\":\"sandbox.discovery.password\",\n" +
 +                                                          "  \"provider-config-ref\":\"my-provider-config\",\n" +
 +                                                          "  \"cluster\":\"Sandbox\",\n" +
 +                                                          "  \"services\":[\n" +
 +                                                          "    {\"name\":\"NAMENODE\"},\n" +
 +                                                          "    {\"name\":\"JOBTRACKER\"},\n" +
 +                                                          "    {\"name\":\"WEBHDFS\"},\n" +
 +                                                          "    {\"name\":\"WEBHCAT\"},\n" +
 +                                                          "    {\"name\":\"OOZIE\"},\n" +
 +                                                          "    {\"name\":\"WEBHBASE\"},\n" +
 +                                                          "    {\"name\":\"HIVE\"},\n" +
 +                                                          "    {\"name\":\"RESOURCEMANAGER\"}\n" +
 +                                                          "  ]\n" +
 +                                                          "}";
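 +
 +  // Illustrative sketch (an assumption, not KnoxCLI's actual descriptor handling):
 +  // the simple descriptor above is plain JSON, so enumerating its services is
 +  // ordinary Jackson tree parsing. Fully-qualified names avoid touching imports.
 +  private static void printDescriptorServices() throws Exception {
 +    com.fasterxml.jackson.databind.JsonNode descriptor =
 +        new com.fasterxml.jackson.databind.ObjectMapper().readTree(testDescriptorContentJSON);
 +    System.out.println("cluster: " + descriptor.get("cluster").asText());
 +    for (com.fasterxml.jackson.databind.JsonNode service : descriptor.get("services")) {
 +      System.out.println("service: " + service.get("name").asText());
 +    }
 +  }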
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/pom.xml
----------------------------------------------------------------------


[06/53] [abbrv] knox git commit: KNOX-1081 - Manually remove the picketlink folder because of merge issue

Posted by mo...@apache.org.
KNOX-1081 - Manually remove the picketlink folder because of merge issue


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/78d35f16
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/78d35f16
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/78d35f16

Branch: refs/heads/master
Commit: 78d35f16f84f91985bbbd7582d79f648e4ef76e9
Parents: 8affbc0
Author: Sandeep More <mo...@apache.org>
Authored: Mon Oct 16 10:55:24 2017 -0400
Committer: Sandeep More <mo...@apache.org>
Committed: Mon Oct 16 10:55:24 2017 -0400

----------------------------------------------------------------------
 .../gateway/picketlink/PicketlinkMessages.java   |  0
 .../picketlink/deploy/PicketlinkConf.java        |  0
 .../PicketlinkFederationProviderContributor.java |  0
 .../filter/CaptureOriginalURLFilter.java         |  0
 .../filter/PicketlinkIdentityAdapter.java        |  0
 ....gateway.deploy.ProviderDeploymentContributor | 19 -------------------
 .../knox/gateway/picketlink/PicketlinkTest.java  |  0
 7 files changed, 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/PicketlinkMessages.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkConf.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/deploy/PicketlinkFederationProviderContributor.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/CaptureOriginalURLFilter.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java b/gateway-provider-security-picketlink/src/main/java/org/apache/knox/gateway/picketlink/filter/PicketlinkIdentityAdapter.java
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor b/gateway-provider-security-picketlink/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
deleted file mode 100644
index 2d6b75c..0000000
--- a/gateway-provider-security-picketlink/src/main/resources/META-INF/services/org.apache.knox.gateway.deploy.ProviderDeploymentContributor
+++ /dev/null
@@ -1,19 +0,0 @@
-##########################################################################
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-
-org.apache.knox.gateway.picketlink.deploy.PicketlinkFederationProviderContributor

http://git-wip-us.apache.org/repos/asf/knox/blob/78d35f16/gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java b/gateway-provider-security-picketlink/src/test/java/org/apache/knox/gateway/picketlink/PicketlinkTest.java
deleted file mode 100644
index e69de29..0000000


[51/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
index f015dd5,0000000..c0b1de8
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
+++ b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
@@@ -1,876 -1,0 +1,920 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +
 +import java.io.File;
 +import java.net.MalformedURLException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.LinkedList;
 +import java.util.List;
 +import java.util.Map;
 +
 +import static junit.framework.TestCase.assertTrue;
 +import static junit.framework.TestCase.fail;
 +import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +
 +
 +public class AmbariDynamicServiceURLCreatorTest {
 +
 +    @Test
 +    public void testHiveURLFromInternalMapping() throws Exception {
 +        testHiveURL(null);
 +    }
 +
 +    @Test
 +    public void testHiveURLFromExternalMapping() throws Exception {
 +        testHiveURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testHiveURL(Object mappingConfiguration) throws Exception {
 +
 +        final String   SERVICE_NAME = "HIVE";
 +        final String[] HOSTNAMES    = {"host3", "host2", "host4"};
 +        final String   HTTP_PATH    = "cliservice";
 +        final String   HTTP_PORT    = "10001";
 +        final String   BINARY_PORT  = "10000";
 +
 +        String expectedScheme = "http";
 +
 +        final List<String> hiveServerHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent hiveServer = EasyMock.createNiceMock(AmbariComponent.class);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("HIVE_SERVER")).andReturn(hiveServer).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Configure HTTP Transport
 +        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("false").anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn(HTTP_PATH).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("http").anyTimes();
 +        EasyMock.replay(hiveServer);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        List<String> urls = builder.create(SERVICE_NAME);
 +        assertEquals(HOSTNAMES.length, urls.size());
 +        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, HTTP_PATH);
 +
 +        // Configure BINARY Transport
 +        EasyMock.reset(hiveServer);
 +        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("false").anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn("").anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.port")).andReturn(BINARY_PORT).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("binary").anyTimes();
 +        EasyMock.replay(hiveServer);
 +
 +        // Run the test
 +        urls = builder.create(SERVICE_NAME);
 +        assertEquals(HOSTNAMES.length, urls.size());
 +        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, "");
 +
 +        // Configure HTTPS Transport
 +        EasyMock.reset(hiveServer);
 +        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("true").anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn(HTTP_PATH).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("http").anyTimes();
 +        EasyMock.replay(hiveServer);
 +
 +        // Run the test
 +        expectedScheme = "https";
 +        urls = builder.create(SERVICE_NAME);
 +        assertEquals(HOSTNAMES.length, urls.size());
 +        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, HTTP_PATH);
 +    }
 +
++
 +    @Test
 +    public void testResourceManagerURLFromInternalMapping() throws Exception {
 +        testResourceManagerURL(null);
 +    }
 +
 +    @Test
 +    public void testResourceManagerURLFromExternalMapping() throws Exception {
 +        testResourceManagerURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testResourceManagerURL(Object mappingConfiguration) throws Exception {
 +
 +        final String HTTP_ADDRESS  = "host2:1111";
 +        final String HTTPS_ADDRESS = "host2:22222";
 +
 +        // HTTP
 +        AmbariComponent resman = EasyMock.createNiceMock(AmbariComponent.class);
 +        setResourceManagerComponentExpectations(resman, HTTP_ADDRESS, HTTPS_ADDRESS, "HTTP");
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("RESOURCEMANAGER")).andReturn(resman).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        String url = builder.create("RESOURCEMANAGER").get(0);
 +        assertEquals("http://" + HTTP_ADDRESS + "/ws", url);
 +
 +        // HTTPS
 +        EasyMock.reset(resman);
 +        setResourceManagerComponentExpectations(resman, HTTP_ADDRESS, HTTPS_ADDRESS, "HTTPS_ONLY");
 +
 +        // Run the test
 +        url = builder.create("RESOURCEMANAGER").get(0);
 +        assertEquals("https://" + HTTPS_ADDRESS + "/ws", url);
 +    }
 +
 +    private void setResourceManagerComponentExpectations(final AmbariComponent resmanMock,
 +                                                         final String          httpAddress,
 +                                                         final String          httpsAddress,
 +                                                         final String          httpPolicy) {
 +        EasyMock.expect(resmanMock.getConfigProperty("yarn.resourcemanager.webapp.address")).andReturn(httpAddress).anyTimes();
 +        EasyMock.expect(resmanMock.getConfigProperty("yarn.resourcemanager.webapp.https.address")).andReturn(httpsAddress).anyTimes();
 +        EasyMock.expect(resmanMock.getConfigProperty("yarn.http.policy")).andReturn(httpPolicy).anyTimes();
 +        EasyMock.replay(resmanMock);
 +    }
 +
 +    @Test
 +    public void testJobTrackerURLFromInternalMapping() throws Exception {
 +        testJobTrackerURL(null);
 +    }
 +
 +    @Test
 +    public void testJobTrackerURLFromExternalMapping() throws Exception {
 +        testJobTrackerURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testJobTrackerURL(Object mappingConfiguration) throws Exception {
 +        final String ADDRESS = "host2:5678";
 +
 +        AmbariComponent resman = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(resman.getConfigProperty("yarn.resourcemanager.address")).andReturn(ADDRESS).anyTimes();
 +        EasyMock.replay(resman);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("RESOURCEMANAGER")).andReturn(resman).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        String url = builder.create("JOBTRACKER").get(0);
 +        assertEquals("rpc://" + ADDRESS, url);
 +    }
 +
 +    @Test
 +    public void testNameNodeURLFromInternalMapping() throws Exception {
 +        testNameNodeURL(null);
 +    }
 +
 +    @Test
 +    public void testNameNodeURLFromExternalMapping() throws Exception {
 +        testNameNodeURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testNameNodeURL(Object mappingConfiguration) throws Exception {
 +        final String ADDRESS = "host1:1234";
 +
 +        AmbariComponent namenode = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(namenode.getConfigProperty("dfs.namenode.rpc-address")).andReturn(ADDRESS).anyTimes();
 +        EasyMock.replay(namenode);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("NAMENODE")).andReturn(namenode).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        String url = builder.create("NAMENODE").get(0);
 +        assertEquals("hdfs://" + ADDRESS, url);
 +    }
 +
++
++    @Test
++    public void testNameNodeHAURLFromInternalMapping() throws Exception {
++        testNameNodeURLHA(null);
++    }
++
++    @Test
++    public void testNameNodeHAURLFromExternalMapping() throws Exception {
++        testNameNodeURLHA(TEST_MAPPING_CONFIG);
++    }
++
++    private void testNameNodeURLHA(Object mappingConfiguration) throws Exception {
++        final String NAMESERVICE = "myNSCluster";
++
++        AmbariComponent namenode = EasyMock.createNiceMock(AmbariComponent.class);
++        EasyMock.expect(namenode.getConfigProperty("dfs.nameservices")).andReturn(NAMESERVICE).anyTimes();
++        EasyMock.replay(namenode);
++
++        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
++        EasyMock.expect(cluster.getComponent("NAMENODE")).andReturn(namenode).anyTimes();
++        EasyMock.replay(cluster);
++
++        // Run the test
++        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
++        String url = builder.create("NAMENODE").get(0);
++        assertEquals("hdfs://" + NAMESERVICE, url);
++    }
++
++
 +    @Test
 +    public void testWebHCatURLFromInternalMapping() throws Exception {
 +        testWebHCatURL(null);
 +    }
 +
 +    @Test
 +    public void testWebHCatURLFromExternalMapping() throws Exception {
 +        testWebHCatURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testWebHCatURL(Object mappingConfiguration) throws Exception {
 +
 +        final String HOSTNAME = "host3";
 +        final String PORT     = "1919";
 +
 +        AmbariComponent webhcatServer = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(webhcatServer.getConfigProperty("templeton.port")).andReturn(PORT).anyTimes();
 +        List<String> webHcatServerHosts = Collections.singletonList(HOSTNAME);
 +        EasyMock.expect(webhcatServer.getHostNames()).andReturn(webHcatServerHosts).anyTimes();
 +        EasyMock.replay(webhcatServer);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("WEBHCAT_SERVER")).andReturn(webhcatServer).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        String url = builder.create("WEBHCAT").get(0);
 +        assertEquals("http://" + HOSTNAME + ":" + PORT + "/templeton", url);
 +    }
 +
 +    @Test
 +    public void testOozieURLFromInternalMapping() throws Exception {
 +        testOozieURL(null);
 +    }
 +
 +    @Test
 +    public void testOozieURLFromExternalMapping() throws Exception {
 +        testOozieURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testOozieURL(Object mappingConfiguration) throws Exception {
 +        final String URL = "http://host3:2222";
 +
 +        AmbariComponent oozieServer = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(oozieServer.getConfigProperty("oozie.base.url")).andReturn(URL).anyTimes();
 +        EasyMock.replay(oozieServer);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("OOZIE_SERVER")).andReturn(oozieServer).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        String url = builder.create("OOZIE").get(0);
 +        assertEquals(URL, url);
 +    }
 +
 +    @Test
 +    public void testWebHBaseURLFromInternalMapping() throws Exception {
 +        testWebHBaseURL(null);
 +    }
 +
 +    @Test
 +    public void testWebHBaseURLFromExternalMapping() throws Exception {
 +        testWebHBaseURL(TEST_MAPPING_CONFIG);
 +    }
 +
 +    private void testWebHBaseURL(Object mappingConfiguration) throws Exception {
 +        final String[] HOSTNAMES = {"host2", "host4"};
 +
 +        AmbariComponent hbaseMaster = EasyMock.createNiceMock(AmbariComponent.class);
 +        List<String> hbaseMasterHosts = Arrays.asList(HOSTNAMES);
 +        EasyMock.expect(hbaseMaster.getHostNames()).andReturn(hbaseMasterHosts).anyTimes();
 +        EasyMock.replay(hbaseMaster);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("HBASE_MASTER")).andReturn(hbaseMaster).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
 +        List<String> urls = builder.create("WEBHBASE");
 +        validateServiceURLs(urls, HOSTNAMES, "http", "60080", null);
 +    }
 +
 +    @Test
 +    public void testWebHdfsURLFromInternalMapping() throws Exception {
 +        testWebHdfsURL(null);
 +    }
 +
 +    @Test
 +    public void testWebHdfsURLFromExternalMapping() throws Exception {
 +        testWebHdfsURL(TEST_MAPPING_CONFIG);
 +    }
 +
-     @Test
-     public void testWebHdfsURLFromSystemPropertyOverride() throws Exception {
-         // Write the test mapping configuration to a temp file
-         File mappingFile = File.createTempFile("mapping-config", "xml");
-         FileUtils.write(mappingFile, OVERRIDE_MAPPING_FILE_CONTENTS, "utf-8");
- 
-         // Set the system property to point to the temp file
-         System.setProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY,
-                            mappingFile.getAbsolutePath());
-         try {
-             final String ADDRESS = "host3:1357";
-             // The URL creator should apply the file contents, and create the URL accordingly
-             String url = getTestWebHdfsURL(ADDRESS, null);
- 
-             // Verify the URL matches the pattern from the file
-             assertEquals("http://" + ADDRESS + "/webhdfs/OVERRIDE", url);
-         } finally {
-             // Reset the system property, and delete the temp file
-             System.clearProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY);
-             mappingFile.delete();
-         }
-     }
- 
 +    private void testWebHdfsURL(Object mappingConfiguration) throws Exception {
 +        final String ADDRESS = "host3:1357";
 +        assertEquals("http://" + ADDRESS + "/webhdfs", getTestWebHdfsURL(ADDRESS, mappingConfiguration));
 +    }
 +
 +
 +    private String getTestWebHdfsURL(String address, Object mappingConfiguration) throws Exception {
 +        AmbariCluster.ServiceConfiguration hdfsSC = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
 +        Map<String, String> hdfsProps = new HashMap<>();
 +        hdfsProps.put("dfs.namenode.http-address", address);
 +        EasyMock.expect(hdfsSC.getProperties()).andReturn(hdfsProps).anyTimes();
 +        EasyMock.replay(hdfsSC);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getServiceConfiguration("HDFS", "hdfs-site")).andReturn(hdfsSC).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Create the URL
-         AmbariDynamicServiceURLCreator creator = newURLCreator(cluster, mappingConfiguration);
-         return creator.create("WEBHDFS").get(0);
++        List<String> urls = ServiceURLFactory.newInstance(cluster).create("WEBHDFS");
++        assertNotNull(urls);
++        assertFalse(urls.isEmpty());
++        return urls.get(0);
++    }
++
++    @Test
++    public void testWebHdfsURLHA() throws Exception {
++        final String NAMESERVICES   = "myNameServicesCluster";
++        final String HTTP_ADDRESS_1 = "host1:50070";
++        final String HTTP_ADDRESS_2 = "host2:50077";
++
++        final String EXPECTED_ADDR_1 = "http://" + HTTP_ADDRESS_1 + "/webhdfs";
++        final String EXPECTED_ADDR_2 = "http://" + HTTP_ADDRESS_2 + "/webhdfs";
++
++        AmbariComponent namenode = EasyMock.createNiceMock(AmbariComponent.class);
++        EasyMock.expect(namenode.getConfigProperty("dfs.nameservices")).andReturn(NAMESERVICES).anyTimes();
++        EasyMock.replay(namenode);
++
++        AmbariCluster.ServiceConfiguration hdfsSC = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
++        Map<String, String> hdfsProps = new HashMap<>();
++        hdfsProps.put("dfs.namenode.http-address." + NAMESERVICES + ".nn1", HTTP_ADDRESS_1);
++        hdfsProps.put("dfs.namenode.http-address." + NAMESERVICES + ".nn2", HTTP_ADDRESS_2);
++        EasyMock.expect(hdfsSC.getProperties()).andReturn(hdfsProps).anyTimes();
++        EasyMock.replay(hdfsSC);
++
++        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
++        EasyMock.expect(cluster.getComponent("NAMENODE")).andReturn(namenode).anyTimes();
++        EasyMock.expect(cluster.getServiceConfiguration("HDFS", "hdfs-site")).andReturn(hdfsSC).anyTimes();
++        EasyMock.replay(cluster);
++
++        // Create the URL
++        List<String> webhdfsURLs = ServiceURLFactory.newInstance(cluster).create("WEBHDFS");
++        assertEquals(2, webhdfsURLs.size());
++        assertTrue(webhdfsURLs.contains(EXPECTED_ADDR_1));
++        assertTrue(webhdfsURLs.contains(EXPECTED_ADDR_2));
 +    }
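 +
 +    // Illustrative sketch (an assumption, not the actual ServiceURLFactory logic):
 +    // the HA test above relies on the hdfs-site convention
 +    // dfs.namenode.http-address.<nameservice>.<nn-id>, one entry per NameNode,
 +    // so deriving the WebHDFS URLs amounts to collecting every key with that prefix.
 +    private static List<String> sketchWebHdfsHaUrls(Map<String, String> hdfsSite, String nameservice) {
 +        String prefix = "dfs.namenode.http-address." + nameservice + ".";
 +        List<String> urls = new LinkedList<>();
 +        for (Map.Entry<String, String> entry : hdfsSite.entrySet()) {
 +            if (entry.getKey().startsWith(prefix)) {
 +                urls.add("http://" + entry.getValue() + "/webhdfs");
 +            }
 +        }
 +        return urls;
 +    }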
 +
 +
 +    @Test
 +    public void testAtlasApiURL() throws Exception {
 +        final String ATLAS_REST_ADDRESS = "http://host2:21000";
 +
 +        AmbariComponent atlasServer = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.rest.address")).andReturn(ATLAS_REST_ADDRESS).anyTimes();
 +        EasyMock.replay(atlasServer);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("ATLAS_SERVER")).andReturn(atlasServer).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("ATLAS-API");
 +        assertEquals(1, urls.size());
 +        assertEquals(ATLAS_REST_ADDRESS, urls.get(0));
 +    }
 +
 +
 +    @Test
 +    public void testAtlasURL() throws Exception {
 +        final String HTTP_PORT = "8787";
 +        final String HTTPS_PORT = "8989";
 +
 +        final String[] HOSTNAMES = {"host1", "host4"};
 +        final List<String> atlastServerHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent atlasServer = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(atlasServer.getHostNames()).andReturn(atlasServerHosts).anyTimes();
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.enableTLS")).andReturn("false").anyTimes();
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.http.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.https.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(atlasServer);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("ATLAS_SERVER")).andReturn(atlasServer).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("ATLAS");
 +        validateServiceURLs(urls, HOSTNAMES, "http", HTTP_PORT, null);
 +
 +        EasyMock.reset(atlasServer);
 +        EasyMock.expect(atlasServer.getHostNames()).andReturn(atlasServerHosts).anyTimes();
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.enableTLS")).andReturn("true").anyTimes();
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.http.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.https.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(atlasServer);
 +
 +        // Run the test
 +        urls = builder.create("ATLAS");
 +        validateServiceURLs(urls, HOSTNAMES, "https", HTTPS_PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testZeppelinURL() throws Exception {
 +        final String HTTP_PORT = "8787";
 +        final String HTTPS_PORT = "8989";
 +
 +        final String[] HOSTNAMES = {"host1", "host4"};
 +        final List<String> atlastServerHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(zeppelinMaster);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +
 +        // Run the test
 +        validateServiceURLs(builder.create("ZEPPELIN"), HOSTNAMES, "http", HTTP_PORT, null);
 +
 +        EasyMock.reset(zeppelinMaster);
 +        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(zeppelinMaster);
 +
 +        // Run the test
 +        validateServiceURLs(builder.create("ZEPPELIN"), HOSTNAMES, "https", HTTPS_PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testZeppelinUiURL() throws Exception {
 +        final String HTTP_PORT = "8787";
 +        final String HTTPS_PORT = "8989";
 +
 +        final String[] HOSTNAMES = {"host1", "host4"};
 +        final List<String> atlastServerHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(zeppelinMaster);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +
 +        // Run the test
 +        validateServiceURLs(builder.create("ZEPPELINUI"), HOSTNAMES, "http", HTTP_PORT, null);
 +
 +        EasyMock.reset(zeppelinMaster);
 +        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(zeppelinMaster);
 +
 +        // Run the test
 +        validateServiceURLs(builder.create("ZEPPELINUI"), HOSTNAMES, "https", HTTPS_PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testZeppelinWsURL() throws Exception {
 +        final String HTTP_PORT = "8787";
 +        final String HTTPS_PORT = "8989";
 +
 +        final String[] HOSTNAMES = {"host1", "host4"};
 +        final List<String> atlastServerHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(zeppelinMaster);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +
 +        // Run the test
 +        validateServiceURLs(builder.create("ZEPPELINWS"), HOSTNAMES, "ws", HTTP_PORT, null);
 +
 +        EasyMock.reset(zeppelinMaster);
 +        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
 +        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
 +        EasyMock.replay(zeppelinMaster);
 +
 +        // Run the test
 +        validateServiceURLs(builder.create("ZEPPELINWS"), HOSTNAMES, "wss", HTTPS_PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testDruidCoordinatorURL() throws Exception {
 +        final String PORT = "8787";
 +
 +        final String[] HOSTNAMES = {"host3", "host2"};
 +        final List<String> druidCoordinatorHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent druidCoordinator = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(druidCoordinator.getHostNames()).andReturn(druidCoordinatorHosts).anyTimes();
 +        EasyMock.expect(druidCoordinator.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
 +        EasyMock.replay(druidCoordinator);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("DRUID_COORDINATOR")).andReturn(druidCoordinator).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("DRUID-COORDINATOR");
 +        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testDruidBrokerURL() throws Exception {
 +        final String PORT = "8181";
 +
 +        final String[] HOSTNAMES = {"host4", "host3"};
 +        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent druidBroker = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(druidBroker.getHostNames()).andReturn(druidHosts).anyTimes();
 +        EasyMock.expect(druidBroker.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
 +        EasyMock.replay(druidBroker);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("DRUID_BROKER")).andReturn(druidBroker).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("DRUID-BROKER");
 +        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testDruidRouterURL() throws Exception {
 +        final String PORT = "8282";
 +
 +        final String[] HOSTNAMES = {"host5", "host7"};
 +        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent druidRouter = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(druidRouter.getHostNames()).andReturn(druidHosts).anyTimes();
 +        EasyMock.expect(druidRouter.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
 +        EasyMock.replay(druidRouter);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("DRUID_ROUTER")).andReturn(druidRouter).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("DRUID-ROUTER");
 +        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testDruidOverlordURL() throws Exception {
 +        final String PORT = "8383";
 +
 +        final String[] HOSTNAMES = {"host4", "host1"};
 +        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent druidOverlord = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(druidOverlord.getHostNames()).andReturn(druidHosts).anyTimes();
 +        EasyMock.expect(druidOverlord.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
 +        EasyMock.replay(druidOverlord);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("DRUID_OVERLORD")).andReturn(druidOverlord).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("DRUID-OVERLORD");
 +        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testDruidSupersetURL() throws Exception {
 +        final String PORT = "8484";
 +
 +        final String[] HOSTNAMES = {"host4", "host1"};
 +        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
 +
 +        AmbariComponent druidSuperset = EasyMock.createNiceMock(AmbariComponent.class);
 +        EasyMock.expect(druidSuperset.getHostNames()).andReturn(druidHosts).anyTimes();
 +        EasyMock.expect(druidSuperset.getConfigProperty("SUPERSET_WEBSERVER_PORT")).andReturn(PORT).anyTimes();
 +        EasyMock.replay(druidSuperset);
 +
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("DRUID_SUPERSET")).andReturn(druidSuperset).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("SUPERSET");
 +        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
 +    }
 +
 +
 +    @Test
 +    public void testMissingServiceComponentURL() throws Exception {
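 +        // With no matching components available, URL creation falls back to the unresolved {HOST}/{PORT} template placeholders.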
 +        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
 +        EasyMock.expect(cluster.getComponent("DRUID_BROKER")).andReturn(null).anyTimes();
 +        EasyMock.expect(cluster.getComponent("HIVE_SERVER")).andReturn(null).anyTimes();
 +        EasyMock.replay(cluster);
 +
 +        // Run the test
 +        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
 +        List<String> urls = builder.create("DRUID-BROKER");
 +        assertNotNull(urls);
 +        assertEquals(1, urls.size());
 +        assertEquals("http://{HOST}:{PORT}", urls.get(0));
 +
 +        urls = builder.create("HIVE");
 +        assertNotNull(urls);
 +        assertEquals(1, urls.size());
 +        assertEquals("http://{HOST}:{PORT}/{PATH}", urls.get(0));
 +    }
 +
 +
 +    /**
 +     * Convenience method for creating AmbariDynamicServiceURLCreator instances from different mapping configuration
 +     * input sources.
 +     *
 +     * @param cluster       The Ambari ServiceDiscovery Cluster model
 +     * @param mappingConfig The mapping configuration, or null if the internal config should be used.
 +     *
 +     * @return An AmbariDynamicServiceURLCreator instance, capable of creating service URLs based on the specified
 +     *         cluster's configuration details.
 +     */
 +    private static AmbariDynamicServiceURLCreator newURLCreator(AmbariCluster cluster, Object mappingConfig) throws Exception {
 +        AmbariDynamicServiceURLCreator result = null;
 +
 +        if (mappingConfig == null) {
 +            result = new AmbariDynamicServiceURLCreator(cluster);
 +        } else {
 +            if (mappingConfig instanceof String) {
 +                result = new AmbariDynamicServiceURLCreator(cluster, (String) mappingConfig);
 +            } else if (mappingConfig instanceof File) {
 +                result = new AmbariDynamicServiceURLCreator(cluster, (File) mappingConfig);
 +            }
 +        }
 +
 +        return result;
 +    }
 +
 +
 +    /**
 +     * Validate the specified service URLs.
 +     *
 +     * @param urlsToValidate The URLs to validate
 +     * @param hostNames      The host names expected in the test URLs
 +     * @param scheme         The expected scheme for the URLs
 +     * @param port           The expected port for the URLs
 +     * @param path           The expected path for the URLs
 +     */
 +    private static void validateServiceURLs(List<String> urlsToValidate,
 +                                            String[]     hostNames,
 +                                            String       scheme,
 +                                            String       port,
 +                                            String       path) throws MalformedURLException {
 +
 +        List<String> hostNamesToTest = new LinkedList<>(Arrays.asList(hostNames));
 +        for (String url : urlsToValidate) {
 +            URI test = null;
 +            try {
 +                // Make sure it's a valid URL
 +                test = new URI(url);
 +            } catch (URISyntaxException e) {
 +                fail(e.getMessage());
 +            }
 +
 +            // Validate the scheme
 +            assertEquals(scheme, test.getScheme());
 +
 +            // Validate the port
 +            assertEquals(port, String.valueOf(test.getPort()));
 +
 +            // If the expected path is not specified, don't validate it
 +            if (path != null) {
 +                assertEquals("/" + path, test.getPath());
 +            }
 +
 +            // Validate the host name
 +            assertTrue(hostNamesToTest.contains(test.getHost()));
 +            hostNamesToTest.remove(test.getHost());
 +        }
 +        assertTrue(hostNamesToTest.isEmpty());
 +    }
 +
 +
 +    private static final String TEST_MAPPING_CONFIG =
 +            "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
 +            "<service-discovery-url-mappings>\n" +
 +            "  <service name=\"NAMENODE\">\n" +
-             "    <url-pattern>hdfs://{DFS_NAMENODE_RPC_ADDRESS}</url-pattern>\n" +
++            "    <url-pattern>hdfs://{DFS_NAMENODE_ADDRESS}</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"DFS_NAMENODE_RPC_ADDRESS\">\n" +
 +            "        <component>NAMENODE</component>\n" +
 +            "        <config-property>dfs.namenode.rpc-address</config-property>\n" +
 +            "      </property>\n" +
++            "      <property name=\"DFS_NAMESERVICES\">\n" +
++            "        <component>NAMENODE</component>\n" +
++            "        <config-property>dfs.nameservices</config-property>\n" +
++            "      </property>\n" +
++            "      <property name=\"DFS_NAMENODE_ADDRESS\">\n" +
++            "        <config-property>\n" +
++            "          <if property=\"DFS_NAMESERVICES\">\n" +
++            "            <then>DFS_NAMESERVICES</then>\n" +
++            "            <else>DFS_NAMENODE_RPC_ADDRESS</else>\n" +
++            "          </if>\n" +
++            "        </config-property>\n" +
++            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "\n" +
 +            "  <service name=\"JOBTRACKER\">\n" +
 +            "    <url-pattern>rpc://{YARN_RM_ADDRESS}</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"YARN_RM_ADDRESS\">\n" +
 +            "        <component>RESOURCEMANAGER</component>\n" +
 +            "        <config-property>yarn.resourcemanager.address</config-property>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "\n" +
-             "  <service name=\"WEBHDFS\">\n" +
-             "    <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs</url-pattern>\n" +
-             "    <properties>\n" +
-             "      <property name=\"WEBHDFS_ADDRESS\">\n" +
-             "        <service-config name=\"HDFS\">hdfs-site</service-config>\n" +
-             "        <config-property>dfs.namenode.http-address</config-property>\n" +
-             "      </property>\n" +
-             "    </properties>\n" +
-             "  </service>\n" +
-             "\n" +
 +            "  <service name=\"WEBHCAT\">\n" +
 +            "    <url-pattern>http://{HOST}:{PORT}/templeton</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"HOST\">\n" +
 +            "        <component>WEBHCAT_SERVER</component>\n" +
 +            "        <hostname/>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"PORT\">\n" +
 +            "        <component>WEBHCAT_SERVER</component>\n" +
 +            "        <config-property>templeton.port</config-property>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "\n" +
 +            "  <service name=\"OOZIE\">\n" +
 +            "    <url-pattern>{OOZIE_ADDRESS}</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"OOZIE_ADDRESS\">\n" +
 +            "        <component>OOZIE_SERVER</component>\n" +
 +            "        <config-property>oozie.base.url</config-property>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "\n" +
 +            "  <service name=\"WEBHBASE\">\n" +
 +            "    <url-pattern>http://{HOST}:60080</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"HOST\">\n" +
 +            "        <component>HBASE_MASTER</component>\n" +
 +            "        <hostname/>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "  <service name=\"RESOURCEMANAGER\">\n" +
 +            "    <url-pattern>{SCHEME}://{WEBAPP_ADDRESS}/ws</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"WEBAPP_HTTP_ADDRESS\">\n" +
 +            "        <component>RESOURCEMANAGER</component>\n" +
 +            "        <config-property>yarn.resourcemanager.webapp.address</config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"WEBAPP_HTTPS_ADDRESS\">\n" +
 +            "        <component>RESOURCEMANAGER</component>\n" +
 +            "        <config-property>yarn.resourcemanager.webapp.https.address</config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"HTTP_POLICY\">\n" +
 +            "        <component>RESOURCEMANAGER</component>\n" +
 +            "        <config-property>yarn.http.policy</config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"SCHEME\">\n" +
 +            "        <config-property>\n" +
 +            "          <if property=\"HTTP_POLICY\" value=\"HTTPS_ONLY\">\n" +
 +            "            <then>https</then>\n" +
 +            "            <else>http</else>\n" +
 +            "          </if>\n" +
 +            "        </config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"WEBAPP_ADDRESS\">\n" +
 +            "        <component>RESOURCEMANAGER</component>\n" +
 +            "        <config-property>\n" +
 +            "          <if property=\"HTTP_POLICY\" value=\"HTTPS_ONLY\">\n" +
 +            "            <then>WEBAPP_HTTPS_ADDRESS</then>\n" +
 +            "            <else>WEBAPP_HTTP_ADDRESS</else>\n" +
 +            "          </if>\n" +
 +            "        </config-property>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "  <service name=\"HIVE\">\n" +
 +            "    <url-pattern>{SCHEME}://{HOST}:{PORT}/{PATH}</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"HOST\">\n" +
 +            "        <component>HIVE_SERVER</component>\n" +
 +            "        <hostname/>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"USE_SSL\">\n" +
 +            "        <component>HIVE_SERVER</component>\n" +
 +            "        <config-property>hive.server2.use.SSL</config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"PATH\">\n" +
 +            "        <component>HIVE_SERVER</component>\n" +
 +            "        <config-property>hive.server2.thrift.http.path</config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"PORT\">\n" +
 +            "        <component>HIVE_SERVER</component>\n" +
 +            "        <config-property>hive.server2.thrift.http.port</config-property>\n" +
 +            "      </property>\n" +
 +            "      <property name=\"SCHEME\">\n" +
 +            "        <config-property>\n" +
 +            "            <if property=\"USE_SSL\" value=\"true\">\n" +
 +            "                <then>https</then>\n" +
 +            "                <else>http</else>\n" +
 +            "            </if>\n" +
 +            "        </config-property>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "</service-discovery-url-mappings>\n";
 +
 +
 +    private static final String OVERRIDE_MAPPING_FILE_CONTENTS =
 +            "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
 +            "<service-discovery-url-mappings>\n" +
 +            "  <service name=\"WEBHDFS\">\n" +
 +            "    <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs/OVERRIDE</url-pattern>\n" +
 +            "    <properties>\n" +
 +            "      <property name=\"WEBHDFS_ADDRESS\">\n" +
 +            "        <service-config name=\"HDFS\">hdfs-site</service-config>\n" +
 +            "        <config-property>dfs.namenode.http-address</config-property>\n" +
 +            "      </property>\n" +
 +            "    </properties>\n" +
 +            "  </service>\n" +
 +            "</service-discovery-url-mappings>\n";
 +
 +}
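
A note on the conditional mapping exercised above: the <config-property><if .../> construct lets a derived property such as DFS_NAMENODE_ADDRESS select between other properties at URL-creation time. When dfs.nameservices is defined (an HA cluster), the NAMENODE URL is built from the nameservice name; otherwise it falls back to the dfs.namenode.rpc-address value. A minimal sketch of driving this through the helpers above (the EasyMock wiring for the NAMENODE component's config lookup is assumed and not shown in this diff):

    // Sketch only; assumes the AmbariCluster mock is wired so that the
    // NAMENODE component reports dfs.nameservices (HA) or omits it (non-HA).
    AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
    // ... NAMENODE component expectations would be set up here ...
    EasyMock.replay(cluster);

    AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, TEST_MAPPING_CONFIG);
    List<String> urls = builder.create("NAMENODE");
    // HA:     hdfs://<value of dfs.nameservices>
    // non-HA: hdfs://<value of dfs.namenode.rpc-address>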

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-server/src/main/java/org/apache/knox/gateway/filter/PortMappingHelperHandler.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/filter/PortMappingHelperHandler.java
index 71df7c4,0000000..69bc0be
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/filter/PortMappingHelperHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/filter/PortMappingHelperHandler.java
@@@ -1,156 -1,0 +1,156 @@@
 +package org.apache.knox.gateway.filter;
 +
 +import org.apache.commons.lang.StringUtils;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.eclipse.jetty.server.Request;
 +import org.eclipse.jetty.server.handler.HandlerWrapper;
 +
 +import javax.servlet.ServletException;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.util.Map;
 +
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +/**
 + * This is a helper handler that adjusts the "target" path of the request.
 + * Used when the Topology Port Mapping feature is enabled.
 + * See KNOX-928.
 + * <p>
 + * This class also handles the Default Topology Feature,
 + * where any one of the topologies can be set as the "default",
 + * listen on the standard Knox port (8443), and
 + * be reached without the /gateway/{topology} context.
 + * It is essentially Topology Port Mapping for the standard port,
 + * and remains backwards compatible with the Default Topology Feature.
 + */
 +public class PortMappingHelperHandler extends HandlerWrapper {
 +
 +  private static final GatewayMessages LOG = MessagesFactory
 +      .get(GatewayMessages.class);
 +
 +  final GatewayConfig config;
 +
 +  private String defaultTopologyRedirectContext = null;
 +
 +  public PortMappingHelperHandler(final GatewayConfig config) {
 +
 +    this.config = config;
 +    //Set up context for default topology feature.
 +    String defaultTopologyName = config.getDefaultTopologyName();
 +
 +    // default topology feature can also be enabled using port mapping feature
 +    // config e.g. gateway.port.mapping.{defaultTopologyName}
 +
 +    if(defaultTopologyName == null && config.getGatewayPortMappings().values().contains(config.getGatewayPort())) {
 +
 +      for(final Map.Entry<String, Integer> entry: config.getGatewayPortMappings().entrySet()) {
 +
 +        if(entry.getValue().intValue() == config.getGatewayPort()) {
 +          defaultTopologyRedirectContext = "/" + config.getGatewayPath() + "/" + entry.getKey();
 +          break;
 +        }
 +
 +      }
 +
 +
 +    }
 +
 +    if (defaultTopologyName != null) {
 +      defaultTopologyRedirectContext = config.getDefaultAppRedirectPath();
 +      if (defaultTopologyRedirectContext != null
 +          && defaultTopologyRedirectContext.trim().isEmpty()) {
 +        defaultTopologyRedirectContext = null;
 +      }
 +    }
 +    if (defaultTopologyRedirectContext != null) {
 +      LOG.defaultTopologySetup(defaultTopologyName,
 +          defaultTopologyRedirectContext);
 +    }
 +
 +  }
 +
 +  @Override
 +  public void handle(final String target, final Request baseRequest,
 +      final HttpServletRequest request, final HttpServletResponse response)
 +      throws IOException, ServletException {
 +
 +    String newTarget = target;
-     String baseURI = baseRequest.getUri().toString();
++    String baseURI = baseRequest.getRequestURI();
 +
 +    // If Port Mapping feature enabled
 +    if (config.isGatewayPortMappingEnabled()) {
 +      int targetIndex;
 +      String context = "";
 +
 +      // extract the gateway specific part, i.e. "/{gatewayName}/"
 +      String originalContextPath = "";
 +      targetIndex = StringUtils.ordinalIndexOf(target, "/", 2);
 +
 +      // Match found e.g. /{string}/
 +      if (targetIndex > 0) {
 +        originalContextPath = target.substring(0, targetIndex + 1);
 +      } else if (targetIndex == -1) {
 +        targetIndex = StringUtils.ordinalIndexOf(target, "/", 1);
 +        // For cases "/" and "/hive"
 +        if(targetIndex == 0) {
 +          originalContextPath = target;
 +        }
 +      }
 +
 +      // Match "/{gatewayName}/{topologyName}/foo" or "/".
 +      // There could be a case where content is served from the root
 +      // i.e. https://host:port/
 +
 +      if (!baseURI.startsWith(originalContextPath)) {
 +        final int index = StringUtils.ordinalIndexOf(baseURI, "/", 3);
 +        if (index > 0) {
 +          context = baseURI.substring(0, index);
 +        }
 +      }
 +
 +      if(!StringUtils.isBlank(context)) {
 +        LOG.topologyPortMappingAddContext(target, context + target);
 +      }
 +      // Move on to the next handler in chain with updated path
 +      newTarget = context + target;
 +    }
 +
 +    //Backwards compatibility for default topology feature
 +    if (defaultTopologyRedirectContext != null && !baseURI
 +        .startsWith("/" + config.getGatewayPath())) {
 +      newTarget = defaultTopologyRedirectContext + target;
 +
 +      final RequestUpdateHandler.ForwardedRequest newRequest = new RequestUpdateHandler.ForwardedRequest(
 +          request, defaultTopologyRedirectContext, newTarget);
 +
 +      LOG.defaultTopologyForward(target, newTarget);
 +      super.handle(newTarget, baseRequest, newRequest, response);
 +
 +    } else {
 +
 +      super.handle(newTarget, baseRequest, request, response);
 +    }
 +
 +  }
 +}
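
For a request that arrives without the gateway context, the handler derives the context from the request URI and prepends it to the dispatch target. The core of that extraction is the ordinalIndexOf call above; a standalone sketch of just that step (the topology name "sandbox" is an assumption for illustration, not taken from this commit):

    import org.apache.commons.lang.StringUtils;

    public class ContextExtractionSketch {
        public static void main(String[] args) {
            // Mirrors the handler: take everything up to the third '/' as the context.
            String baseURI = "/gateway/sandbox/webhdfs/v1/tmp";
            int index = StringUtils.ordinalIndexOf(baseURI, "/", 3);
            String context = (index > 0) ? baseURI.substring(0, index) : "";
            System.out.println(context); // prints "/gateway/sandbox"
        }
    }

With the default topology feature, the same prefixing applies to requests on the standard port: a target such as /webhdfs/v1/tmp would be forwarded as /gateway/sandbox/webhdfs/v1/tmp through RequestUpdateHandler.ForwardedRequest.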


[22/53] [abbrv] knox git commit: KNOX-998 - Some more refactoring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java b/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
index 25ad1c3..c9f262b 100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
@@ -46,8 +46,8 @@ import org.apache.knox.gateway.topology.Provider;
 import org.apache.knox.gateway.topology.Service;
 import org.apache.knox.gateway.topology.Topology;
 import org.apache.knox.gateway.util.XmlUtils;
-import org.apache.hadoop.test.TestUtils;
-import org.apache.hadoop.test.log.NoOpAppender;
+import org.apache.knox.test.TestUtils;
+import org.apache.knox.test.log.NoOpAppender;
 import org.apache.log4j.Appender;
 import org.jboss.shrinkwrap.api.Archive;
 import org.jboss.shrinkwrap.api.ArchivePath;
@@ -58,8 +58,8 @@ import org.w3c.dom.Document;
 import org.w3c.dom.Node;
 import org.xml.sax.SAXException;
 
-import static org.apache.hadoop.test.TestUtils.LOG_ENTER;
-import static org.apache.hadoop.test.TestUtils.LOG_EXIT;
+import static org.apache.knox.test.TestUtils.LOG_ENTER;
+import static org.apache.knox.test.TestUtils.LOG_EXIT;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.CoreMatchers.nullValue;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditLayoutTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditLayoutTest.java b/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditLayoutTest.java
index 8ff183e..6400f1b 100644
--- a/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditLayoutTest.java
+++ b/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditLayoutTest.java
@@ -26,7 +26,7 @@ import org.apache.knox.gateway.audit.api.CorrelationService;
 import org.apache.knox.gateway.audit.api.CorrelationServiceFactory;
 import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 import org.apache.knox.gateway.audit.log4j.layout.AuditLayout;
-import org.apache.hadoop.test.log.CollectAppender;
+import org.apache.knox.test.log.CollectAppender;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.PropertyConfigurator;
 import org.apache.log4j.spi.LoggingEvent;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditServiceTest.java b/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditServiceTest.java
index 7c05a2a..7b08e83 100644
--- a/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditServiceTest.java
+++ b/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/AuditServiceTest.java
@@ -27,7 +27,7 @@ import org.apache.knox.gateway.audit.api.CorrelationServiceFactory;
 import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 import org.apache.knox.gateway.audit.log4j.audit.Log4jAuditService;
 import org.apache.knox.gateway.audit.log4j.correlation.Log4jCorrelationService;
-import org.apache.hadoop.test.log.CollectAppender;
+import org.apache.knox.test.log.CollectAppender;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.PropertyConfigurator;
 import org.apache.log4j.spi.LoggingEvent;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/StoreAndForwardAppenderTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/StoreAndForwardAppenderTest.java b/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/StoreAndForwardAppenderTest.java
index 808acb7..becad46 100644
--- a/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/StoreAndForwardAppenderTest.java
+++ b/gateway-util-common/src/test/java/org/apache/knox/gateway/audit/StoreAndForwardAppenderTest.java
@@ -17,7 +17,7 @@
  */
 package org.apache.knox.gateway.audit;
 
-import org.apache.hadoop.test.log.CollectAppender;
+import org.apache.knox.test.log.CollectAppender;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PropertyConfigurator;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-common/src/test/resources/audit-log4j.properties
----------------------------------------------------------------------
diff --git a/gateway-util-common/src/test/resources/audit-log4j.properties b/gateway-util-common/src/test/resources/audit-log4j.properties
index ccc92f5..c4d4fd1 100644
--- a/gateway-util-common/src/test/resources/audit-log4j.properties
+++ b/gateway-util-common/src/test/resources/audit-log4j.properties
@@ -22,4 +22,4 @@ log4j.logger.audit.forward = INFO, audit-forward
 log4j.appender.audit-store = org.apache.knox.gateway.audit.log4j.appender.JdbmStoreAndForwardAppender
 log4j.appender.audit-store.file = target/audit
 
-log4j.appender.audit-forward = org.apache.hadoop.test.log.CollectAppender
\ No newline at end of file
+log4j.appender.audit-forward = org.apache.knox.test.log.CollectAppender
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ExpanderTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ExpanderTest.java b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ExpanderTest.java
index acf7cf6..60f6bbd 100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ExpanderTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ExpanderTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.util.urltemplate;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java
index df31d3d..e75c89b 100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java
@@ -18,8 +18,8 @@
 package org.apache.knox.gateway.util.urltemplate;
 
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java.orig
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java.orig b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java.orig
deleted file mode 100644
index 4e1a9c8..0000000
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/MatcherTest.java.orig
+++ /dev/null
@@ -1,839 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.knox.gateway.util.urltemplate;
-
-
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.net.URISyntaxException;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.nullValue;
-import static org.hamcrest.CoreMatchers.sameInstance;
-import static org.hamcrest.core.IsCollectionContaining.hasItem;
-import static org.hamcrest.core.IsNull.notNullValue;
-import static org.junit.Assert.assertThat;
-
-//TODO: Test to make sure that extra unmatched query parameters prevent a match.
-@Category( { UnitTests.class, FastTests.class } )
-public class MatcherTest {
-
-  private void addTemplate( Matcher<String> matcher, String template ) throws URISyntaxException {
-    matcher.add( Parser.parse( template ), template );
-  }
-
-  private void assertValidMatch( Matcher<String> matcher, String uri, String template ) throws URISyntaxException {
-    if( template == null ) {
-      assertThat( matcher.match( Parser.parse( uri ) ), nullValue() );
-    } else {
-      Template uriTemplate = Parser.parse( uri );
-      Matcher<String>.Match match = matcher.match( uriTemplate );
-      assertThat( "Expected to find a match.", match, notNullValue() );
-      assertThat( match.getValue(), equalTo( template ) );
-    }
-  }
-
-  @Test
-  public void testWildcardCharacterInInputTemplate() throws URISyntaxException {
-    Matcher<String> matcher;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    // First verify that if .../test_table/test_row/family1... works.
-    matcher = new Matcher<String>();
-    inputTemplate = Parser.parse( "https://localhost:8443/gateway/sandbox/hbase/test_table/test_row/family1:row2_col1,family2/0,9223372036854775807?v=1" );
-    patternTemplate = Parser.parse( "*://*:*/**/webhdfs/{version}/{path=**}?{**}" );
-    matcher.add( patternTemplate, "webhdfs" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-
-    // Then reproduce the issue with .../test_table/*/family1..
-    matcher = new Matcher<String>();
-    inputTemplate = Parser.parse( "https://localhost:8443/gateway/sandbox/hbase/test_table/*/family1:row2_col1,family2/0,9223372036854775807?v=1" );
-    patternTemplate = Parser.parse( "*://*:*/**/webhdfs/{version}/{path=**}?{**}" );
-    matcher.add( patternTemplate, "webhdfs" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-
-    // Reproduce the issue where the wrong match was picked when there was a "*" in the input URL template.
-    matcher = new Matcher<String>();
-    inputTemplate = Parser.parse( "https://localhost:8443/gateway/sandbox/hbase/test_table/*/family1:row2_col1,family2/0,9223372036854775807?v=1" );
-    patternTemplate = Parser.parse( "*://*:*/**/webhdfs/{version}/{path=**}?{**}" );
-    matcher.add( patternTemplate, "webhdfs" );
-    patternTemplate = Parser.parse( "*://*:*/**/hbase/{path=**}?{**}" );
-    matcher.add( patternTemplate, "hbase" );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getValue(), is( "hbase" ) );
-  }
-  
-  @Test
-  public void testDefaultAppDeployment() throws Exception {
-    Matcher<String> matcher;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    matcher = new Matcher<String>();
-    inputTemplate = Parser.parse( "https://localhost:8443/webhdfs/v1/tmp?op=LISTSTATUS" );
-    patternTemplate = Parser.parse( "*://*:*/webhdfs/{version}/{path=**}?{**}" );
-    matcher.add( patternTemplate, "webhdfs" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-  }
-
-  @Test
-  public void testRootPathMatching() throws Exception {
-    Matcher<String> matcher;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    ///////
-    patternTemplate = Parser.parse( "*://*:*" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-match" );
-
-    inputTemplate = Parser.parse( "test-scheme://test-host:42" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/test-path" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-
-    ///////
-    patternTemplate = Parser.parse( "*://*:*/" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-match" );
-
-    inputTemplate = Parser.parse( "test-scheme://test-host:42" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/test-path" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-
-    ///////
-    patternTemplate = Parser.parse( "*://*:*/*" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-match" );
-
-    inputTemplate = Parser.parse( "test-scheme://test-host:42" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/test-path" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-
-    ///////
-    patternTemplate = Parser.parse( "*://*:*/**" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-match" );
-
-//KM: I'm not sure what the correct behavior is here.
-//    inputTemplate = Parser.parse( "test-scheme://test-host:42" );
-//    match = matcher.match( inputTemplate );
-//    assertThat( match, ? );
-//    inputTemplate = Parser.parse( "test-scheme://test-host:42/" );
-//    match = matcher.match( inputTemplate );
-//    assertThat( match, ? );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/test-path" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-
-    ///////
-    patternTemplate = Parser.parse( "*://*:*/{path=*}" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-match" );
-
-    inputTemplate = Parser.parse( "test-scheme://test-host:42" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/test-path" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-
-    ///////
-    patternTemplate = Parser.parse( "*://*:*/{path=**}" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-match" );
-
-//KM: I'm not sure what the correct behavior is here.
-//    inputTemplate = Parser.parse( "test-scheme://test-host:42" );
-//    match = matcher.match( inputTemplate );
-//    assertThat( match, ? );
-//    inputTemplate = Parser.parse( "test-scheme://test-host:42/" );
-//    match = matcher.match( inputTemplate );
-//    assertThat( match, ? );
-    inputTemplate = Parser.parse( "test-scheme://test-host:42/test-path" );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-  }
-
-  @Test
-  public void testTopLevelPathGlobMatch() throws Exception {
-    Matcher<String> matcher;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    patternTemplate = Parser.parse( "{*}://{host}:{*}/{**=**}?{**}" );
-    inputTemplate = Parser.parse( "test-scheme://test-input-host:42/test-path/test-file?test-name=test-value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-math" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because the path ** should include both test-path and test-file", match, notNullValue() );
-
-    patternTemplate = Parser.parse( "{*}://{host}:{*}/{**}?{**}" );
-    inputTemplate = Parser.parse( "test-scheme://test-input-host:42/test-path/test-file?test-name=test-value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "test-math" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because the path ** should include both test-path and test-file", match, notNullValue() );
-  }
-
-  @Test
-  public void testQueryHandling() throws Exception {
-    Matcher<String> matcher;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    patternTemplate = Parser.parse( "/path?{query}" );
-    inputTemplate = Parser.parse( "/path" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should not match because input does not contain the required query.", match, nullValue() );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/path?{query}" ), "T1" );
-    matcher.add( Parser.parse( "/path" ), "T2" );
-    inputTemplate = Parser.parse( "/path" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because there is an entry in the matcher without a query.", match, notNullValue() );
-    assertThat( match.getValue(), equalTo( "T2") );
-
-    patternTemplate = Parser.parse( "/path?{query}" );
-    inputTemplate = Parser.parse( "/path?query=value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because input does contain the required query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value" ) );
-    assertThat( match.getParams().resolve( "query" ).size(), equalTo( 1 ) );
-
-    patternTemplate = Parser.parse( "/path?{*}" );
-    inputTemplate = Parser.parse( "/path" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should not match because input does not contain the required query.", match, nullValue() );
-
-    patternTemplate = Parser.parse( "/path?*" );
-    inputTemplate = Parser.parse( "/path" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should not match because input does not contain the required query.", match, nullValue() );
-
-    patternTemplate = Parser.parse( "/path?*" );
-    inputTemplate = Parser.parse( "/path?query=value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat(
-        "Should match because the template has an extra query and the input has a query.",
-        match, notNullValue() );
-    assertThat(
-        "Should not have extracts any parameters since pattern template didn't contain {}",
-        match.getParams().resolve( "query" ), nullValue() );
-
-    patternTemplate = Parser.parse( "/path?{*}" );
-    inputTemplate = Parser.parse( "/path?query=value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because input does contain the required query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value" ) );
-
-    patternTemplate = Parser.parse( "/path?{**}" );
-    inputTemplate = Parser.parse( "/path" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because the template has an optional query.", match, notNullValue() );
-
-    patternTemplate = Parser.parse( "/path?**" );
-    inputTemplate = Parser.parse( "/path" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because the template has an optional extra query.", match, notNullValue() );
-
-    patternTemplate = Parser.parse( "/path?**" );
-    inputTemplate = Parser.parse( "/path?query=value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because the template has an optional extra query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), nullValue() );
-
-    patternTemplate = Parser.parse( "/path?{**}" );
-    inputTemplate = Parser.parse( "/path?query=value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because the template has an optional extra query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value" ) );
-    assertThat( match.getParams().resolve( "query" ).size(), equalTo( 1 ) );
-
-    patternTemplate = Parser.parse( "/path?{query}&{*}" );
-    inputTemplate = Parser.parse( "/path?query=value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should not match because input does not contain the required extra query.", match, nullValue() );
-
-    patternTemplate = Parser.parse( "/path?{query}&{*}" );
-    inputTemplate = Parser.parse( "/path?query=value&extra=extra-value" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because input does contain the required query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value" ) );
-    assertThat( match.getParams().resolve( "query" ).size(), equalTo( 1 ) );
-
-    patternTemplate = Parser.parse( "/path?{query=**}" );
-    inputTemplate = Parser.parse( "/path?query=value1&query=value2" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because input does contain the required query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value1" ) );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value2" ) );
-    assertThat( match.getParams().resolve( "query" ).size(), equalTo( 2 ) );
-
-    patternTemplate = Parser.parse( "/path?{query}" );
-    inputTemplate = Parser.parse( "/path?query=value1&query=value2" );
-    matcher = new Matcher<String>();
-    matcher.add( patternTemplate, "T" );
-    match = matcher.match( inputTemplate );
-    assertThat( "Should match because input does contain the required query.", match, notNullValue() );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value1" ) );
-    assertThat( match.getParams().resolve( "query" ), hasItem( "value2" ) );
-    assertThat( match.getParams().resolve( "query" ).size(), equalTo( 2 ) );
-  }
-
-  @Test
-  public void testMatchCompleteUrl() throws Exception {
-    Matcher<String> matcher;
-    String pattern, input;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    matcher = new Matcher<String>();
-    pattern = "foo://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "foo://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-    matcher = new Matcher<String>();
-    pattern = "foo://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-
-    input = pattern;
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match, notNullValue() );
-
-    input = "not://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match, nullValue() );
-  }
-
-  @Test
-  public void testMatch() throws Exception {
-    Matcher<String> matcher;
-    String pattern, input;
-    Template patternTemplate, inputTemplate;
-    Matcher<String>.Match match;
-
-    matcher = new Matcher<String>();
-    pattern = "path";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    assertThat( matcher.get( patternTemplate ), is( pattern ) );
-    input = "path";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-
-    matcher = new Matcher<String>();
-    pattern = "/path";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "/path";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-    matcher = new Matcher<String>();
-    pattern = "path/path";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "path/path";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-    matcher = new Matcher<String>();
-    pattern = "*/path";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "pathA/path";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-    matcher = new Matcher<String>();
-    pattern = "**/path";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "pathA/pathB/path";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-    matcher = new Matcher<String>();
-    pattern = "path-1/{path=**}/path-4";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "path-1/path-2/path-3/path-4";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-    assertThat( match.getParams().resolve( "path" ).get( 0 ), equalTo( "path-2" ) );
-    assertThat( match.getParams().resolve( "path" ).get( 1 ), equalTo( "path-3" ) );
-
-    matcher = new Matcher<String>();
-    pattern = "/";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "/";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-
-    matcher = new Matcher<String>();
-    pattern = "";
-    patternTemplate = Parser.parse( pattern );
-    matcher.add( patternTemplate, pattern );
-    input = "";
-    inputTemplate = Parser.parse( input );
-    match = matcher.match( inputTemplate );
-    assertThat( match.getTemplate(), sameInstance( patternTemplate ) );
-    assertThat( match.getValue(), equalTo( pattern ) );
-  }
-
-  @Test
-  public void testVariousPatterns() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs" ), "/webhdfs" );
-    matcher.add( Parser.parse( "/webhdfs/dfshealth.jsp" ), "/webhdfs/dfshealth.jsp" );
-    matcher.add( Parser.parse( "/webhdfs/*.jsp" ), "/webhdfs/*.jsp" );
-    matcher.add( Parser.parse( "/webhdfs/other.jsp" ), "/webhdfs/other.jsp" );
-    matcher.add( Parser.parse( "/webhdfs/*" ), "/webhdfs/*" );
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    matcher.add( Parser.parse( "/webhdfs/v1/**" ), "/webhdfs/v1/**" );
-    matcher.add( Parser.parse( "/webhdfs/**/middle/*.xml" ), "/webhdfs/**/middle/*.xml" );
-
-    assertValidMatch( matcher, "/webhdfs", "/webhdfs" );
-    assertValidMatch( matcher, "/webhdfs/dfshealth.jsp", "/webhdfs/dfshealth.jsp" );
-    assertValidMatch( matcher, "/webhdfs/v1", "/webhdfs/*" ); // The star should be picked in preference to the glob.
-    assertValidMatch( matcher, "/webhdfs/some.jsp", "/webhdfs/*.jsp" );
-    assertValidMatch( matcher, "/webhdfs/other.jsp", "/webhdfs/other.jsp" );
-    assertValidMatch( matcher, "/webhdfs/path/some.jsp", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/path/middle/some.jsp", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/path/middle/some.xml", "/webhdfs/**/middle/*.xml" );
-    assertValidMatch( matcher, "/webhdfs/path/to/file", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/v1/path/to/file", "/webhdfs/v1/**" );
-  }
-
-  @Test
-  public void testStar() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/*" ), "/webhdfs/*" );
-    assertValidMatch( matcher, "/webhdfs/*", "/webhdfs/*" );
-    assertValidMatch( matcher, "/webhdfs/file", "/webhdfs/*" );
-    assertValidMatch( matcher, "/webhdfs/path/", "/webhdfs/*" );
-    assertValidMatch( matcher, "/webhdfs/path/file", null );
-    assertValidMatch( matcher, "/webhdfs/path/path/", null );
-  }
-
-  @Test
-  public void testGlob() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/file", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/path/", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/path/file", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/path/path/", "/webhdfs/**" );
-  }
-
-  @Test
-  public void testMatrixParam() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    matcher.add( Parser.parse( "/webhdfs/browseDirectory.jsp;dn=*" ), "/webhdfs/browseDirectory.jsp;dn=*" );
-    assertValidMatch( matcher, "/webhdfs/browseDirectory.jsp;dn=X", "/webhdfs/browseDirectory.jsp;dn=*" );
-  }
-
-  @Test
-  public void testTwoGlobsAtDifferentDepths() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    matcher.add( Parser.parse( "/webhdfs/v1/**" ), "/webhdfs/v1/**" );
-    assertValidMatch( matcher, "/webhdfs/file", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/v1/file", "/webhdfs/v1/**" );
-
-    // Reverse the put order.
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/v1/**" ), "/webhdfs/v1/**" );
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/file", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/v1/file", "/webhdfs/v1/**" );
-  }
-
-  @Test
-  public void testGlobsVsStarsAtSameDepth() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/*" ), "/webhdfs/*" );
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/file", "/webhdfs/*" ); // The star should be picked in preference to the glob.
-    assertValidMatch( matcher, "/webhdfs/path/file", "/webhdfs/**" );
-
-    // Reverse the put order.
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/webhdfs/**" ), "/webhdfs/**" );
-    matcher.add( Parser.parse( "/webhdfs/*" ), "/webhdfs/*" );
-    assertValidMatch( matcher, "/webhdfs/path/file", "/webhdfs/**" );
-    assertValidMatch( matcher, "/webhdfs/file", "/webhdfs/*" );
-  }
-
-  @Test
-  public void testMatchingPatternsWithinPathSegments() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/path/{file}" ), "default" );
-    assertValidMatch( matcher, "/path/file-name", "default" );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/path/{file=*}" ), "*" );
-    assertValidMatch( matcher, "/path/some-name", "*" );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/path/{more=**}" ), "**" );
-    assertValidMatch( matcher, "/path/some-path/some-name", "**" );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "/path/{regex=prefix*suffix}" ), "regex" );
-    assertValidMatch( matcher, "/path/prefix-middle-suffix", "regex" );
-    assertValidMatch( matcher, "/path/not-prefix-middle-suffix", null );
-  }
-
-  @Test
-  public void testMatchingPatternsWithinQuerySegments() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "?query={queryParam}" ), "default" );
-    assertValidMatch( matcher, "?query=value", "default" );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "?query={queryParam=*}" ), "*" );
-    assertValidMatch( matcher, "?query=some-value", "*" );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "?query={queryParam=**}" ), "**" );
-    assertValidMatch( matcher, "?query=some-value", "**" );
-
-    matcher = new Matcher<String>();
-    matcher.add( Parser.parse( "?query={queryParam=prefix*suffix}" ), "regex" );
-    assertValidMatch( matcher, "?query=prefix-middle-suffix", "regex" );
-    assertValidMatch( matcher, "?query=not-prefix-middle-suffix", null );
-  }
-
-  @Test
-  public void testMatchingForTemplatesThatVaryOnlyByQueryParams() throws URISyntaxException {
-    Matcher<String> matcher = new Matcher<String>();
-    addTemplate( matcher, "?one={queryParam}" );
-    addTemplate( matcher, "?two={queryParam}" );
-
-    assertValidMatch( matcher, "?one=value", "?one={queryParam}" );
-    assertValidMatch( matcher, "?two=value", "?two={queryParam}" );
-    assertValidMatch( matcher, "?three=value", null );
-    assertValidMatch( matcher, "?", null );
-  }
-
-  @Test
-  public void testFullUrlExtraction() throws URISyntaxException {
-    Template template;
-    Template input;
-    Matcher<?> matcher;
-    Matcher<?>.Match match;
-    Params params;
-
-    template = Parser.parse( "{scheme}://{username}:{password}@{host}:{port}/{root}/{path}/{file}?queryA={paramA}&queryB={paramB}#{fragment}" );
-    input = Parser.parse( "http://horton:hadoop@hortonworks.com:80/top/middle/end?queryA=valueA&queryB=valueB#section" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-
-    assertThat( params.getNames(), hasItem( "scheme" ) );
-    assertThat( params.resolve( "scheme" ), hasItem( "http" ) );
-    assertThat( params.getNames(), hasItem( "username" ) );
-    assertThat( params.resolve( "username" ), hasItem( "horton" ) );
-    assertThat( params.getNames(), hasItem( "password" ) );
-    assertThat( params.resolve( "password" ), hasItem( "hadoop" ) );
-    assertThat( params.getNames(), hasItem( "host" ) );
-    assertThat( params.resolve( "host" ), hasItem( "hortonworks.com" ) );
-    assertThat( params.getNames(), hasItem( "port" ) );
-    assertThat( params.resolve( "port" ), hasItem( "80" ) );
-    assertThat( params.getNames(), hasItem( "root" ) );
-    assertThat( params.resolve( "root" ), hasItem( "top" ) );
-    assertThat( params.getNames(), hasItem( "path" ) );
-    assertThat( params.resolve( "path" ), hasItem( "middle" ) );
-    assertThat( params.getNames(), hasItem( "file" ) );
-    assertThat( params.resolve( "file" ), hasItem( "end" ) );
-    assertThat( params.getNames(), hasItem( "paramA" ) );
-    assertThat( params.resolve( "paramA" ), hasItem( "valueA" ) );
-    assertThat( params.getNames(), hasItem( "paramB" ) );
-    assertThat( params.resolve( "paramB" ), hasItem( "valueB" ) );
-    assertThat( params.getNames(), hasItem( "fragment" ) );
-    assertThat( params.resolve( "fragment" ), hasItem( "section" ) );
-    assertThat( params.getNames().size(), equalTo( 11 ) );
-  }
-
-  @Test
-  public void testMultipleDoubleStarPathMatching() throws URISyntaxException {
-    Template template;
-    Template input;
-    Matcher<?> matcher;
-    Matcher<String> stringMatcher;
-    Matcher<?>.Match match;
-
-//    template = Parser.parse( "*://*:*/**/webhdfs/v1/**?**" );
-//    input = Parser.parse( "http://localhost:53221/gateway/cluster/webhdfs/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir?user.name=hdfs&op=MKDIRS" );
-//    matcher = new Matcher<String>( template, "test-value" );
-//    match = matcher.match( input );
-//    assertThat( (String)match.getValue(), is( "test-value" ) );
-//
-//    template = Parser.parse( "*://*:*/**/webhdfs/v1/{path=**}?{**=*}" );
-//    input = Parser.parse( "http://localhost:53221/gateway/cluster/webhdfs/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir?user.name=hdfs&op=MKDIRS" );
-//    matcher = new Matcher<String>( template, "test-value-2" );
-//    match = matcher.match( input );
-//    assertThat( (String)match.getValue(), is( "test-value-2" ) );
-//
-//    stringMatcher = new Matcher<String>();
-//    template = Parser.parse( "*://*:*/**/webhdfs/data/v1/{path=**}?host={host=*}&port={port=*}&{**=*}" );
-//    stringMatcher.add( template, "test-value-C" );
-//    template = Parser.parse( "*://*:*/**/webhdfs/v1/{path=**}?{**=*}" );
-//    stringMatcher.add( template, "test-value-B" );
-//    input = Parser.parse( "http://localhost:53221/gateway/cluster/webhdfs/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir?user.name=hdfs&op=MKDIRS" );
-//    match = stringMatcher.match( input );
-//    assertThat( match.getValue(), notNullValue() );
-//    assertThat( (String)match.getValue(), is( "test-value-B" ) );
-
-    // This is just a reverse of the above.  The order caused a bug.
-    stringMatcher = new Matcher<String>();
-    template = Parser.parse( "*://*:*/**/webhdfs/v1/{path=**}?{**=*}" );
-    stringMatcher.add( template, "test-value-B" );
-    template = Parser.parse( "*://*:*/**/webhdfs/data/v1/{path=**}?host={host=*}&port={port=*}&{**=*}" );
-    stringMatcher.add( template, "test-value-C" );
-    input = Parser.parse( "http://localhost:53221/gateway/cluster/webhdfs/v1/tmp/GatewayWebHdfsFuncTest/testBasicHdfsUseCase/dir?user.name=hdfs&op=MKDIRS" );
-    match = stringMatcher.match( input );
-    assertThat( match.getValue(), notNullValue() );
-    assertThat( (String)match.getValue(), is( "test-value-B" ) );
-
-  }
-
-  @Test
-  public void testPathExtraction() throws Exception {
-    Template template;
-    Template input;
-    Matcher<?> matcher;
-    Matcher<?>.Match match;
-    Params params;
-
-    template = Parser.parse( "{path-queryParam}" );
-    input = Parser.parse( "path-value" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "path-queryParam" ) );
-    assertThat( params.resolve( "path-queryParam" ).size(), equalTo( 1 ) );
-    assertThat( params.resolve( "path-queryParam" ), hasItem( "path-value" ) );
-
-    template = Parser.parse( "/some-path/{path-queryParam}" );
-    input = Parser.parse( "/some-path/path-value" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "path-queryParam" ) );
-    assertThat( params.resolve( "path-queryParam" ).size(), equalTo( 1 ) );
-    assertThat( params.resolve( "path-queryParam" ), hasItem( "path-value" ) );
-
-    template = Parser.parse( "/some-path/{path-queryParam}/some-other-path" );
-    input = Parser.parse( "/some-path/path-value/some-other-path" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "path-queryParam" ) );
-    assertThat( params.resolve( "path-queryParam" ).size(), equalTo( 1 ) );
-    assertThat( params.resolve( "path-queryParam" ), hasItem( "path-value" ) );
-
-    template = Parser.parse( "{path=**}" );
-    input = Parser.parse( "A/B" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "path" ) );
-    assertThat( params.resolve( "path" ).size(), equalTo( 2 ) );
-    assertThat( params.resolve( "path" ), hasItem( "A" ) );
-    assertThat( params.resolve( "path" ), hasItem( "B" ) );
-
-    template = Parser.parse( "/top/{mid=**}/end" );
-    input = Parser.parse( "/top/A/B/end" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "mid" ) );
-    assertThat( params.resolve( "mid" ).size(), equalTo( 2 ) );
-    assertThat( params.resolve( "mid" ), hasItem( "A" ) );
-    assertThat( params.resolve( "mid" ), hasItem( "B" ) );
-
-    template = Parser.parse( "*://*:*/{path=**}?{**}" );
-    input = Parser.parse( "http://host:port/pathA/pathB" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params.resolve( "path" ), hasItem( "pathA" ) );
-    assertThat( params.resolve( "path" ), hasItem( "pathB" ) );
-    assertThat( params.resolve( "path" ).size(), is( 2 ) );
-
-    template = Parser.parse( "*://*:*/{path=**}?{**}" );
-    input = Parser.parse( "http://host:port/pathA/pathB" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params.resolve( "path" ), hasItem( "pathA" ) );
-    assertThat( params.resolve( "path" ), hasItem( "pathB" ) );
-    assertThat( params.resolve( "path" ).size(), is( 2 ) );
-
-    template = Parser.parse( "*://*:*/{path=**}?{**}" );
-    input = Parser.parse( "http://host:port/pathA/pathB" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params.resolve( "path" ), hasItem( "pathA" ) );
-    assertThat( params.resolve( "path" ), hasItem( "pathB" ) );
-    assertThat( params.resolve( "path" ).size(), is( 2 ) );
-  }
-
-  @Test
-  public void testQueryExtraction() throws Exception {
-    Template template;
-    Template input;
-    Matcher<?> matcher;
-    Matcher<?>.Match match;
-    Params params;
-
-    template = Parser.parse( "?query-queryParam={queryParam-name}" );
-    input = Parser.parse( "?query-queryParam=queryParam-value" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "queryParam-name" ) );
-    assertThat( params.resolve( "queryParam-name" ).size(), equalTo( 1 ) );
-    assertThat( params.resolve( "queryParam-name" ), hasItem( "queryParam-value" ) );
-
-    template = Parser.parse( "?query-queryParam={queryParam-name}" );
-    input = Parser.parse( "?query-queryParam=queryParam-value" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 1 ) );
-    assertThat( params.getNames(), hasItem( "queryParam-name" ) );
-    assertThat( params.resolve( "queryParam-name" ).size(), equalTo( 1 ) );
-    assertThat( params.resolve( "queryParam-name" ), hasItem( "queryParam-value" ) );
-  }
-
-  @Test
-  public void testEdgeCaseExtraction() throws Exception {
-    Template template;
-    Template input;
-    Matcher<?> matcher;
-    Matcher<?>.Match match;
-    Params params;
-
-    template = Parser.parse( "" );
-    input = Parser.parse( "" );
-    matcher = new Matcher<Void>( template, null );
-    match = matcher.match( input );
-    params = match.getParams();
-    assertThat( params, notNullValue() );
-    assertThat( params.getNames().size(), equalTo( 0 ) );
-  }
-
-}

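The extraction tests above all follow one pattern: parse a template, parse an input URL, match, then resolve the captured parameters. Below is a minimal standalone sketch of that flow, reconstructed only from the calls visible in these tests (Parser.parse, Matcher, Match.getParams, Params.resolve); the class name TemplateMatchSketch and the inline result comments are illustrative, not part of the Knox sources.

    import org.apache.knox.gateway.util.urltemplate.Matcher;
    import org.apache.knox.gateway.util.urltemplate.Params;
    import org.apache.knox.gateway.util.urltemplate.Parser;
    import org.apache.knox.gateway.util.urltemplate.Template;

    public class TemplateMatchSketch {
        public static void main(String[] args) throws Exception {
            // A template with a named catch-all path parameter and a query catch-all.
            Template template = Parser.parse("*://*:*/{path=**}?{**}");
            // Concrete input URLs parse into the same Template representation.
            Template input = Parser.parse("http://host:port/pathA/pathB");

            // A Matcher associates a template with an arbitrary value; matching an
            // input returns the value bound to the best-matching template.
            Matcher<String> matcher = new Matcher<String>(template, "test-value");
            Matcher<String>.Match match = matcher.match(input);

            // Extracted parameters: "path" resolves to the captured segments.
            Params params = match.getParams();
            System.out.println(match.getValue());       // test-value
            System.out.println(params.resolve("path")); // [pathA, pathB]
        }
    }

As the double-star ordering test above demonstrates, a Matcher can also aggregate several templates via add(template, value) and pick the best match among them regardless of registration order.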
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
index 70085d4..90410ae 100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/ParserTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.util.urltemplate;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/RewriterTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/RewriterTest.java b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/RewriterTest.java
index 9d65b05..7bc3b85 100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/RewriterTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/RewriterTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.util.urltemplate;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.easymock.EasyMock;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/SegmentTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/SegmentTest.java b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/SegmentTest.java
index 47ad08e..c88aacf 100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/SegmentTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/SegmentTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.util.urltemplate;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/TemplateTest.java
----------------------------------------------------------------------
diff --git a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/TemplateTest.java b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/TemplateTest.java
index d3f1c2a..5b3db90 100644
--- a/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/TemplateTest.java
+++ b/gateway-util-urltemplate/src/test/java/org/apache/knox/gateway/util/urltemplate/TemplateTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.knox.gateway.util.urltemplate;
 
-import org.apache.hadoop.test.category.FastTests;
-import org.apache.hadoop.test.category.UnitTests;
+import org.apache.knox.test.category.FastTests;
+import org.apache.knox.test.category.UnitTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 315f25e..5549d93 100644
--- a/pom.xml
+++ b/pom.xml
@@ -192,13 +192,13 @@
                 </plugins>
             </build>
             <properties>
-                <failsafe.group>org.apache.hadoop.test.category.VerifyTest</failsafe.group>
+                <failsafe.group>org.apache.knox.test.category.VerifyTest</failsafe.group>
             </properties>
         </profile>
         <profile>
             <id>release</id>
             <properties>
-                <failsafe.group>org.apache.hadoop.test.category.VerifyTest,org.apache.hadoop.test.category.ReleaseTest</failsafe.group>
+                <failsafe.group>org.apache.knox.test.category.VerifyTest,org.apache.knox.test.category.ReleaseTest</failsafe.group>
             </properties>
         </profile>
         <profile>
@@ -207,7 +207,7 @@
                 <activeByDefault>true</activeByDefault>
             </activation>
             <properties>
-                <failsafe.group>org.apache.hadoop.test.category.VerifyTest</failsafe.group>
+                <failsafe.group>org.apache.knox.test.category.VerifyTest</failsafe.group>
             </properties>
         </profile>
     </profiles>
@@ -308,7 +308,7 @@
                 <version>${surefire-version}</version>
                 <configuration>
                     <excludedGroups>
-                        org.apache.hadoop.test.category.SlowTests,org.apache.hadoop.test.category.ManualTests,org.apache.hadoop.test.category.VerifyTest,org.apache.hadoop.test.category.ReleaseTest
+                        org.apache.knox.test.category.SlowTests,org.apache.knox.test.category.ManualTests,org.apache.knox.test.category.VerifyTest,org.apache.knox.test.category.ReleaseTest
                     </excludedGroups>
                     <systemPropertyVariables>
                         <gateway-version>${gateway-version}</gateway-version>


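The failsafe.group and excludedGroups settings above select tests by JUnit 4 category markers: a test class declares the categories it belongs to, and the surefire/failsafe plugins include or exclude it by those marker interfaces. A minimal sketch of how a test class opts in, assuming the renamed category interfaces from the hunks above (the class ExampleCategorizedTest itself is hypothetical):

    import org.apache.knox.test.category.FastTests;
    import org.apache.knox.test.category.UnitTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    @Category({UnitTests.class, FastTests.class})
    public class ExampleCategorizedTest {
        @Test
        public void fastUnitCheck() {
            // Runs in the default surefire pass, which excludes the SlowTests,
            // ManualTests, VerifyTest and ReleaseTest categories per the pom.xml above.
        }
    }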
[12/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
index eba98a4,48616c0..da55422
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/hadoop/gateway/services/token/impl/DefaultTokenAuthorityServiceTest.java
@@@ -21,13 -21,14 +21,15 @@@ import java.io.File
  import java.security.Principal;
  import java.util.HashMap;
  
 -import org.apache.hadoop.gateway.config.GatewayConfig;
 -import org.apache.hadoop.gateway.services.security.AliasService;
 -import org.apache.hadoop.gateway.services.security.KeystoreService;
 -import org.apache.hadoop.gateway.services.security.MasterService;
 -import org.apache.hadoop.gateway.services.security.impl.DefaultKeystoreService;
 -import org.apache.hadoop.gateway.services.security.token.JWTokenAuthority;
 -import org.apache.hadoop.gateway.services.security.token.TokenServiceException;
 -import org.apache.hadoop.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.services.security.impl.DefaultKeystoreService;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
++import org.apache.knox.gateway.services.security.token.TokenServiceException;
++
  import org.easymock.EasyMock;
  import org.junit.Test;
  

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
index efee1d8,0000000..41a7c10
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
@@@ -1,218 -1,0 +1,422 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.io.File;
 +import java.io.FileWriter;
 +import java.io.Writer;
 +import java.util.*;
 +
 +import org.junit.Test;
 +import static org.junit.Assert.*;
 +
 +
 +public class SimpleDescriptorFactoryTest {
 +
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptor() throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
 +
 +        String fileName = "test-topology.json";
 +        File testJSON = null;
 +        try {
 +            testJSON = writeJSON(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services);
 +            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testJSON.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +        } finally {
 +            if (testJSON != null) {
 +                try {
 +                    testJSON.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +    @Test
++    public void testParseJSONSimpleDescriptorWithServiceParams() throws Exception {
++
++        final String   discoveryType    = "AMBARI";
++        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
++        final String   discoveryUser    = "admin";
++        final String   providerConfig   = "ambari-cluster-policy.xml";
++        final String   clusterName      = "myCluster";
++
++        final Map<String, List<String>> services = new HashMap<>();
++        services.put("NODEMANAGER", null);
++        services.put("JOBTRACKER", null);
++        services.put("RESOURCEMANAGER", null);
++        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
++        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
++        services.put("KNOXSSO", null);
++        services.put("KNOXTOKEN", null);
++        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
++
++        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
++        Map<String, String> knoxSSOParams = new HashMap<>();
++        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
++        knoxSSOParams.put("knoxsso.token.ttl", "100000");
++        serviceParams.put("KNOXSSO", knoxSSOParams);
++
++        Map<String, String> knoxTokenParams = new HashMap<>();
++        knoxTokenParams.put("knox.token.ttl", "36000000");
++        knoxTokenParams.put("knox.token.audiences", "tokenbased");
++        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
++        serviceParams.put("KNOXTOKEN", knoxTokenParams);
++
++        Map<String, String> customRoleParams = new HashMap<>();
++        customRoleParams.put("custom.param.1", "value1");
++        customRoleParams.put("custom.param.2", "value2");
++        serviceParams.put("CustomRole", customRoleParams);
++
++        String fileName = "test-topology.json";
++        File testJSON = null;
++        try {
++            testJSON = writeJSON(fileName,
++                                 discoveryType,
++                                 discoveryAddress,
++                                 discoveryUser,
++                                 providerConfig,
++                                 clusterName,
++                                 services,
++                                 serviceParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testJSON.getAbsolutePath());
++            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
++        } catch (Exception e) {
++            e.printStackTrace();
++        } finally {
++            if (testJSON != null) {
++                try {
++                    testJSON.delete();
++                } catch (Exception e) {
++                    // Ignore
++                }
++            }
++        }
++    }
++
++    @Test
 +    public void testParseYAMLSimpleDescriptor() throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "joeblow";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
 +
 +        String fileName = "test-topology.yml";
 +        File testYAML = null;
 +        try {
 +            testYAML = writeYAML(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services);
 +            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testYAML.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +        } finally {
 +            if (testYAML != null) {
 +                try {
 +                    testYAML.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +
-     private void validateSimpleDescriptor(SimpleDescriptor    sd,
-                                           String              discoveryType,
-                                           String              discoveryAddress,
-                                           String              providerConfig,
-                                           String              clusterName,
++    @Test
++    public void testParseYAMLSimpleDescriptorWithServiceParams() throws Exception {
++
++        final String   discoveryType    = "AMBARI";
++        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
++        final String   discoveryUser    = "joeblow";
++        final String   providerConfig   = "ambari-cluster-policy.xml";
++        final String   clusterName      = "myCluster";
++
++        final Map<String, List<String>> services = new HashMap<>();
++        services.put("NODEMANAGER", null);
++        services.put("JOBTRACKER", null);
++        services.put("RESOURCEMANAGER", null);
++        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
++        services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
++        services.put("KNOXSSO", null);
++        services.put("KNOXTOKEN", null);
++        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
++
++        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
++        Map<String, String> knoxSSOParams = new HashMap<>();
++        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
++        knoxSSOParams.put("knoxsso.token.ttl", "100000");
++        serviceParams.put("KNOXSSO", knoxSSOParams);
++
++        Map<String, String> knoxTokenParams = new HashMap<>();
++        knoxTokenParams.put("knox.token.ttl", "36000000");
++        knoxTokenParams.put("knox.token.audiences", "tokenbased");
++        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
++        serviceParams.put("KNOXTOKEN", knoxTokenParams);
++
++        Map<String, String> customRoleParams = new HashMap<>();
++        customRoleParams.put("custom.param.1", "value1");
++        customRoleParams.put("custom.param.2", "value2");
++        serviceParams.put("CustomRole", customRoleParams);
++
++        String fileName = "test-topology.yml";
++        File testYAML = null;
++        try {
++            testYAML = writeYAML(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, serviceParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testYAML.getAbsolutePath());
++            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
++        } catch (Exception e) {
++            e.printStackTrace();
++        } finally {
++            if (testYAML != null) {
++                try {
++                    testYAML.delete();
++                } catch (Exception e) {
++                    // Ignore
++                }
++            }
++        }
++    }
++
++
++    private void validateSimpleDescriptor(SimpleDescriptor          sd,
++                                          String                    discoveryType,
++                                          String                    discoveryAddress,
++                                          String                    providerConfig,
++                                          String                    clusterName,
 +                                          Map<String, List<String>> expectedServices) {
++        validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, expectedServices, null);
++    }
++
++
++    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
++                                          String                           discoveryType,
++                                          String                           discoveryAddress,
++                                          String                           providerConfig,
++                                          String                           clusterName,
++                                          Map<String, List<String>>        expectedServices,
++                                          Map<String, Map<String, String>> expectedServiceParameters) {
 +        assertNotNull(sd);
 +        assertEquals(discoveryType, sd.getDiscoveryType());
 +        assertEquals(discoveryAddress, sd.getDiscoveryAddress());
 +        assertEquals(providerConfig, sd.getProviderConfig());
 +        assertEquals(clusterName, sd.getClusterName());
 +
 +        List<SimpleDescriptor.Service> actualServices = sd.getServices();
 +
 +        assertEquals(expectedServices.size(), actualServices.size());
 +
 +        for (SimpleDescriptor.Service actualService : actualServices) {
 +            assertTrue(expectedServices.containsKey(actualService.getName()));
 +            assertEquals(expectedServices.get(actualService.getName()), actualService.getURLs());
++
++            // Validate service parameters
++            if (expectedServiceParameters != null) {
++                if (expectedServiceParameters.containsKey(actualService.getName())) {
++                    Map<String, String> expectedParams = expectedServiceParameters.get(actualService.getName());
++
++                    Map<String, String> actualServiceParams = actualService.getParams();
++                    assertNotNull(actualServiceParams);
++
++                    // Validate the size of the service parameter set
++                    assertEquals(expectedParams.size(), actualServiceParams.size());
++
++                    // Validate the parameter contents
++                    for (String paramName : actualServiceParams.keySet()) {
++                        assertTrue(expectedParams.containsKey(paramName));
++                        assertEquals(expectedParams.get(paramName), actualServiceParams.get(paramName));
++                    }
++                }
++            }
 +        }
 +    }
 +
 +
 +    private File writeJSON(String path, String content) throws Exception {
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write(content);
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
 +
 +    private File writeJSON(String path,
 +                           String discoveryType,
 +                           String discoveryAddress,
 +                           String discoveryUser,
 +                           String providerConfig,
 +                           String clusterName,
 +                           Map<String, List<String>> services) throws Exception {
++        return writeJSON(path, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, null);
++    }
++
++    private File writeJSON(String path,
++                           String discoveryType,
++                           String discoveryAddress,
++                           String discoveryUser,
++                           String providerConfig,
++                           String clusterName,
++                           Map<String, List<String>> services,
++                           Map<String, Map<String, String>> serviceParams) throws Exception {
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("{" + "\n");
 +        fw.write("\"discovery-type\":\"" + discoveryType + "\",\n");
 +        fw.write("\"discovery-address\":\"" + discoveryAddress + "\",\n");
 +        fw.write("\"discovery-user\":\"" + discoveryUser + "\",\n");
 +        fw.write("\"provider-config-ref\":\"" + providerConfig + "\",\n");
 +        fw.write("\"cluster\":\"" + clusterName + "\",\n");
 +        fw.write("\"services\":[\n");
 +
 +        int i = 0;
 +        for (String name : services.keySet()) {
 +            fw.write("{\"name\":\"" + name + "\"");
++
++            // Service params
++            if (serviceParams != null && !serviceParams.isEmpty()) {
++                Map<String, String> params = serviceParams.get(name);
++                if (params != null && !params.isEmpty()) {
++                    fw.write(",\n\"params\":{\n");
++                    Iterator<String> paramNames = params.keySet().iterator();
++                    while (paramNames.hasNext()) {
++                        String paramName = paramNames.next();
++                        String paramValue = params.get(paramName);
++                        fw.write("\"" + paramName + "\":\"" + paramValue + "\"");
++                        fw.write(paramNames.hasNext() ? ",\n" : "");
++                    }
++                    fw.write("\n}");
++                }
++            }
++
++            // Service URLs
 +            List<String> urls = services.get(name);
 +            if (urls != null) {
-                 fw.write(", \"urls\":[");
++                fw.write(",\n\"urls\":[");
 +                Iterator<String> urlIter = urls.iterator();
 +                while (urlIter.hasNext()) {
 +                    fw.write("\"" + urlIter.next() + "\"");
 +                    if (urlIter.hasNext()) {
 +                        fw.write(", ");
 +                    }
 +                }
-                 fw.write("]");
++                fw.write("]\n");
 +            }
++
 +            fw.write("}");
 +            if (i++ < services.size() - 1) {
 +                fw.write(",");
 +            }
 +            fw.write("\n");
 +        }
 +        fw.write("]\n");
 +        fw.write("}\n");
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
-     private File writeYAML(String path,
-                            String discoveryType,
-                            String discoveryAddress,
-                            String discoveryUser,
-                            String providerConfig,
-                            String clusterName,
++
++    private File writeYAML(String                    path,
++                           String                    discoveryType,
++                           String                    discoveryAddress,
++                           String                    discoveryUser,
++                           String                    providerConfig,
++                           String                    clusterName,
 +                           Map<String, List<String>> services) throws Exception {
++        return writeYAML(path, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, null);
++    }
++
++
++    private File writeYAML(String                           path,
++                           String                           discoveryType,
++                           String                           discoveryAddress,
++                           String                           discoveryUser,
++                           String                           providerConfig,
++                           String                           clusterName,
++                           Map<String, List<String>>        services,
++                           Map<String, Map<String, String>> serviceParams) throws Exception {
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("---" + "\n");
 +        fw.write("discovery-type: " + discoveryType + "\n");
 +        fw.write("discovery-address: " + discoveryAddress + "\n");
 +        fw.write("discovery-user: " + discoveryUser + "\n");
 +        fw.write("provider-config-ref: " + providerConfig + "\n");
 +        fw.write("cluster: " + clusterName+ "\n");
 +        fw.write("services:\n");
 +        for (String name : services.keySet()) {
 +            fw.write("    - name: " + name + "\n");
++
++            // Service params
++            if (serviceParams != null && !serviceParams.isEmpty()) {
++                if (serviceParams.containsKey(name)) {
++                    Map<String, String> params = serviceParams.get(name);
++                    fw.write("      params:\n");
++                    for (String paramName : params.keySet()) {
++                        fw.write("            " + paramName + ": " + params.get(paramName) + "\n");
++                    }
++                }
++            }
++
++            // Service URLs
 +            List<String> urls = services.get(name);
 +            if (urls != null) {
 +                fw.write("      urls:\n");
 +                for (String url : urls) {
 +                    fw.write("          - " + url + "\n");
 +                }
 +            }
 +        }
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
 +
 +}

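For readers decoding the string concatenation in writeJSON, the descriptor it produces (and SimpleDescriptorFactory.parse consumes) has roughly the following shape; this is reconstructed from the write logic above using the test fixture values, not copied from any Knox sample file:

    {
    "discovery-type":"AMBARI",
    "discovery-address":"http://c6401.ambari.apache.org:8080",
    "discovery-user":"admin",
    "provider-config-ref":"ambari-cluster-policy.xml",
    "cluster":"myCluster",
    "services":[
    {"name":"KNOXSSO",
    "params":{
    "knoxsso.cookie.secure.only":"true",
    "knoxsso.token.ttl":"100000"
    }},
    {"name":"AMBARIUI",
    "urls":["http://c6401.ambari.apache.org:8080"]
    }
    ]
    }

The writeYAML counterpart nests params: and urls: under each "- name:" entry in the same way, and validateSimpleDescriptor then checks the parsed services against these expected URLs and parameters.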
http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
index b5558fd,0000000..a0c977a
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
@@@ -1,392 -1,0 +1,447 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import java.io.ByteArrayInputStream;
 +import java.io.File;
++import java.io.FileNotFoundException;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +
 +import javax.xml.xpath.XPath;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathFactory;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.Node;
 +import org.w3c.dom.NodeList;
 +import org.xml.sax.SAXException;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +
 +public class SimpleDescriptorHandlerTest {
 +
 +    private static final String TEST_PROVIDER_CONFIG =
 +            "    <gateway>\n" +
 +                    "        <provider>\n" +
 +                    "            <role>authentication</role>\n" +
 +                    "            <name>ShiroProvider</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param>\n" +
 +                    "                <!-- \n" +
 +                    "                session timeout in minutes; this is really an idle timeout.\n" +
 +                    "                It defaults to 30 mins if the property value is not defined.\n" +
 +                    "                The current client authentication would expire if the client idles continuously for more than this value.\n" +
 +                    "                -->\n" +
 +                    "                <name>sessionTimeout</name>\n" +
 +                    "                <value>30</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapContextFactory</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory</name>\n" +
 +                    "                <value>$ldapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.userDnTemplate</name>\n" +
 +                    "                <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.url</name>\n" +
 +                    "                <value>ldap://localhost:33389</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
 +                    "                <value>simple</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>urls./**</name>\n" +
 +                    "                <value>authcBasic</value>\n" +
 +                    "            </param>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <provider>\n" +
 +                    "            <role>identity-assertion</role>\n" +
 +                    "            <name>Default</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <!--\n" +
 +                    "        Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.\n" +
 +                    "        For example, a Hadoop service running in AWS may return a response that includes URLs containing\n" +
 +                    "        some AWS-internal host name.  If the client needs to make a subsequent request to the host identified\n" +
 +                    "        in those URLs, they need to be mapped to external host names that the client can use to connect via Knox.\n" +
 +                    "\n" +
 +                    "        If the external and internal host names are the same, turn off this provider by setting the value of\n" +
 +                    "        the enabled parameter to false.\n" +
 +                    "\n" +
 +                    "        The name parameter specifies the external host names in a comma separated list.\n" +
 +                    "        The value parameter specifies corresponding internal host names in a comma separated list.\n" +
 +                    "\n" +
 +                    "        Note that when you are using Sandbox, the external hostname needs to be localhost, as seen in the out-\n" +
 +                    "        of-box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the\n" +
 +                    "        Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.\n" +
 +                    "        -->\n" +
 +                    "        <provider>\n" +
 +                    "            <role>hostmap</role>\n" +
 +                    "            <name>static</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
 +                    "        </provider>\n" +
 +                    "    </gateway>\n";
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
-      * N.B. This test depends on the DummyServiceDiscovery extension being configured:
-      *             org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
++     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
++     *             org.apache.knox.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
 +     */
 +    @Test
 +    public void testSimpleDescriptorHandler() throws Exception {
 +
-         final String type = "DUMMY";
-         final String address = "http://c6401.ambari.apache.org:8080";
++        final String type = "PROPERTIES_FILE";
 +        final String clusterName = "dummy";
++
++        // Create a properties file to be the source of service discovery details for this test
++        final File discoveryConfig = File.createTempFile(getClass().getName() + "_discovery-config", ".properties");
++
++        final String address = discoveryConfig.getAbsolutePath();
++
++        final Properties DISCOVERY_PROPERTIES = new Properties();
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".name", clusterName);
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".NAMENODE", "hdfs://namenodehost:8020");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".JOBTRACKER", "rpc://jobtrackerhostname:8050");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHDFS", "http://webhdfshost:1234");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHCAT", "http://webhcathost:50111/templeton");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".OOZIE", "http://ooziehost:11000/oozie");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHBASE", "http://webhbasehost:1234");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".HIVE", "http://hivehostname:10001/clipath");
++        DISCOVERY_PROPERTIES.setProperty(clusterName + ".RESOURCEMANAGER", "http://remanhost:8088/ws");
++
++        try {
++            DISCOVERY_PROPERTIES.store(new FileOutputStream(discoveryConfig), null);
++        } catch (FileNotFoundException e) {
++            fail(e.getMessage());
++        }
++
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
 +        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
++        serviceURLs.put("KNOXSSO", null);
 +
 +        // Write the externalized provider config to a temp file
-         File providerConfig = writeProviderConfig("ambari-cluster-policy.xml", TEST_PROVIDER_CONFIG);
++        File providerConfig = new File(System.getProperty("java.io.tmpdir"), "ambari-cluster-policy.xml");
++        FileUtils.write(providerConfig, TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
-             File destDir = (new File(".")).getCanonicalFile();
++            File destDir = new File(System.getProperty("java.io.tmpdir")).getCanonicalFile();
++
++            Map<String, Map<String, String>> serviceParameters = new HashMap<>();
++            Map<String, String> knoxssoParams = new HashMap<>();
++            knoxssoParams.put("knoxsso.cookie.secure.only", "true");
++            knoxssoParams.put("knoxsso.token.ttl", "100000");
++            serviceParameters.put("KNOXSSO", knoxssoParams);
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(clusterName).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
++                EasyMock.expect(svc.getParams()).andReturn(serviceParameters.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                           SimpleDescriptorHandler.handle(testDescriptor,
 +                                                          providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                          destDir);
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                       extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                        (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
++
++                // Validate the role
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
++
++                // Validate the URLs
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
-                     assertNotNull("Every declared service should have a URL.", url);
-                     if (!topologyServiceURLs.containsKey(role)) {
-                         topologyServiceURLs.put(role, new ArrayList<String>());
++
++                    // If the service should have a URL (some don't require it)
++                    if (serviceURLs.containsKey(role)) {
++                        assertNotNull("Declared service should have a URL.", url);
++                        if (!topologyServiceURLs.containsKey(role)) {
++                            topologyServiceURLs.put(role, new ArrayList<>());
++                        }
++                        topologyServiceURLs.get(role).add(url); // Add it for validation later
 +                    }
-                     topologyServiceURLs.get(role).add(url);
 +                }
++
++                // If params were declared in the descriptor, then validate them in the resulting topology file
++                Map<String, String> params = serviceParameters.get(role);
++                if (params != null) {
++                    NodeList paramNodes = (NodeList) xpath.compile("param").evaluate(serviceNode, XPathConstants.NODESET);
++                    for (int paramNodeIndex = 0; paramNodeIndex < paramNodes.getLength(); paramNodeIndex++) {
++                        Node paramNode = paramNodes.item(paramNodeIndex);
++                        String paramName = (String) xpath.compile("name/text()").evaluate(paramNode, XPathConstants.STRING);
++                        String paramValue = (String) xpath.compile("value/text()").evaluate(paramNode, XPathConstants.STRING);
++                        assertTrue(params.keySet().contains(paramName));
++                        assertEquals(params.get(paramName), paramValue);
++                    }
++                }
++
 +            }
-             assertEquals("Unexpected number of service declarations.", serviceURLs.size(), topologyServiceURLs.size());
++            assertEquals("Unexpected number of service declarations.", (serviceURLs.size() - 1), topologyServiceURLs.size());
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            providerConfig.delete();
++            discoveryConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
 +     * Verify the behavior of the SimpleDescriptorHandler when service discovery fails to produce a valid URL for
 +     * a service.
 +     *
 +     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
 +     *             org.apache.hadoop.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
 +     */
 +    @Test
 +    public void testInvalidServiceURLFromDiscovery() throws Exception {
 +        final String CLUSTER_NAME = "myproperties";
 +
 +        // Configure the PropertiesFile Service Discovery implementation for this test
 +        final String DEFAULT_VALID_SERVICE_URL = "http://localhost:9999/thiswillwork";
 +        Properties serviceDiscoverySourceProps = new Properties();
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".NAMENODE",
 +                                                DEFAULT_VALID_SERVICE_URL.replace("http", "hdfs"));
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".JOBTRACKER",
 +                                                DEFAULT_VALID_SERVICE_URL.replace("http", "rpc"));
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHDFS",         DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHCAT",         DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".OOZIE",           DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHBASE",        DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".HIVE",            "{SCHEME}://localhost:10000/");
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".RESOURCEMANAGER", DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".AMBARIUI",        DEFAULT_VALID_SERVICE_URL);
 +        File serviceDiscoverySource = File.createTempFile("service-discovery", ".properties");
 +        serviceDiscoverySourceProps.store(new FileOutputStream(serviceDiscoverySource),
 +                                          "Test Service Discovery Source");
 +
 +        // Prepare a mock SimpleDescriptor
 +        final String type = "PROPERTIES_FILE";
 +        final String address = serviceDiscoverySource.getAbsolutePath();
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
 +        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
 +        // Write the externalized provider config to a temp file
 +        File providerConfig = writeProviderConfig("ambari-cluster-policy.xml", TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
 +            File destDir = (new File(".")).getCanonicalFile();
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                    SimpleDescriptorHandler.handle(testDescriptor,
 +                                                   providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                   destDir);
 +
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                    extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            List<String> topologyServices = new ArrayList<>();
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                    (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
 +                topologyServices.add(role);
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
 +                    assertNotNull("Every declared service should have a URL.", url);
 +                    if (!topologyServiceURLs.containsKey(role)) {
-                         topologyServiceURLs.put(role, new ArrayList<String>());
++                        topologyServiceURLs.put(role, new ArrayList<>());
 +                    }
 +                    topologyServiceURLs.get(role).add(url);
 +                }
 +            }
 +
 +            // There should not be a service element for HIVE, since it had no valid URLs
 +            assertEquals("Unexpected number of service declarations.", serviceURLs.size() - 1, topologyServices.size());
 +            assertFalse("The HIVE service should have been omitted from the generated topology.", topologyServices.contains("HIVE"));
 +
 +            assertEquals("Unexpected number of service URLs.", serviceURLs.size() - 1, topologyServiceURLs.size());
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            serviceDiscoverySource.delete();
 +            providerConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
 +    private File writeProviderConfig(String path, String content) throws IOException {
 +        File f = new File(path);
 +        FileUtils.write(f, content);
 +        return f;
 +    }
 +
 +}
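
The test above expects SimpleDescriptorHandler to omit any discovered URL that is not usable as-is, such as the HIVE entry whose "{SCHEME}" template was never resolved. The following is a minimal, self-contained sketch of that kind of filtering, using only java.net.URI; the helper name and its exact rules are illustrative assumptions, not the handler's actual validation API.

    import java.net.URI;
    import java.net.URISyntaxException;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ServiceUrlFilterSketch {

        // Hypothetical check: a discovered URL is usable only if it parses as
        // a URI with a concrete scheme, i.e. no unresolved "{SCHEME}" token.
        static boolean isValidServiceUrl(String url) {
            try {
                URI uri = new URI(url);
                return uri.getScheme() != null && !url.contains("{");
            } catch (URISyntaxException e) {
                return false; // "{SCHEME}://..." fails to parse and lands here
            }
        }

        public static void main(String[] args) {
            List<String> discovered = Arrays.asList(
                "http://localhost:9999/thiswillwork", // valid, kept
                "{SCHEME}://localhost:10000/");       // unresolved template, dropped
            List<String> usable = new ArrayList<>();
            for (String url : discovered) {
                if (isValidServiceUrl(url)) {
                    usable.add(url);
                }
            }
            System.out.println(usable); // [http://localhost:9999/thiswillwork]
        }
    }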

http://git-wip-us.apache.org/repos/asf/knox/blob/58780d37/gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
index a97cee2,0000000..a103dac
mode 100644,000000..100644
--- a/gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
+++ b/gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
@@@ -1,322 -1,0 +1,334 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxsso;
 +
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.security.Principal;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +
 +import javax.annotation.PostConstruct;
 +import javax.servlet.ServletContext;
 +import javax.servlet.http.Cookie;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.servlet.http.HttpSession;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.POST;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import javax.ws.rs.WebApplicationException;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.util.RegExUtils;
 +import org.apache.knox.gateway.util.Urls;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
 +
 +@Path( WebSSOResource.RESOURCE_PATH )
 +public class WebSSOResource {
 +  private static final String SSO_COOKIE_NAME = "knoxsso.cookie.name";
 +  private static final String SSO_COOKIE_SECURE_ONLY_INIT_PARAM = "knoxsso.cookie.secure.only";
 +  private static final String SSO_COOKIE_MAX_AGE_INIT_PARAM = "knoxsso.cookie.max.age";
 +  private static final String SSO_COOKIE_DOMAIN_SUFFIX_PARAM = "knoxsso.cookie.domain.suffix";
 +  private static final String SSO_COOKIE_TOKEN_TTL_PARAM = "knoxsso.token.ttl";
 +  private static final String SSO_COOKIE_TOKEN_AUDIENCES_PARAM = "knoxsso.token.audiences";
++  private static final String SSO_COOKIE_TOKEN_SIG_ALG = "knoxsso.token.sigalg";
 +  private static final String SSO_COOKIE_TOKEN_WHITELIST_PARAM = "knoxsso.redirect.whitelist.regex";
 +  private static final String SSO_ENABLE_SESSION_PARAM = "knoxsso.enable.session";
 +  private static final String ORIGINAL_URL_REQUEST_PARAM = "originalUrl";
 +  private static final String ORIGINAL_URL_COOKIE_NAME = "original-url";
 +  private static final String DEFAULT_SSO_COOKIE_NAME = "hadoop-jwt";
 +  // Default whitelist: open for development; allows relative paths and localhost only.
 +  private static final String DEFAULT_WHITELIST = "^/.*$;^https?://(localhost|127.0.0.1|0:0:0:0:0:0:0:1|::1):\\d{0,9}/.*$";
++  private static final long TOKEN_TTL_DEFAULT = 30000L;
 +  static final String RESOURCE_PATH = "/api/v1/websso";
 +  private static KnoxSSOMessages log = MessagesFactory.get( KnoxSSOMessages.class );
 +  private String cookieName = null;
 +  private boolean secureOnly = true;
 +  private int maxAge = -1;
-   private long tokenTTL = 30000l;
++  private long tokenTTL = TOKEN_TTL_DEFAULT;
 +  private String whitelist = null;
 +  private String domainSuffix = null;
 +  private List<String> targetAudiences = new ArrayList<>();
 +  private boolean enableSession = false;
++  private String signatureAlgorithm = "RS256";
 +
 +  @Context
 +  HttpServletRequest request;
 +
 +  @Context
 +  HttpServletResponse response;
 +
 +  @Context
 +  ServletContext context;
 +
 +  @PostConstruct
 +  public void init() {
 +
 +    // configured cookieName
 +    cookieName = context.getInitParameter(SSO_COOKIE_NAME);
 +    if (cookieName == null) {
 +      cookieName = DEFAULT_SSO_COOKIE_NAME;
 +    }
 +
 +    String secure = context.getInitParameter(SSO_COOKIE_SECURE_ONLY_INIT_PARAM);
 +    if (secure != null) {
 +      secureOnly = !"false".equals(secure);
 +      if (!secureOnly) {
 +        log.cookieSecureOnly(secureOnly);
 +      }
 +    }
 +
 +    String age = context.getInitParameter(SSO_COOKIE_MAX_AGE_INIT_PARAM);
 +    if (age != null) {
 +      try {
 +        log.setMaxAge(age);
 +        maxAge = Integer.parseInt(age);
 +      }
 +      catch (NumberFormatException nfe) {
 +        log.invalidMaxAgeEncountered(age);
 +      }
 +    }
 +
 +    domainSuffix = context.getInitParameter(SSO_COOKIE_DOMAIN_SUFFIX_PARAM);
 +
 +    whitelist = context.getInitParameter(SSO_COOKIE_TOKEN_WHITELIST_PARAM);
 +    if (whitelist == null) {
 +      // default to local/relative targets
 +      whitelist = DEFAULT_WHITELIST;
 +    }
 +
 +    String audiences = context.getInitParameter(SSO_COOKIE_TOKEN_AUDIENCES_PARAM);
 +    if (audiences != null) {
 +      String[] auds = audiences.split(",");
 +      for (int i = 0; i < auds.length; i++) {
 +        targetAudiences.add(auds[i].trim());
 +      }
 +    }
 +
 +    String ttl = context.getInitParameter(SSO_COOKIE_TOKEN_TTL_PARAM);
 +    if (ttl != null) {
 +      try {
 +        tokenTTL = Long.parseLong(ttl);
++        if (tokenTTL < -1 || (tokenTTL + System.currentTimeMillis() < 0)) {
++          log.invalidTokenTTLEncountered(ttl);
++          tokenTTL = TOKEN_TTL_DEFAULT;
++        }
 +      }
 +      catch (NumberFormatException nfe) {
 +        log.invalidTokenTTLEncountered(ttl);
 +      }
 +    }
 +
 +    String enableSession = context.getInitParameter(SSO_ENABLE_SESSION_PARAM);
 +    this.enableSession = ("true".equals(enableSession));
++
++    String sigAlg = context.getInitParameter(SSO_COOKIE_TOKEN_SIG_ALG);
++    if (sigAlg != null) {
++      signatureAlgorithm = sigAlg;
++    }
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doGet() {
 +    return getAuthenticationToken(HttpServletResponse.SC_TEMPORARY_REDIRECT);
 +  }
 +
 +  @POST
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doPost() {
 +    return getAuthenticationToken(HttpServletResponse.SC_SEE_OTHER);
 +  }
 +
 +  private Response getAuthenticationToken(int statusCode) {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +            .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    boolean removeOriginalUrlCookie = true;
 +    String original = getCookieValue(request, ORIGINAL_URL_COOKIE_NAME);
 +    if (original == null) {
 +      // If no SAML redirect preceded this request, get the original URL
 +      // from the request parameters.
 +      removeOriginalUrlCookie = false;
 +      original = getOriginalUrlFromQueryParams();
 +      if (original.isEmpty()) {
 +        log.originalURLNotFound();
 +        throw new WebApplicationException("Original URL not found in the request.", Response.Status.BAD_REQUEST);
 +      }
 +      boolean validRedirect = RegExUtils.checkWhitelist(whitelist, original);
 +      if (!validRedirect) {
 +        log.whiteListMatchFail(original, whitelist);
 +        throw new WebApplicationException("Original URL not valid according to the configured whitelist.",
 +                Response.Status.BAD_REQUEST);
 +      }
 +    }
 +
 +    JWTokenAuthority ts = services.getService(GatewayServices.TOKEN_SERVICE);
 +    Principal p = request.getUserPrincipal();
 +
 +    try {
 +      JWT token = null;
 +      if (targetAudiences.isEmpty()) {
-         token = ts.issueToken(p, "RS256", getExpiry());
++        token = ts.issueToken(p, signatureAlgorithm, getExpiry());
 +      } else {
-         token = ts.issueToken(p, targetAudiences, "RS256", getExpiry());
++        token = ts.issueToken(p, targetAudiences, signatureAlgorithm, getExpiry());
 +      }
 +
 +      // Coverity CID 1327959
 +      if( token != null ) {
 +        addJWTHadoopCookie( original, token );
 +      }
 +
 +      if (removeOriginalUrlCookie) {
 +        removeOriginalUrlCookie(response);
 +      }
 +
 +      log.aboutToRedirectToOriginal(original);
 +      response.setStatus(statusCode);
 +      response.setHeader("Location", original);
 +      try {
 +        response.getOutputStream().close();
 +      } catch (IOException e) {
 +        log.unableToCloseOutputStream(e.getMessage(), Arrays.toString(e.getStackTrace()));
 +      }
 +    }
 +    catch (TokenServiceException e) {
 +      log.unableToIssueToken(e);
 +    }
 +    URI location = null;
 +    try {
 +      location = new URI(original);
 +    }
 +    catch(URISyntaxException urise) {
 +      // TODO: log the failure and return an error response
 +    }
 +
 +    if (!enableSession) {
 +      // invalidate the session to avoid autologin
 +      // Coverity CID 1352857
 +      HttpSession session = request.getSession(false);
 +      if( session != null ) {
 +        session.invalidate();
 +      }
 +    }
 +
 +    return Response.seeOther(location).entity("{ \"redirectTo\" : \"" + original + "\" }").build();
 +  }
 +
 +  private String getOriginalUrlFromQueryParams() {
 +    String original = request.getParameter(ORIGINAL_URL_REQUEST_PARAM);
 +    if (original == null) {
 +      original = "";
 +    }
 +    StringBuilder buf = new StringBuilder(original);
 +
 +    // Add any other query params.
 +    // Not ideal, but avoids breaking existing integrations by requiring
 +    // them to encode the original URL.
 +    Map<String, String[]> params = request.getParameterMap();
 +    for (Entry<String, String[]> entry : params.entrySet()) {
 +      if (!ORIGINAL_URL_REQUEST_PARAM.equals(entry.getKey())
 +          && !original.contains(entry.getKey() + "=")) {
 +        buf.append("&").append(entry.getKey());
 +        String[] values = entry.getValue();
 +        if (values.length > 0 && values[0] != null) {
 +          buf.append("=");
 +        }
 +        for (int i = 0; i < values.length; i++) {
 +          if (values[i] != null) {
 +            buf.append(values[i]);
 +            if (i < values.length-1) {
 +              buf.append("&").append(entry.getKey()).append("=");
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    return buf.toString();
 +  }
 +
 +  private long getExpiry() {
 +    long expiry = 0L;
 +    if (tokenTTL == -1) {
 +      expiry = -1;
 +    }
 +    else {
 +      expiry = System.currentTimeMillis() + tokenTTL;
 +    }
 +    return expiry;
 +  }
 +
 +  private void addJWTHadoopCookie(String original, JWT token) {
 +    log.addingJWTCookie(token.toString());
 +    Cookie c = new Cookie(cookieName,  token.toString());
 +    c.setPath("/");
 +    try {
 +      String domain = Urls.getDomainName(original, domainSuffix);
 +      if (domain != null) {
 +        c.setDomain(domain);
 +      }
 +      c.setHttpOnly(true);
 +      if (secureOnly) {
 +        c.setSecure(true);
 +      }
 +      if (maxAge != -1) {
 +        c.setMaxAge(maxAge);
 +      }
 +      response.addCookie(c);
 +      log.addedJWTCookie();
 +    }
 +    catch(Exception e) {
 +      log.unableAddCookieToResponse(e.getMessage(), Arrays.toString(e.getStackTrace()));
 +      throw new WebApplicationException("Unable to add JWT cookie to response.");
 +    }
 +  }
 +
 +  private void removeOriginalUrlCookie(HttpServletResponse response) {
 +    Cookie c = new Cookie(ORIGINAL_URL_COOKIE_NAME, null);
 +    c.setMaxAge(0);
 +    c.setPath(RESOURCE_PATH);
 +    response.addCookie(c);
 +  }
 +
 +  private String getCookieValue(HttpServletRequest request, String name) {
 +    Cookie[] cookies = request.getCookies();
 +    String value = null;
 +    if (cookies != null) {
 +      for(Cookie cookie : cookies){
 +        if(name.equals(cookie.getName())){
 +          value = cookie.getValue();
 +        }
 +      }
 +    }
 +    if (value == null) {
 +      log.cookieNotFound(name);
 +    }
 +    return value;
 +  }
 +}
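
Two behavioral notes on the diff above. First, the signature algorithm for the issued JWT is now configurable via the knoxsso.token.sigalg init parameter, defaulting to RS256. Second, init() now sanity-checks the configured token TTL: values below -1, or large enough that adding them to the current time overflows into a negative expiry, fall back to the 30-second default. A standalone sketch of that guard (class and method names are illustrative; only the arithmetic mirrors the code above):

    public class TokenTtlGuardSketch {

        private static final long TOKEN_TTL_DEFAULT = 30000L;

        // -1 means "no expiry"; anything below -1, non-numeric, or so large
        // that (ttl + now) overflows falls back to the default.
        static long sanitizeTtl(String configured) {
            long ttl = TOKEN_TTL_DEFAULT;
            try {
                ttl = Long.parseLong(configured);
                if (ttl < -1 || (ttl + System.currentTimeMillis() < 0)) {
                    ttl = TOKEN_TTL_DEFAULT;
                }
            } catch (NumberFormatException nfe) {
                // keep the default
            }
            return ttl;
        }

        public static void main(String[] args) {
            System.out.println(sanitizeTtl("60000")); // 60000
            System.out.println(sanitizeTtl("-1"));    // -1 (token never expires)
            System.out.println(sanitizeTtl("-2"));    // 30000 (default)
            System.out.println(sanitizeTtl(String.valueOf(Long.MAX_VALUE))); // 30000 (overflow)
        }
    }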


[27/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
----------------------------------------------------------------------
diff --cc gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
index a0035fc,0000000..9ecd7fc
mode 100644,000000..100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
@@@ -1,657 -1,0 +1,674 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin;
 +
 +import com.fasterxml.jackson.annotation.JsonProperty;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.knox.gateway.i18n.GatewaySpiMessages;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.admin.beans.BeanConverter;
 +import org.apache.knox.gateway.service.admin.beans.Topology;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +
 +import javax.servlet.http.HttpServletRequest;
 +import javax.ws.rs.Consumes;
 +import javax.ws.rs.DELETE;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.PUT;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.PathParam;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import javax.xml.bind.annotation.XmlAccessType;
 +import javax.xml.bind.annotation.XmlAccessorType;
 +import javax.xml.bind.annotation.XmlElement;
 +import javax.xml.bind.annotation.XmlElementWrapper;
 +import java.io.File;
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.List;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
 +import static javax.ws.rs.core.MediaType.TEXT_PLAIN;
 +
 +import static javax.ws.rs.core.Response.ok;
 +import static javax.ws.rs.core.Response.created;
 +import static javax.ws.rs.core.Response.notModified;
 +import static javax.ws.rs.core.Response.status;
 +
 +
 +@Path("/api/v1")
 +public class TopologiesResource {
 +
 +  private static final String XML_EXT  = ".xml";
 +  private static final String JSON_EXT = ".json";
 +
 +  private static final String TOPOLOGIES_API_PATH    = "topologies";
 +  private static final String SINGLE_TOPOLOGY_API_PATH = TOPOLOGIES_API_PATH + "/{id}";
 +  private static final String PROVIDERCONFIG_API_PATH = "providerconfig";
 +  private static final String SINGLE_PROVIDERCONFIG_API_PATH = PROVIDERCONFIG_API_PATH + "/{name}";
 +  private static final String DESCRIPTORS_API_PATH    = "descriptors";
 +  private static final String SINGLE_DESCRIPTOR_API_PATH = DESCRIPTORS_API_PATH + "/{name}";
 +
 +  private static GatewaySpiMessages log = MessagesFactory.get(GatewaySpiMessages.class);
 +
 +  @Context
 +  private HttpServletRequest request;
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Topology getTopology(@PathParam("id") String id) {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    GatewayConfig config = (GatewayConfig) request.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +      if(t.getName().equals(id)) {
 +        try {
 +          t.setUri(new URI( buildURI(t, config, request) ));
 +        } catch (URISyntaxException se) {
 +          t.setUri(null);
 +        }
 +        return BeanConverter.getTopology(t);
 +      }
 +    }
 +    return null;
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  @Path(TOPOLOGIES_API_PATH)
 +  public SimpleTopologyWrapper getTopologies() {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    ArrayList<SimpleTopology> st = new ArrayList<>();
 +    GatewayConfig conf = (GatewayConfig) request.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +      st.add(getSimpleTopology(t, conf));
 +    }
 +
 +    Collections.sort(st, new TopologyComparator());
 +    SimpleTopologyWrapper stw = new SimpleTopologyWrapper();
 +
 +    for(SimpleTopology t : st){
 +      stw.topologies.add(t);
 +    }
 +
 +    return stw;
 +
 +  }
 +
 +  @PUT
 +  @Consumes({APPLICATION_JSON, APPLICATION_XML})
 +  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Topology uploadTopology(@PathParam("id") String id, Topology t) {
++    Topology result = null;
 +
-     GatewayServices gs = (GatewayServices) request.getServletContext()
-         .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
++    GatewayServices gs =
++                (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    t.setName(id);
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
-     ts.deployTopology(BeanConverter.getTopology(t));
++    // Check for existing topology with the same name, to see if it had been generated
++    boolean existingGenerated = false;
++    for (org.apache.knox.gateway.topology.Topology existingTopology : ts.getTopologies()) {
++      if(existingTopology.getName().equals(id)) {
++        existingGenerated = existingTopology.isGenerated();
++        break;
++      }
++    }
++
++    // If a topology with the same ID exists, which had been generated, then DO NOT overwrite it because it will be
++    // out of sync with the source descriptor. Otherwise, deploy the updated version.
++    if (!existingGenerated) {
++      ts.deployTopology(BeanConverter.getTopology(t));
++      result = getTopology(id);
++    } else {
++      log.disallowedOverwritingGeneratedTopology(id);
++    }
 +
-     return getTopology(id);
++    return result;
 +  }
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
 +  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Response deleteTopology(@PathParam("id") String id) {
 +    boolean deleted = false;
 +    if(!"admin".equals(id)) {
 +      GatewayServices services = (GatewayServices) request.getServletContext()
 +          .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +      for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +        if(t.getName().equals(id)) {
 +          ts.deleteTopology(t);
 +          deleted = true;
 +        }
 +      }
 +    }
 +    return ok().entity("{ \"deleted\" : " + deleted + " }").build();
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON})
 +  @Path(PROVIDERCONFIG_API_PATH)
 +  public HrefListing getProviderConfigurations() {
 +    HrefListing listing = new HrefListing();
 +    listing.setHref(buildHref(request));
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    List<HrefListItem> configs = new ArrayList<>();
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    // Get all the simple descriptor file names
 +    for (File providerConfig : ts.getProviderConfigurations()){
 +      String id = FilenameUtils.getBaseName(providerConfig.getName());
 +      configs.add(new HrefListItem(buildHref(id, request), providerConfig.getName()));
 +    }
 +
 +    listing.setItems(configs);
 +    return listing;
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_XML})
 +  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
 +  public Response getProviderConfiguration(@PathParam("name") String name) {
 +    Response response;
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    File providerConfigFile = null;
 +
 +    for (File pc : ts.getProviderConfigurations()){
 +      // If the file name matches the specified id
 +      if (FilenameUtils.getBaseName(pc.getName()).equals(name)) {
 +        providerConfigFile = pc;
 +        break;
 +      }
 +    }
 +
 +    if (providerConfigFile != null) {
 +      byte[] content = null;
 +      try {
 +        content = FileUtils.readFileToByteArray(providerConfigFile);
 +        response = ok().entity(content).build();
 +      } catch (IOException e) {
 +        log.failedToReadConfigurationFile(providerConfigFile.getAbsolutePath(), e);
 +        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
 +      }
 +
 +    } else {
 +      response = Response.status(Response.Status.NOT_FOUND).build();
 +    }
 +    return response;
 +  }
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
 +  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
 +  public Response deleteProviderConfiguration(@PathParam("name") String name) {
 +    Response response;
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    if (ts.deleteProviderConfiguration(name)) {
 +      response = ok().entity("{ \"deleted\" : \"provider config " + name + "\" }").build();
 +    } else {
 +      response = notModified().build();
 +    }
 +    return response;
 +  }
 +
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
 +  @Path(SINGLE_DESCRIPTOR_API_PATH)
 +  public Response deleteSimpleDescriptor(@PathParam("name") String name) {
 +    Response response = null;
 +    if(!"admin".equals(name)) {
 +      GatewayServices services =
 +              (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +      if (ts.deleteDescriptor(name)) {
 +        response = ok().entity("{ \"deleted\" : \"descriptor " + name + "\" }").build();
 +      }
 +    }
 +
 +    if (response == null) {
 +      response = notModified().build();
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  @PUT
 +  @Consumes({APPLICATION_XML})
 +  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
 +  public Response uploadProviderConfiguration(@PathParam("name") String name, String content) {
 +    Response response = null;
 +
 +    GatewayServices gs =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    boolean isUpdate = configFileExists(ts.getProviderConfigurations(), name);
 +
 +    String filename = name.endsWith(XML_EXT) ? name : name + XML_EXT;
 +    if (ts.deployProviderConfiguration(filename, content)) {
 +      try {
 +        if (isUpdate) {
 +          response = Response.noContent().build();
 +        } else{
 +          response = created(new URI(buildHref(request))).build();
 +        }
 +      } catch (URISyntaxException e) {
 +        log.invalidResourceURI(e.getInput(), e.getReason(), e);
 +        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy provider configuration " + name + "\" }").build();
 +      }
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  private boolean configFileExists(Collection<File> existing, String candidateName) {
 +    boolean result = false;
 +    for (File exists : existing) {
 +      if (FilenameUtils.getBaseName(exists.getName()).equals(candidateName)) {
 +        result = true;
 +        break;
 +      }
 +    }
 +    return result;
 +  }
 +
 +
 +  @PUT
 +  @Consumes({APPLICATION_JSON})
 +  @Path(SINGLE_DESCRIPTOR_API_PATH)
 +  public Response uploadSimpleDescriptor(@PathParam("name") String name, String content) {
 +    Response response = null;
 +
 +    GatewayServices gs =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    boolean isUpdate = configFileExists(ts.getDescriptors(), name);
 +
 +    String filename = name.endsWith(JSON_EXT) ? name : name + JSON_EXT;
 +    if (ts.deployDescriptor(filename, content)) {
 +      try {
 +        if (isUpdate) {
 +          response = Response.noContent().build();
 +        } else {
 +          response = created(new URI(buildHref(request))).build();
 +        }
 +      } catch (URISyntaxException e) {
 +        log.invalidResourceURI(e.getInput(), e.getReason(), e);
 +        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy descriptor " + name + "\" }").build();
 +      }
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  @GET
 +  @Produces({APPLICATION_JSON})
 +  @Path(DESCRIPTORS_API_PATH)
 +  public HrefListing getSimpleDescriptors() {
 +    HrefListing listing = new HrefListing();
 +    listing.setHref(buildHref(request));
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    List<HrefListItem> descriptors = new ArrayList<>();
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    for (File descriptor : ts.getDescriptors()){
 +      String id = FilenameUtils.getBaseName(descriptor.getName());
 +      descriptors.add(new HrefListItem(buildHref(id, request), descriptor.getName()));
 +    }
 +
 +    listing.setItems(descriptors);
 +    return listing;
 +  }
 +
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, TEXT_PLAIN})
 +  @Path(SINGLE_DESCRIPTOR_API_PATH)
 +  public Response getSimpleDescriptor(@PathParam("name") String name) {
 +    Response response;
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    File descriptorFile = null;
 +
 +    for (File sd : ts.getDescriptors()){
 +      // If the file name matches the specified id
 +      if (FilenameUtils.getBaseName(sd.getName()).equals(name)) {
 +        descriptorFile = sd;
 +        break;
 +      }
 +    }
 +
 +    if (descriptorFile != null) {
 +      String mediaType = APPLICATION_JSON;
 +
 +      byte[] content = null;
 +      try {
 +        if ("yml".equals(FilenameUtils.getExtension(descriptorFile.getName()))) {
 +          mediaType = TEXT_PLAIN;
 +        }
 +        content = FileUtils.readFileToByteArray(descriptorFile);
 +        response = ok().type(mediaType).entity(content).build();
 +      } catch (IOException e) {
 +        log.failedToReadConfigurationFile(descriptorFile.getAbsolutePath(), e);
 +        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
 +      }
 +    } else {
 +      response = Response.status(Response.Status.NOT_FOUND).build();
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  private static class TopologyComparator implements Comparator<SimpleTopology> {
 +    @Override
 +    public int compare(SimpleTopology t1, SimpleTopology t2) {
 +      return t1.getName().compareTo(t2.getName());
 +    }
 +  }
 +
 +
 +  String buildURI(org.apache.knox.gateway.topology.Topology topology, GatewayConfig config, HttpServletRequest req){
 +    String uri = buildXForwardBaseURL(req);
 +
 +    // Strip extra context
 +    uri = uri.replace(req.getContextPath(), "");
 +
 +    // Add the gateway path
 +    String gatewayPath;
 +    if(config.getGatewayPath() != null){
 +      gatewayPath = config.getGatewayPath();
 +    }else{
 +      gatewayPath = "gateway";
 +    }
 +    uri += "/" + gatewayPath;
 +
 +    uri += "/" + topology.getName();
 +    return uri;
 +  }
 +
 +  String buildHref(HttpServletRequest req) {
 +    return buildHref((String)null, req);
 +  }
 +
 +  String buildHref(String id, HttpServletRequest req) {
 +    String href = buildXForwardBaseURL(req);
 +    // Make sure that the pathInfo doesn't have any '/' chars at the end.
 +    String pathInfo = req.getPathInfo();
 +    while(pathInfo.endsWith("/")) {
 +      pathInfo = pathInfo.substring(0, pathInfo.length() - 1);
 +    }
 +
 +    href += pathInfo;
 +
 +    if (id != null) {
 +      href += "/" + id;
 +    }
 +
 +    return href;
 +  }
 +
 +  String buildHref(org.apache.knox.gateway.topology.Topology t, HttpServletRequest req) {
 +    return buildHref(t.getName(), req);
 +  }
 +
 +  private SimpleTopology getSimpleTopology(org.apache.knox.gateway.topology.Topology t, GatewayConfig config) {
 +    String uri = buildURI(t, config, request);
 +    String href = buildHref(t, request);
 +    return new SimpleTopology(t, uri, href);
 +  }
 +
 +  private String buildXForwardBaseURL(HttpServletRequest req){
 +    final String X_Forwarded = "X-Forwarded-";
 +    final String X_Forwarded_Context = X_Forwarded + "Context";
 +    final String X_Forwarded_Proto = X_Forwarded + "Proto";
 +    final String X_Forwarded_Host = X_Forwarded + "Host";
 +    final String X_Forwarded_Port = X_Forwarded + "Port";
 +    final String X_Forwarded_Server = X_Forwarded + "Server";
 +
 +    String baseURL = "";
 +
 +    // Get Protocol
 +    if(req.getHeader(X_Forwarded_Proto) != null){
 +      baseURL += req.getHeader(X_Forwarded_Proto) + "://";
 +    } else {
 +      baseURL += req.getProtocol() + "://";
 +    }
 +
 +    // Handle Server/Host and Port Here
 +    if (req.getHeader(X_Forwarded_Host) != null && req.getHeader(X_Forwarded_Port) != null){
 +      // Double check to see if host has port
 +      if(req.getHeader(X_Forwarded_Host).contains(req.getHeader(X_Forwarded_Port))){
 +        baseURL += req.getHeader(X_Forwarded_Host);
 +      } else {
 +        // The host header doesn't include the port, so append it.
 +        baseURL += req.getHeader(X_Forwarded_Host) + ":" + req.getHeader(X_Forwarded_Port);
 +      }
 +    } else if(req.getHeader(X_Forwarded_Server) != null && req.getHeader(X_Forwarded_Port) != null){
 +      // Fall back to the server and port headers when both are available.
 +      baseURL += req.getHeader(X_Forwarded_Server) + ":" + req.getHeader(X_Forwarded_Port);
 +    } else if(req.getHeader(X_Forwarded_Port) != null) {
 +      // if we at least have a port, we can use it.
 +      baseURL += req.getServerName() + ":" + req.getHeader(X_Forwarded_Port);
 +    } else {
 +      // Resort to request members
 +      baseURL += req.getServerName() + ":" + req.getLocalPort();
 +    }
 +
 +    // Handle Server context
 +    if( req.getHeader(X_Forwarded_Context) != null ) {
 +      baseURL += req.getHeader( X_Forwarded_Context );
 +    } else {
 +      baseURL += req.getContextPath();
 +    }
 +
 +    return baseURL;
 +  }
 +
 +
 +  static class HrefListing {
 +    @JsonProperty
 +    String href;
 +
 +    @JsonProperty
 +    List<HrefListItem> items;
 +
 +    HrefListing() {}
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setItems(List<HrefListItem> items) {
 +      this.items = items;
 +    }
 +
 +    public List<HrefListItem> getItems() {
 +      return items;
 +    }
 +  }
 +
 +  static class HrefListItem {
 +    @JsonProperty
 +    String href;
 +
 +    @JsonProperty
 +    String name;
 +
 +    HrefListItem() {}
 +
 +    HrefListItem(String href, String name) {
 +      this.href = href;
 +      this.name = name;
 +    }
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setName(String name) {
 +      this.name = name;
 +    }
 +    public String getName() {
 +      return name;
 +    }
 +  }
 +
 +
 +  @XmlAccessorType(XmlAccessType.NONE)
 +  public static class SimpleTopology {
 +
 +    @XmlElement
 +    private String name;
 +    @XmlElement
 +    private String timestamp;
 +    @XmlElement
 +    private String defaultServicePath;
 +    @XmlElement
 +    private String uri;
 +    @XmlElement
 +    private String href;
 +
 +    public SimpleTopology() {}
 +
 +    public SimpleTopology(org.apache.knox.gateway.topology.Topology t, String uri, String href) {
 +      this.name = t.getName();
 +      this.timestamp = Long.toString(t.getTimestamp());
 +      this.defaultServicePath = t.getDefaultServicePath();
 +      this.uri = uri;
 +      this.href = href;
 +    }
 +
 +    public String getName() {
 +      return name;
 +    }
 +
 +    public void setName(String n) {
 +      name = n;
 +    }
 +
 +    public String getTimestamp() {
 +      return timestamp;
 +    }
 +
 +    public void setDefaultService(String defaultServicePath) {
 +      this.defaultServicePath = defaultServicePath;
 +    }
 +
 +    public String getDefaultService() {
 +      return defaultServicePath;
 +    }
 +
 +    public void setTimestamp(String timestamp) {
 +      this.timestamp = timestamp;
 +    }
 +
 +    public String getUri() {
 +      return uri;
 +    }
 +
 +    public void setUri(String uri) {
 +      this.uri = uri;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +  }
 +
 +  @XmlAccessorType(XmlAccessType.FIELD)
 +  public static class SimpleTopologyWrapper{
 +
 +    @XmlElement(name="topology")
 +    @XmlElementWrapper(name="topologies")
 +    private List<SimpleTopology> topologies = new ArrayList<>();
 +
 +    public List<SimpleTopology> getTopologies(){
 +      return topologies;
 +    }
 +
 +    public void setTopologies(List<SimpleTopology> ts){
 +      this.topologies = ts;
 +    }
 +
 +  }
 +}
 +
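
The uploadTopology() change above refuses to overwrite a topology that was generated from a simple descriptor, since a direct PUT would leave the deployed XML out of sync with its source descriptor. A condensed sketch of the guard as a standalone predicate; the Topology stand-in below carries only the two accessors the check needs and is not the real Knox class:

    import java.util.Arrays;
    import java.util.List;

    public class GeneratedTopologyGuardSketch {

        // Minimal stand-in for org.apache.knox.gateway.topology.Topology.
        static class Topology {
            private final String name;
            private final boolean generated;
            Topology(String name, boolean generated) {
                this.name = name;
                this.generated = generated;
            }
            String getName() { return name; }
            boolean isGenerated() { return generated; }
        }

        // Deploy is allowed when no topology with that name exists, or the
        // existing one was hand-authored rather than descriptor-generated.
        static boolean mayOverwrite(String id, List<Topology> existing) {
            for (Topology t : existing) {
                if (t.getName().equals(id)) {
                    return !t.isGenerated();
                }
            }
            return true;
        }

        public static void main(String[] args) {
            List<Topology> deployed = Arrays.asList(
                new Topology("sandbox", false),
                new Topology("mycluster", true)); // generated from a descriptor
            System.out.println(mayOverwrite("sandbox", deployed));   // true
            System.out.println(mayOverwrite("mycluster", deployed)); // false
            System.out.println(mayOverwrite("brandnew", deployed));  // true
        }
    }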

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
----------------------------------------------------------------------
diff --cc gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
index e8d6915,0000000..e916568
mode 100644,000000..100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
@@@ -1,170 -1,0 +1,172 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin.beans;
 +
 +import org.apache.knox.gateway.topology.Version;
 +
 +import java.util.Collection;
 +
 +public class BeanConverter {
 +
 +  public static Topology getTopology(
 +      org.apache.knox.gateway.topology.Topology topology) {
 +    Topology topologyResource = new Topology();
 +    topologyResource.setName(topology.getName());
 +    topologyResource.setTimestamp(topology.getTimestamp());
 +    topologyResource.setPath(topology.getDefaultServicePath());
 +    topologyResource.setUri(topology.getUri());
++    topologyResource.setGenerated(topology.isGenerated());
 +    for ( org.apache.knox.gateway.topology.Provider provider : topology.getProviders() ) {
 +      topologyResource.getProviders().add( getProvider(provider) );
 +    }
 +    for ( org.apache.knox.gateway.topology.Service service : topology.getServices() ) {
 +      topologyResource.getServices().add( getService(service) );
 +    }
 +    for ( org.apache.knox.gateway.topology.Application application : topology.getApplications() ) {
 +      topologyResource.getApplications().add( getApplication(application) );
 +    }
 +    return topologyResource;
 +  }
 +
 +  public static org.apache.knox.gateway.topology.Topology getTopology(Topology topology) {
 +    org.apache.knox.gateway.topology.Topology deploymentTopology = new org.apache.knox.gateway.topology.Topology();
 +    deploymentTopology.setName(topology.getName());
 +    deploymentTopology.setTimestamp(topology.getTimestamp());
 +    deploymentTopology.setDefaultServicePath(topology.getPath());
 +    deploymentTopology.setUri(topology.getUri());
++    deploymentTopology.setGenerated(topology.isGenerated());
 +    for ( Provider provider : topology.getProviders() ) {
 +      deploymentTopology.addProvider( getProvider(provider) );
 +    }
 +    for ( Service service : topology.getServices() ) {
 +      deploymentTopology.addService( getService(service) );
 +    }
 +    for ( Application application : topology.getApplications() ) {
 +      deploymentTopology.addApplication( getApplication(application) );
 +    }
 +    return deploymentTopology;
 +  }
 +
 +  private static Provider getProvider(
 +      org.apache.knox.gateway.topology.Provider provider) {
 +    Provider providerResource = new Provider();
 +    providerResource.setName(provider.getName());
 +    providerResource.setEnabled(provider.isEnabled());
 +    providerResource.setRole(provider.getRole());
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = provider.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        providerResource.getParams().add(getParam(param));
 +      }
 +    }
 +    return providerResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Provider getProvider(Provider provider) {
 +    org.apache.knox.gateway.topology.Provider deploymentProvider = new org.apache.knox.gateway.topology.Provider();
 +    deploymentProvider.setName(provider.getName());
 +    deploymentProvider.setEnabled(provider.isEnabled());
 +    deploymentProvider.setRole(provider.getRole());
 +    for ( Param param : provider.getParams() ) {
 +      deploymentProvider.addParam( getParam(param) );
 +    }
 +    return deploymentProvider;
 +  }
 +
 +  private static Service getService(
 +      org.apache.knox.gateway.topology.Service service) {
 +    Service serviceResource = new Service();
 +    serviceResource.setRole(service.getRole());
 +    serviceResource.setName(service.getName());
 +    Version version = service.getVersion();
 +    if (version != null) {
 +      serviceResource.setVersion(version.toString());
 +    }
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = service.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        serviceResource.getParams().add(getParam(param));
 +      }
 +    }
 +    for ( String url : service.getUrls() ) {
 +      serviceResource.getUrls().add( url );
 +    }
 +    return serviceResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Service getService(Service service) {
 +    org.apache.knox.gateway.topology.Service deploymentService = new org.apache.knox.gateway.topology.Service();
 +    deploymentService.setRole(service.getRole());
 +    deploymentService.setName(service.getName());
 +    if (service.getVersion() != null) {
 +      deploymentService.setVersion(new Version(service.getVersion()));
 +    }
 +    for ( Param param : service.getParams() ) {
 +      deploymentService.addParam( getParam(param) );
 +    }
 +    for ( String url : service.getUrls() ) {
 +      deploymentService.addUrl( url );
 +    }
 +    return deploymentService;
 +  }
 +
 +  private static Application getApplication(
 +      org.apache.knox.gateway.topology.Application application) {
 +    Application applicationResource = new Application();
 +    applicationResource.setRole(application.getRole());
 +    applicationResource.setName(application.getName());
 +    Version version = application.getVersion();
 +    if (version != null) {
 +      applicationResource.setVersion(version.toString());
 +    }
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = application.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        applicationResource.getParams().add(getParam(param));
 +      }
 +    }
 +    for ( String url : application.getUrls() ) {
 +      applicationResource.getUrls().add( url );
 +    }
 +    return applicationResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Application getApplication(Application application) {
 +    org.apache.knox.gateway.topology.Application applicationResource = new org.apache.knox.gateway.topology.Application();
 +    applicationResource.setRole(application.getRole());
 +    applicationResource.setName(application.getName());
 +    if (application.getVersion() != null) {
 +      applicationResource.setVersion(new Version(application.getVersion()));
 +    }
 +    for ( Param param : application.getParams() ) {
 +      applicationResource.addParam( getParam(param) );
 +    }
 +    for ( String url : application.getUrls() ) {
 +      applicationResource.getUrls().add( url );
 +    }
 +    return applicationResource;
 +  }
 +
 +  private static Param getParam(org.apache.knox.gateway.topology.Param param) {
 +    return new Param(param.getName(), param.getValue());
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Param getParam(Param param) {
 +    return new org.apache.knox.gateway.topology.Param(param.getName(), param.getValue());
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
----------------------------------------------------------------------
diff --cc gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
index 2d2eab8,0000000..e1a8279
mode 100644,000000..100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
@@@ -1,119 -1,0 +1,130 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin.beans;
 +
 +import javax.xml.bind.annotation.XmlElement;
 +import javax.xml.bind.annotation.XmlElementWrapper;
 +import javax.xml.bind.annotation.XmlRootElement;
 +import java.net.URI;
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +@XmlRootElement(name="topology")
 +public class Topology {
 +
 +  @XmlElement
 +  private URI uri;
 +
 +  @XmlElement
 +  private String name;
 +
 +  @XmlElement
 +  private String path;
 +
 +  @XmlElement
 +  private long timestamp;
 +
++  @XmlElement(name="generated")
++  private boolean isGenerated;
++
 +  @XmlElement(name="provider")
 +  @XmlElementWrapper(name="gateway")
 +  public List<Provider> providers;
 +
 +  @XmlElement(name="service")
 +  public List<Service> services;
 +
 +  @XmlElement(name="application")
 +  private List<Application> applications;
 +
 +  public Topology() {
 +  }
 +
 +  public URI getUri() {
 +    return uri;
 +  }
 +
 +  public void setUri( URI uri ) {
 +    this.uri = uri;
 +  }
 +
 +  public String getName() {
 +    return name;
 +  }
 +
 +  public void setName( String name ) {
 +    this.name = name;
 +  }
 +
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +
 +  public void setPath( String defaultServicePath ) {
 +    this.path = defaultServicePath;
 +  }
 +
 +  public String getPath() {
 +    return path;
 +  }
 +
 +  public void setTimestamp( long timestamp ) {
 +    this.timestamp = timestamp;
 +  }
 +
++  public boolean isGenerated() {
++    return isGenerated;
++  }
++
++  public void setGenerated(boolean isGenerated) {
++    this.isGenerated = isGenerated;
++  }
++
 +  public List<Service> getServices() {
 +    if (services == null) {
 +      services = new ArrayList<>();
 +    }
 +    return services;
 +  }
 +
 +  public List<Application> getApplications() {
 +    if (applications == null) {
 +      applications = new ArrayList<>();
 +    }
 +    return applications;
 +  }
 +
 +  public List<Provider> getProviders() {
 +    if (providers == null) {
 +      providers = new ArrayList<>();
 +    }
 +    return providers;
 +  }
 +
 +  public void setProviders(List<Provider> providers) {
 +    this.providers = providers;
 +  }
 +
 +  public void setServices(List<Service> services) {
 +    this.services = services;
 +  }
 +
 +  public void setApplications(List<Application> applications) {
 +    this.applications = applications;
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-service-definitions/src/main/java/org/apache/knox/gateway/service/definition/CustomDispatch.java
----------------------------------------------------------------------
diff --cc gateway-service-definitions/src/main/java/org/apache/knox/gateway/service/definition/CustomDispatch.java
index 3fe81e8,0000000..ac82b39
mode 100644,000000..100644
--- a/gateway-service-definitions/src/main/java/org/apache/knox/gateway/service/definition/CustomDispatch.java
+++ b/gateway-service-definitions/src/main/java/org/apache/knox/gateway/service/definition/CustomDispatch.java
@@@ -1,80 -1,0 +1,91 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.definition;
 +
 +import javax.xml.bind.annotation.XmlAttribute;
 +import javax.xml.bind.annotation.XmlType;
 +
 +@XmlType(name = "dispatch")
 +public class CustomDispatch {
 +
 +  private String contributorName;
 +
 +  private String haContributorName;
 +
 +  private String className;
 +
 +  private String haClassName;
 +
 +  private String httpClientFactory;
 +
++  private boolean useTwoWaySsl = false;
++
 +  @XmlAttribute(name = "contributor-name")
 +  public String getContributorName() {
 +    return contributorName;
 +  }
 +
 +  public void setContributorName(String contributorName) {
 +    this.contributorName = contributorName;
 +  }
 +
 +  @XmlAttribute(name = "ha-contributor-name")
 +  public String getHaContributorName() {
 +    return haContributorName;
 +  }
 +
 +  public void setHaContributorName(String haContributorName) {
 +    this.haContributorName = haContributorName;
 +  }
 +
 +  @XmlAttribute(name = "classname")
 +  public String getClassName() {
 +    return className;
 +  }
 +
 +  public void setClassName(String className) {
 +    this.className = className;
 +  }
 +
 +  @XmlAttribute(name = "ha-classname")
 +  public String getHaClassName() {
 +    return haClassName;
 +  }
 +
 +  public void setHaClassName(String haContributorClassName) {
 +    this.haClassName = haContributorClassName;
 +  }
 +
 +  @XmlAttribute(name = "http-client-factory")
 +  public String getHttpClientFactory() {
 +    return httpClientFactory;
 +  }
 +
 +  public void setHttpClientFactory(String httpClientFactory) {
 +    this.httpClientFactory = httpClientFactory;
 +  }
++
++  @XmlAttribute(name = "use-two-way-ssl")
++  public boolean getUseTwoWaySsl() {
++    return useTwoWaySsl;
++  }
++
++  public void setUseTwoWaySsl(boolean useTwoWaySsl) {
++    this.useTwoWaySsl = useTwoWaySsl;
++  }
 +}
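
A minimal sketch of how the new use-two-way-ssl attribute could round-trip through JAXB. The inline XML, the demo class, and the org.example.MyDispatch class name are assumptions for illustration, not taken from Knox's service-definition loader:

    import java.io.StringReader;
    import javax.xml.bind.JAXBContext;
    import javax.xml.transform.stream.StreamSource;

    public class CustomDispatchXmlDemo {
      public static void main(String[] args) throws Exception {
        // Hypothetical dispatch element exercising the new attribute.
        String xml = "<dispatch classname=\"org.example.MyDispatch\" use-two-way-ssl=\"true\"/>";
        JAXBContext ctx = JAXBContext.newInstance(CustomDispatch.class);
        // CustomDispatch has no @XmlRootElement, so unmarshal with a declared type.
        CustomDispatch dispatch = ctx.createUnmarshaller()
            .unmarshal(new StreamSource(new StringReader(xml)), CustomDispatch.class)
            .getValue();
        System.out.println(dispatch.getUseTwoWaySsl()); // prints true
      }
    }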

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-shell-launcher/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-shell-release/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java
index e822364,0000000..dcb7465
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java
@@@ -1,233 -1,0 +1,270 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.dispatch;
 +
++import java.io.IOException;
++import java.security.KeyStore;
++import java.security.Principal;
++import java.util.Collections;
++import java.util.Date;
++import java.util.List;
++
++import javax.net.ssl.SSLContext;
++import javax.servlet.FilterConfig;
++
++import org.apache.knox.gateway.services.security.AliasService;
++import org.apache.knox.gateway.services.security.AliasServiceException;
++import org.apache.knox.gateway.services.security.KeystoreService;
++import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.metrics.MetricsService;
 +import org.apache.http.HttpRequest;
 +import org.apache.http.HttpResponse;
 +import org.apache.http.ProtocolException;
 +import org.apache.http.auth.AuthSchemeProvider;
 +import org.apache.http.auth.AuthScope;
 +import org.apache.http.auth.Credentials;
 +import org.apache.http.client.CookieStore;
 +import org.apache.http.client.CredentialsProvider;
 +import org.apache.http.client.HttpClient;
 +import org.apache.http.client.HttpRequestRetryHandler;
 +import org.apache.http.client.RedirectStrategy;
 +import org.apache.http.client.config.AuthSchemes;
 +import org.apache.http.client.config.RequestConfig;
 +import org.apache.http.client.methods.HttpUriRequest;
 +import org.apache.http.config.Registry;
 +import org.apache.http.config.RegistryBuilder;
++import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
++import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
 +import org.apache.http.cookie.Cookie;
 +import org.apache.http.impl.DefaultConnectionReuseStrategy;
 +import org.apache.http.impl.client.BasicCredentialsProvider;
 +import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy;
 +import org.apache.http.impl.client.HttpClientBuilder;
 +import org.apache.http.impl.client.HttpClients;
 +import org.apache.http.protocol.HttpContext;
++import org.apache.http.ssl.SSLContexts;
 +import org.joda.time.Period;
 +import org.joda.time.format.PeriodFormatter;
 +import org.joda.time.format.PeriodFormatterBuilder;
 +
- import javax.servlet.FilterConfig;
- import java.io.IOException;
- import java.security.Principal;
- import java.util.Collections;
- import java.util.Date;
- import java.util.List;
- 
 +public class DefaultHttpClientFactory implements HttpClientFactory {
 +
 +  @Override
 +  public HttpClient createHttpClient(FilterConfig filterConfig) {
 +    HttpClientBuilder builder = null;
 +    GatewayConfig gatewayConfig = (GatewayConfig) filterConfig.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
++    GatewayServices services = (GatewayServices) filterConfig.getServletContext()
++        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    if (gatewayConfig != null && gatewayConfig.isMetricsEnabled()) {
-       GatewayServices services = (GatewayServices) filterConfig.getServletContext()
-           .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +      MetricsService metricsService = services.getService(GatewayServices.METRICS_SERVICE);
 +      builder = metricsService.getInstrumented(HttpClientBuilder.class);
 +    } else {
 +      builder = HttpClients.custom();
 +    }
++    if (Boolean.parseBoolean(filterConfig.getInitParameter("useTwoWaySsl"))) {
++      char[] keypass = null;
++      MasterService ms = services.getService("MasterService");
++      AliasService as = services.getService(GatewayServices.ALIAS_SERVICE);
++      try {
++        keypass = as.getGatewayIdentityPassphrase();
++      } catch (AliasServiceException e) {
++        // no-op: fall back to the default passphrase below
++      }
++      if (keypass == null) {
++        // No alias was created for the key, so assume it matches the keystore password.
++        keypass = ms.getMasterSecret();
++      }
++
++      KeystoreService ks = services.getService(GatewayServices.KEYSTORE_SERVICE);
++      final SSLContext sslcontext;
++      try {
++        KeyStore keystoreForGateway = ks.getKeystoreForGateway();
++        sslcontext = SSLContexts.custom()
++            .loadTrustMaterial(keystoreForGateway, new TrustSelfSignedStrategy())
++            .loadKeyMaterial(keystoreForGateway, keypass)
++            .build();
++      } catch (Exception e) {
++        throw new IllegalArgumentException("Unable to create SSLContext", e);
++      }
++      builder.setSSLSocketFactory(new SSLConnectionSocketFactory(sslcontext));
++    }
 +    if ( "true".equals(System.getProperty(GatewayConfig.HADOOP_KERBEROS_SECURED)) ) {
 +      CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
 +      credentialsProvider.setCredentials(AuthScope.ANY, new UseJaasCredentials());
 +
 +      Registry<AuthSchemeProvider> authSchemeRegistry = RegistryBuilder.<AuthSchemeProvider>create()
 +          .register(AuthSchemes.SPNEGO, new KnoxSpnegoAuthSchemeFactory(true))
 +          .build();
 +
 +      builder = builder.setDefaultAuthSchemeRegistry(authSchemeRegistry)
 +          .setDefaultCookieStore(new HadoopAuthCookieStore())
 +          .setDefaultCredentialsProvider(credentialsProvider);
 +    } else {
 +      builder = builder.setDefaultCookieStore(new NoCookieStore());
 +    }
 +
 +    builder.setKeepAliveStrategy( DefaultConnectionKeepAliveStrategy.INSTANCE );
 +    builder.setConnectionReuseStrategy( DefaultConnectionReuseStrategy.INSTANCE );
 +    builder.setRedirectStrategy( new NeverRedirectStrategy() );
 +    builder.setRetryHandler( new NeverRetryHandler() );
 +
 +    int maxConnections = getMaxConnections( filterConfig );
 +    builder.setMaxConnTotal( maxConnections );
 +    builder.setMaxConnPerRoute( maxConnections );
 +
 +    builder.setDefaultRequestConfig( getRequestConfig( filterConfig ) );
 +
 +    HttpClient client = builder.build();
 +    return client;
 +  }
 +
 +  private static RequestConfig getRequestConfig( FilterConfig config ) {
 +    RequestConfig.Builder builder = RequestConfig.custom();
 +    int connectionTimeout = getConnectionTimeout( config );
 +    if ( connectionTimeout != -1 ) {
 +      builder.setConnectTimeout( connectionTimeout );
 +      builder.setConnectionRequestTimeout( connectionTimeout );
 +    }
 +    int socketTimeout = getSocketTimeout( config );
 +    if( socketTimeout != -1 ) {
 +      builder.setSocketTimeout( socketTimeout );
 +    }
 +    return builder.build();
 +  }
 +
 +  private static class NoCookieStore implements CookieStore {
 +    @Override
 +    public void addCookie(Cookie cookie) {
 +      //no op
 +    }
 +
 +    @Override
 +    public List<Cookie> getCookies() {
 +      return Collections.emptyList();
 +    }
 +
 +    @Override
 +    public boolean clearExpired(Date date) {
 +      return true;
 +    }
 +
 +    @Override
 +    public void clear() {
 +      //no op
 +    }
 +  }
 +
 +  private static class NeverRedirectStrategy implements RedirectStrategy {
 +    @Override
 +    public boolean isRedirected( HttpRequest request, HttpResponse response, HttpContext context )
 +        throws ProtocolException {
 +      return false;
 +    }
 +
 +    @Override
 +    public HttpUriRequest getRedirect( HttpRequest request, HttpResponse response, HttpContext context )
 +        throws ProtocolException {
 +      return null;
 +    }
 +  }
 +
 +  private static class NeverRetryHandler implements HttpRequestRetryHandler {
 +    @Override
 +    public boolean retryRequest( IOException exception, int executionCount, HttpContext context ) {
 +      return false;
 +    }
 +  }
 +
 +  private static class UseJaasCredentials implements Credentials {
 +
 +    public String getPassword() {
 +      return null;
 +    }
 +
 +    public Principal getUserPrincipal() {
 +      return null;
 +    }
 +
 +  }
 +
 +  private int getMaxConnections( FilterConfig filterConfig ) {
 +    int maxConnections = 32;
 +    GatewayConfig config =
 +        (GatewayConfig)filterConfig.getServletContext().getAttribute( GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE );
 +    if( config != null ) {
 +      maxConnections = config.getHttpClientMaxConnections();
 +    }
 +    String str = filterConfig.getInitParameter( "httpclient.maxConnections" );
 +    if( str != null ) {
 +      try {
 +        maxConnections = Integer.parseInt( str );
 +      } catch ( NumberFormatException e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return maxConnections;
 +  }
 +
 +  private static int getConnectionTimeout( FilterConfig filterConfig ) {
 +    int timeout = -1;
 +    GatewayConfig globalConfig =
 +        (GatewayConfig)filterConfig.getServletContext().getAttribute( GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE );
 +    if( globalConfig != null ) {
 +      timeout = globalConfig.getHttpClientConnectionTimeout();
 +    }
 +    String str = filterConfig.getInitParameter( "httpclient.connectionTimeout" );
 +    if( str != null ) {
 +      try {
 +        timeout = (int)parseTimeout( str );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return timeout;
 +  }
 +
 +  private static int getSocketTimeout( FilterConfig filterConfig ) {
 +    int timeout = -1;
 +    GatewayConfig globalConfig =
 +        (GatewayConfig)filterConfig.getServletContext().getAttribute( GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE );
 +    if( globalConfig != null ) {
 +      timeout = globalConfig.getHttpClientSocketTimeout();
 +    }
 +    String str = filterConfig.getInitParameter( "httpclient.socketTimeout" );
 +    if( str != null ) {
 +      try {
 +        timeout = (int)parseTimeout( str );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return timeout;
 +  }
 +
 +  private static long parseTimeout( String s ) {
 +    PeriodFormatter f = new PeriodFormatterBuilder()
 +        .appendMinutes().appendSuffix("m"," min")
 +        .appendSeconds().appendSuffix("s"," sec")
 +        .appendMillis().toFormatter();
 +    Period p = Period.parse( s, f );
 +    return p.toStandardDuration().getMillis();
 +  }
 +
 +}
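
To isolate the mutual-TLS wiring added to createHttpClient above, here is a self-contained sketch that builds the same kind of SSLContext from a plain JKS file instead of Knox's KeystoreService; the keystore path and passwords are placeholders:

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import javax.net.ssl.SSLContext;
    import org.apache.http.client.HttpClient;
    import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
    import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
    import org.apache.http.impl.client.HttpClients;
    import org.apache.http.ssl.SSLContexts;

    public class TwoWaySslClientSketch {
      public static HttpClient build() throws Exception {
        KeyStore keystore = KeyStore.getInstance("JKS");
        try (FileInputStream in = new FileInputStream("/path/to/gateway.jks")) {
          keystore.load(in, "keystore-password".toCharArray());
        }
        // Same pattern as the factory: one keystore supplies both the client
        // identity (key material) and the trusted certificates.
        SSLContext sslContext = SSLContexts.custom()
            .loadTrustMaterial(keystore, new TrustSelfSignedStrategy())
            .loadKeyMaterial(keystore, "key-password".toCharArray())
            .build();
        return HttpClients.custom()
            .setSSLSocketFactory(new SSLConnectionSocketFactory(sslContext))
            .build();
      }
    }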

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
index 27a1905,0000000..42d69d9
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
@@@ -1,91 -1,0 +1,94 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.i18n;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.knox.gateway")
 +public interface GatewaySpiMessages {
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to load the internal principal mapping table: {0}" )
 +  void failedToLoadPrincipalMappingTable( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to execute filter: {0}" )
 +  void failedToExecuteFilter( @StackTrace( level = MessageLevel.DEBUG ) Throwable t );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt passphrase: {0}" )
 +  void failedToEncryptPassphrase( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate secret key from password: {0}" )
 +  void failedToGenerateKeyFromPassword( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to create keystore [filename={0}, type={1}]: {2}" )
 +  void failedToCreateKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load keystore [filename={0}, type={1}]: {2}" )
 +  void failedToLoadKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add credential: {1}" )
 +  void failedToAddCredential( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to remove credential: {1}")
 +  void failedToRemoveCredential(@StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get credential: {1}" )
 +  void failedToGetCredential(@StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to persist master secret: {0}" )
 +  void failedToPersistMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt master secret: {0}" )
 +  void failedToEncryptMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize master service from persistent master {0}: {1}" )
 +  void failedToInitializeFromPersistentMaster( String masterFileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add self signed certificate for Gateway {0}: {1}" )
 +  void failedToAddSeflSignedCertForGateway( String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key {0}: {1}" )
 +  void failedToGetKey(String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Loading from persistent master: {0}" )
 +  void loadingFromPersistentMaster( String tag );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "ALIAS: {0}" )
 +  void printClusterAlias( String alias );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "MASTER SERVICE == NULL: {0}" )
 +  void printMasterServiceIsNull( boolean masterServiceIsNull );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Gateway has failed to start. Unable to prompt user for master secret setup. Please consider using knoxcli.sh create-master" )
 +  void unableToPromptForMasterUseKnoxCLI();
 +
 +  @Message( level = MessageLevel.ERROR, text = "Error in generating certificate: {0}" )
 +  void failedToGenerateCertificate( @StackTrace( level = MessageLevel.ERROR ) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to read configuration: {0}")
 +  void failedToReadConfigurationFile(final String filePath, @StackTrace(level = MessageLevel.DEBUG) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Invalid resource URI {0} : {1}")
 +  void invalidResourceURI(final String uri, final String reason, @StackTrace(level = MessageLevel.DEBUG) Exception e );
 +
++  @Message( level = MessageLevel.ERROR, text = "Topology {0} cannot be manually overwritten because it was generated from a simple descriptor." )
++  void disallowedOverwritingGeneratedTopology(final String topologyName);
++
 +}
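
For reference, a @Messages interface like this one is materialised at runtime through MessagesFactory, the same idiom visible elsewhere in this patch set; the demo class and the "sandbox" topology name are made up:

    import org.apache.knox.gateway.i18n.GatewaySpiMessages;
    import org.apache.knox.gateway.i18n.messages.MessagesFactory;

    public class SpiMessagesUsageSketch {
      // MessagesFactory generates the logging proxy behind the interface.
      private static final GatewaySpiMessages LOG =
          MessagesFactory.get(GatewaySpiMessages.class);

      public static void main(String[] args) {
        LOG.disallowedOverwritingGeneratedTopology("sandbox");
      }
    }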

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
----------------------------------------------------------------------
diff --cc gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
index 815c218,0000000..e46197d
mode 100644,000000..100644
--- a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
@@@ -1,151 -1,0 +1,160 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology;
 +
 +import org.apache.commons.collections.map.HashedMap;
 +import org.apache.commons.collections.map.MultiKeyMap;
 +
 +import java.net.URI;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class Topology {
 +
 +  private URI uri;
 +  private String name;
 +  private String defaultServicePath = null;
 +  private long timestamp;
++  private boolean isGenerated;
 +  public List<Provider> providerList = new ArrayList<Provider>();
 +  private Map<String,Map<String,Provider>> providerMap = new HashMap<>();
 +  public List<Service> services = new ArrayList<Service>();
 +  private MultiKeyMap serviceMap;
 +  private List<Application> applications = new ArrayList<Application>();
 +  private Map<String,Application> applicationMap = new HashMap<>();
 +
 +  public Topology() {
 +    serviceMap = MultiKeyMap.decorate(new HashedMap());
 +  }
 +
 +  public URI getUri() {
 +    return uri;
 +  }
 +
 +  public void setUri( URI uri ) {
 +    this.uri = uri;
 +  }
 +
 +  public String getName() {
 +    return name;
 +  }
 +
 +  public void setName( String name ) {
 +    this.name = name;
 +  }
 +
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +
 +  public void setTimestamp( long timestamp ) {
 +    this.timestamp = timestamp;
 +  }
 +
 +  public String getDefaultServicePath() {
 +    return defaultServicePath;
 +  }
 +
 +  public void setDefaultServicePath(String servicePath) {
 +    defaultServicePath = servicePath;
 +  }
 +
++  public void setGenerated(boolean isGenerated) {
++    this.isGenerated = isGenerated;
++  }
++
++  public boolean isGenerated() {
++    return isGenerated;
++  }
++
 +  public Collection<Service> getServices() {
 +    return services;
 +  }
 +
 +  public Service getService( String role, String name, Version version) {
 +    return (Service)serviceMap.get(role, name, version);
 +  }
 +
 +  public void addService( Service service ) {
 +    services.add( service );
 +    serviceMap.put(service.getRole(), service.getName(), service.getVersion(), service);
 +  }
 +
 +  public Collection<Application> getApplications() {
 +    return applications;
 +  }
 +
 +  private static String fixApplicationUrl( String url ) {
 +    if( url == null ) {
 +      url = "/";
 +    }
 +    if( !url.startsWith( "/" ) ) {
 +      url = "/" + url;
 +    }
 +    return url;
 +  }
 +
 +  public Application getApplication(String url) {
 +    return applicationMap.get( fixApplicationUrl( url ) );
 +  }
 +
 +  public void addApplication( Application application ) {
 +    applications.add( application );
 +    List<String> urls = application.getUrls();
 +    if( urls == null || urls.isEmpty() ) {
 +      applicationMap.put( fixApplicationUrl( application.getName() ), application );
 +    } else {
 +      for( String url : application.getUrls() ) {
 +        applicationMap.put( fixApplicationUrl( url ), application );
 +      }
 +    }
 +  }
 +
 +  public Collection<Provider> getProviders() {
 +    return providerList;
 +  }
 +
 +  public Provider getProvider( String role, String name ) {
 +    Provider provider = null;
 +    Map<String,Provider> nameMap = providerMap.get( role );
 +    if( nameMap != null) { 
 +      if( name != null ) {
 +        provider = nameMap.get( name );
 +      }
 +      else {
 +        provider = (Provider) nameMap.values().toArray()[0];
 +      }
 +    }
 +    return provider;
 +  }
 +
 +  public void addProvider( Provider provider ) {
 +    providerList.add( provider );
 +    String role = provider.getRole();
 +    Map<String,Provider> nameMap = providerMap.get( role );
 +    if( nameMap == null ) {
 +      nameMap = new HashMap<>();
 +      providerMap.put( role, nameMap );
 +    }
 +    nameMap.put( provider.getName(), provider );
 +  }
 +
 +}
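
A hypothetical guard combining the new Topology.isGenerated() flag with the message added to GatewaySpiMessages above; the surrounding deployment-handler shape is invented, not Knox's actual code path:

    import org.apache.knox.gateway.i18n.GatewaySpiMessages;
    import org.apache.knox.gateway.i18n.messages.MessagesFactory;
    import org.apache.knox.gateway.topology.Topology;

    public class GeneratedTopologyGuard {
      private static final GatewaySpiMessages LOG =
          MessagesFactory.get(GatewaySpiMessages.class);

      // Returns true when a manual deployment may replace the existing topology.
      public boolean mayOverwrite(Topology existing) {
        if (existing != null && existing.isGenerated()) {
          LOG.disallowedOverwritingGeneratedTopology(existing.getName());
          return false;
        }
        return true;
      }
    }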


[44/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistry.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistry.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistry.java
deleted file mode 100644
index f3e7dbd..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/config/RemoteConfigurationRegistry.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.config;
-
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-
-import javax.xml.bind.annotation.XmlElement;
-
-class RemoteConfigurationRegistry implements RemoteConfigurationRegistryConfig {
-
-    private String name;
-    private String type;
-    private String connectionString;
-    private String namespace;
-    private String authType;
-    private String principal;
-    private String credentialAlias;
-    private String keyTab;
-    private boolean useKeyTab;
-    private boolean useTicketCache;
-
-    RemoteConfigurationRegistry() {
-    }
-
-    public void setName(String name) {
-        this.name = name;
-    }
-
-    public void setRegistryType(String type) {
-        this.type = type;
-    }
-
-    public void setConnectionString(String connectionString) {
-        this.connectionString = connectionString;
-    }
-
-    public void setNamespace(String namespace) {
-        this.namespace = namespace;
-    }
-
-    public void setAuthType(String authType) {
-        this.authType = authType;
-    }
-
-    public void setPrincipal(String principal) {
-        this.principal = principal;
-    }
-
-    public void setCredentialAlias(String alias) {
-        this.credentialAlias = alias;
-    }
-
-    public void setUseTicketCache(boolean useTicketCache) {
-        this.useTicketCache = useTicketCache;
-    }
-
-    public void setUseKeytab(boolean useKeytab) {
-        this.useKeyTab = useKeytab;
-    }
-
-    public void setKeytab(String keytab) {
-        this.keyTab = keytab;
-    }
-
-    @XmlElement(name="name")
-    public String getName() {
-        return name;
-    }
-
-    @XmlElement(name="type")
-    public String getRegistryType() {
-        return type;
-    }
-
-    @XmlElement(name="auth-type")
-    public String getAuthType() {
-        return authType;
-    }
-
-    @XmlElement(name="principal")
-    public String getPrincipal() {
-        return principal;
-    }
-
-    @XmlElement(name="credential-alias")
-    public String getCredentialAlias() {
-        return credentialAlias;
-    }
-
-    @Override
-    @XmlElement(name="address")
-    public String getConnectionString() {
-        return connectionString;
-    }
-
-    @Override
-    @XmlElement(name="namespace")
-    public String getNamespace() {
-        return namespace;
-    }
-
-    @Override
-    @XmlElement(name="use-ticket-cache")
-    public boolean isUseTicketCache() {
-        return useTicketCache;
-    }
-
-    @Override
-    @XmlElement(name="use-key-tab")
-    public boolean isUseKeyTab() {
-        return useKeyTab;
-    }
-
-    @Override
-    @XmlElement(name="keytab")
-    public String getKeytab() {
-        return keyTab;
-    }
-
-    @Override
-    public boolean isSecureRegistry() {
-        return (getAuthType() != null);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/CuratorClientService.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/CuratorClientService.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/CuratorClientService.java
deleted file mode 100644
index f9b5ab3..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/CuratorClientService.java
+++ /dev/null
@@ -1,464 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.zk;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.framework.api.ACLProvider;
-import org.apache.curator.framework.imps.DefaultACLProvider;
-import org.apache.curator.framework.recipes.cache.ChildData;
-import org.apache.curator.framework.recipes.cache.NodeCache;
-import org.apache.curator.framework.recipes.cache.NodeCacheListener;
-import org.apache.curator.framework.recipes.cache.PathChildrenCache;
-import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
-import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationMessages;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient.EntryListener;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClient;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-import org.apache.hadoop.gateway.service.config.remote.config.RemoteConfigurationRegistriesAccessor;
-import org.apache.hadoop.gateway.services.ServiceLifecycleException;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.client.ZooKeeperSaslClient;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-import org.apache.zookeeper.data.Stat;
-
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * RemoteConfigurationRegistryClientService implementation that employs the Curator ZooKeeper client framework.
- */
-class CuratorClientService implements ZooKeeperClientService {
-
-    private static final String LOGIN_CONTEXT_NAME_PROPERTY = ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY;
-
-    private static final String DEFAULT_LOGIN_CONTEXT_NAME = "Client";
-
-    private static final RemoteConfigurationMessages log =
-                                                        MessagesFactory.get(RemoteConfigurationMessages.class);
-
-    private Map<String, RemoteConfigurationRegistryClient> clients = new HashMap<>();
-
-    private AliasService aliasService = null;
-
-
-    @Override
-    public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
-
-        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
-
-        // Load the remote registry configurations
-        registryConfigs.addAll(RemoteConfigurationRegistriesAccessor.getRemoteRegistryConfigurations(config));
-
-        // Configure registry authentication
-        RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, aliasService);
-
-        if (registryConfigs.size() > 1) {
-            // Warn about current limit on number of supported client configurations
-            log.multipleRemoteRegistryConfigurations();
-        }
-
-        // Create the clients
-        for (RemoteConfigurationRegistryConfig registryConfig : registryConfigs) {
-            if (TYPE.equalsIgnoreCase(registryConfig.getRegistryType())) {
-                RemoteConfigurationRegistryClient registryClient = createClient(registryConfig);
-                clients.put(registryConfig.getName(), registryClient);
-            }
-        }
-    }
-
-    @Override
-    public void setAliasService(AliasService aliasService) {
-        this.aliasService = aliasService;
-    }
-
-    @Override
-    public void start() throws ServiceLifecycleException {
-    }
-
-    @Override
-    public void stop() throws ServiceLifecycleException {
-    }
-
-    @Override
-    public RemoteConfigurationRegistryClient get(String name) {
-        return clients.get(name);
-    }
-
-
-    private RemoteConfigurationRegistryClient createClient(RemoteConfigurationRegistryConfig config) {
-        ACLProvider aclProvider;
-        if (config.isSecureRegistry()) {
-            configureSasl(config);
-            aclProvider = new SASLOwnerACLProvider();
-        } else {
-            // Clear SASL system property
-            System.clearProperty(LOGIN_CONTEXT_NAME_PROPERTY);
-            aclProvider = new DefaultACLProvider();
-        }
-
-        CuratorFramework client = CuratorFrameworkFactory.builder()
-                                                         .connectString(config.getConnectionString())
-                                                         .retryPolicy(new ExponentialBackoffRetry(1000, 3))
-                                                         .aclProvider(aclProvider)
-                                                         .build();
-        client.start();
-
-        return (new ClientAdapter(client, config));
-    }
-
-
-    private void configureSasl(RemoteConfigurationRegistryConfig config) {
-        String registryName = config.getName();
-        if (registryName == null) {
-            registryName = DEFAULT_LOGIN_CONTEXT_NAME;
-        }
-        System.setProperty(LOGIN_CONTEXT_NAME_PROPERTY, registryName);
-    }
-
-
-    private static final class ClientAdapter implements RemoteConfigurationRegistryClient {
-
-        private static final String DEFAULT_ENCODING = "UTF-8";
-
-        private CuratorFramework delegate;
-
-        private RemoteConfigurationRegistryConfig config;
-
-        private Map<String, NodeCache> entryNodeCaches = new HashMap<>();
-
-        ClientAdapter(CuratorFramework delegate, RemoteConfigurationRegistryConfig config) {
-            this.delegate = delegate;
-            this.config = config;
-        }
-
-        @Override
-        public String getAddress() {
-            return config.getConnectionString();
-        }
-
-        @Override
-        public boolean isAuthenticationConfigured() {
-            return config.isSecureRegistry();
-        }
-
-        @Override
-        public boolean entryExists(String path) {
-            Stat s = null;
-            try {
-                s = delegate.checkExists().forPath(path);
-            } catch (Exception e) {
-                // Ignore
-            }
-            return (s != null);
-        }
-
-        @Override
-        public List<RemoteConfigurationRegistryClient.EntryACL> getACL(String path) {
-            List<RemoteConfigurationRegistryClient.EntryACL> acl = new ArrayList<>();
-            try {
-                List<ACL> zkACL = delegate.getACL().forPath(path);
-                if (zkACL != null) {
-                    for (ACL aclEntry : zkACL) {
-                        RemoteConfigurationRegistryClient.EntryACL entryACL = new ZooKeeperACLAdapter(aclEntry);
-                        acl.add(entryACL);
-                    }
-                }
-            } catch (Exception e) {
-                log.errorHandlingRemoteConfigACL(path, e);
-            }
-            return acl;
-        }
-
-        @Override
-        public void setACL(String path, List<EntryACL> entryACLs) {
-            // Translate the abstract ACLs into ZooKeeper ACLs
-            List<ACL> delegateACLs = new ArrayList<>();
-            for (EntryACL entryACL : entryACLs) {
-                String scheme = entryACL.getType();
-                String id = entryACL.getId();
-                int permissions = 0;
-                if (entryACL.canWrite()) {
-                    permissions = ZooDefs.Perms.ALL;
-                } else if (entryACL.canRead()){
-                    permissions = ZooDefs.Perms.READ;
-                }
-                delegateACLs.add(new ACL(permissions, new Id(scheme, id)));
-            }
-
-            try {
-                // Set the ACLs for the path
-                delegate.setACL().withACL(delegateACLs).forPath(path);
-            } catch (Exception e) {
-                log.errorSettingEntryACL(path, e);
-            }
-        }
-
-        @Override
-        public List<String> listChildEntries(String path) {
-            List<String> result = null;
-            try {
-                result = delegate.getChildren().forPath(path);
-            } catch (Exception e) {
-                log.errorInteractingWithRemoteConfigRegistry(e);
-            }
-            return result;
-        }
-
-        @Override
-        public void addChildEntryListener(String path, ChildEntryListener listener) throws Exception {
-            PathChildrenCache childCache = new PathChildrenCache(delegate, path, false);
-            childCache.getListenable().addListener(new ChildEntryListenerAdapter(this, listener));
-            childCache.start();
-        }
-
-        @Override
-        public void addEntryListener(String path, EntryListener listener) throws Exception {
-            NodeCache nodeCache = new NodeCache(delegate, path);
-            nodeCache.getListenable().addListener(new EntryListenerAdapter(this, nodeCache, listener));
-            nodeCache.start();
-            entryNodeCaches.put(path, nodeCache);
-        }
-
-        @Override
-        public void removeEntryListener(String path) throws Exception {
-            NodeCache nodeCache = entryNodeCaches.remove(path);
-            if (nodeCache != null) {
-                nodeCache.close();
-            }
-        }
-
-        @Override
-        public String getEntryData(String path) {
-            return getEntryData(path, DEFAULT_ENCODING);
-        }
-
-        @Override
-        public String getEntryData(String path, String encoding) {
-            String result = null;
-            try {
-                byte[] data = delegate.getData().forPath(path);
-                if (data != null) {
-                    result = new String(data, Charset.forName(encoding));
-                }
-            } catch (Exception e) {
-                log.errorInteractingWithRemoteConfigRegistry(e);
-            }
-            return result;
-        }
-
-        @Override
-        public void createEntry(String path) {
-            try {
-                if (delegate.checkExists().forPath(path) == null) {
-                    delegate.create().forPath(path);
-                }
-            } catch (Exception e) {
-                log.errorInteractingWithRemoteConfigRegistry(e);
-            }
-        }
-
-        @Override
-        public void createEntry(String path, String data) {
-            createEntry(path, data, DEFAULT_ENCODING);
-        }
-
-        @Override
-        public void createEntry(String path, String data, String encoding) {
-            try {
-                createEntry(path);
-                setEntryData(path, data, encoding);
-            } catch (Exception e) {
-                log.errorInteractingWithRemoteConfigRegistry(e);
-            }
-        }
-
-        @Override
-        public int setEntryData(String path, String data) {
-            return setEntryData(path, data, DEFAULT_ENCODING);
-        }
-
-        @Override
-        public int setEntryData(String path, String data, String encoding) {
-            int version = 0;
-            try {
-                Stat s = delegate.setData().forPath(path, data.getBytes(Charset.forName(encoding)));
-                if (s != null) {
-                    version = s.getVersion();
-                }
-            } catch (Exception e) {
-                log.errorInteractingWithRemoteConfigRegistry(e);
-            }
-            return version;
-        }
-
-        @Override
-        public void deleteEntry(String path) {
-            try {
-                delegate.delete().forPath(path);
-            } catch (Exception e) {
-                log.errorInteractingWithRemoteConfigRegistry(e);
-            }
-        }
-    }
-
-    /**
-     * SASL ACLProvider
-     */
-    private static class SASLOwnerACLProvider implements ACLProvider {
-
-        private final List<ACL> saslACL;
-
-        private SASLOwnerACLProvider() {
-            this.saslACL = ZooDefs.Ids.CREATOR_ALL_ACL; // All permissions for any authenticated user
-        }
-
-        @Override
-        public List<ACL> getDefaultAcl() {
-            return saslACL;
-        }
-
-        @Override
-        public List<ACL> getAclForPath(String path) {
-            return getDefaultAcl();
-        }
-    }
-
-
-    private static final class ChildEntryListenerAdapter implements PathChildrenCacheListener {
-
-        private RemoteConfigurationRegistryClient client;
-        private ChildEntryListener delegate;
-
-        ChildEntryListenerAdapter(RemoteConfigurationRegistryClient client, ChildEntryListener delegate) {
-            this.client = client;
-            this.delegate = delegate;
-        }
-
-        @Override
-        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent)
-                throws Exception {
-            ChildData childData = pathChildrenCacheEvent.getData();
-            if (childData != null) {
-                ChildEntryListener.Type eventType = adaptType(pathChildrenCacheEvent.getType());
-                if (eventType != null) {
-                    delegate.childEvent(client, eventType, childData.getPath());
-                }
-            }
-        }
-
-        private ChildEntryListener.Type adaptType(PathChildrenCacheEvent.Type type) {
-            ChildEntryListener.Type adapted = null;
-
-            switch(type) {
-                case CHILD_ADDED:
-                    adapted = ChildEntryListener.Type.ADDED;
-                    break;
-                case CHILD_REMOVED:
-                    adapted = ChildEntryListener.Type.REMOVED;
-                    break;
-                case CHILD_UPDATED:
-                    adapted = ChildEntryListener.Type.UPDATED;
-                    break;
-            }
-
-            return adapted;
-        }
-    }
-
-    private static final class EntryListenerAdapter implements NodeCacheListener {
-
-        private RemoteConfigurationRegistryClient client;
-        private EntryListener delegate;
-        private NodeCache nodeCache;
-
-        EntryListenerAdapter(RemoteConfigurationRegistryClient client, NodeCache nodeCache, EntryListener delegate) {
-            this.client = client;
-            this.nodeCache = nodeCache;
-            this.delegate = delegate;
-        }
-
-        @Override
-        public void nodeChanged() throws Exception {
-            String path = null;
-            byte[] data = null;
-
-            ChildData cd = nodeCache.getCurrentData();
-            if (cd != null) {
-                path = cd.getPath();
-                data = cd.getData();
-            }
-
-            if (path != null) {
-                delegate.entryChanged(client, path, data);
-            }
-        }
-    }
-
-    /**
-     * ACL adapter
-     */
-    private static final class ZooKeeperACLAdapter implements RemoteConfigurationRegistryClient.EntryACL {
-        private String type;
-        private String id;
-        private int permissions;
-
-        ZooKeeperACLAdapter(ACL acl) {
-            this.permissions = acl.getPerms();
-            this.type = acl.getId().getScheme();
-            this.id = acl.getId().getId();
-        }
-
-        @Override
-        public String getId() {
-            return id;
-        }
-
-        @Override
-        public String getType() {
-            return type;
-        }
-
-        @Override
-        public Object getPermissions() {
-            return permissions;
-        }
-
-        @Override
-        public boolean canRead() {
-            return (permissions >= ZooDefs.Perms.READ);
-        }
-
-        @Override
-        public boolean canWrite() {
-            return (permissions >= ZooDefs.Perms.WRITE);
-        }
-    }
-
-}
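
Although the class above is deleted here (apparently moving with the org.apache.hadoop to org.apache.knox restructuring), its createClient() is plain Curator usage. A standalone sketch with the same retry policy, using a placeholder ZooKeeper address:

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    public class CuratorConnectSketch {
      public static CuratorFramework connect(String connectString) {
        // Mirrors the deleted createClient(): 1s base sleep, up to 3 retries.
        CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(connectString)
            .retryPolicy(new ExponentialBackoffRetry(1000, 3))
            .build();
        client.start();
        return client;
      }

      public static void main(String[] args) {
        connect("localhost:2181").close(); // placeholder address
      }
    }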

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java
deleted file mode 100644
index 0b5a693..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.zk;
-
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationMessages;
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.services.security.AliasServiceException;
-
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.Configuration;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Configuration decorator that adds SASL JAAS configuration to whatever JAAS config is already applied.
- */
-class RemoteConfigurationRegistryJAASConfig extends Configuration {
-
-    // Underlying SASL mechanisms supported
-    enum SASLMechanism {
-        Unsupported,
-        Kerberos,
-        Digest
-    }
-
-    static final Map<String, String> digestLoginModules = new HashMap<>();
-    static {
-        digestLoginModules.put("ZOOKEEPER", "org.apache.zookeeper.server.auth.DigestLoginModule");
-    }
-
-    private static final RemoteConfigurationMessages log = MessagesFactory.get(RemoteConfigurationMessages.class);
-
-    // Cache the current JAAS configuration
-    private Configuration delegate = Configuration.getConfiguration();
-
-    private AliasService aliasService;
-
-    private Map<String, AppConfigurationEntry[]> contextEntries =  new HashMap<>();
-
-    static RemoteConfigurationRegistryJAASConfig configure(List<RemoteConfigurationRegistryConfig> configs, AliasService aliasService) {
-        return new RemoteConfigurationRegistryJAASConfig(configs, aliasService);
-    }
-
-    private RemoteConfigurationRegistryJAASConfig(List<RemoteConfigurationRegistryConfig> configs, AliasService aliasService) {
-        this.aliasService = aliasService;
-
-        // Populate context entries
-        List<AppConfigurationEntry> appConfigEntries = new ArrayList<>();
-        for (RemoteConfigurationRegistryConfig config : configs) {
-            if (config.isSecureRegistry()) {
-                contextEntries.put(config.getName(), createEntries(config));
-            }
-        }
-
-        // If there is at least one context entry, then set this as the client configuration
-        if (!contextEntries.isEmpty()) {
-            // TODO: PJZ: ZooKeeper 3.6.0 will have per-client JAAS Configuration support; Upgrade ASAP!!
-            // For now, set this as the static JAAS configuration
-            Configuration.setConfiguration(this);
-        }
-    }
-
-    @Override
-    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
-        AppConfigurationEntry[] result = null;
-
-        // First, try the delegate's context entries
-        result = delegate.getAppConfigurationEntry(name);
-        if (result == null || result.length < 1) {
-            // Try our additional context entries
-            result = contextEntries.get(name);
-        }
-
-        return result;
-    }
-
-    private AppConfigurationEntry[] createEntries(RemoteConfigurationRegistryConfig config) {
-        AppConfigurationEntry[] result = null;
-
-        AppConfigurationEntry entry = createEntry(config);
-        if (entry != null) {
-            // Only supporting a single app config entry per configuration/context
-            result = new AppConfigurationEntry[1];
-            result[0] = createEntry(config);
-        } else {
-            result = new AppConfigurationEntry[0];
-        }
-        return result;
-    }
-
-    private AppConfigurationEntry createEntry(RemoteConfigurationRegistryConfig config) {
-        AppConfigurationEntry entry = null;
-
-        Map<String, String> opts = new HashMap<>();
-        SASLMechanism saslMechanism = getSASLMechanism(config.getAuthType());
-        switch (saslMechanism) {
-            case Digest:
-                // Digest auth options
-                opts.put("username", config.getPrincipal());
-
-                char[] credential = null;
-                if (aliasService != null) {
-                    try {
-                        credential = aliasService.getPasswordFromAliasForGateway(config.getCredentialAlias());
-                    } catch (AliasServiceException e) {
-                        log.unresolvedCredentialAlias(config.getCredentialAlias());
-                    }
-                } else {
-                    throw new IllegalArgumentException("The AliasService is required to resolve credential aliases.");
-                }
-
-                if (credential != null) {
-                    opts.put("password", new String(credential));
-                }
-                break;
-            case Kerberos:
-                opts.put("isUseTicketCache", String.valueOf(config.isUseTicketCache()));
-                opts.put("isUseKeyTab", String.valueOf(config.isUseKeyTab()));
-                opts.put("keyTab", config.getKeytab());
-                opts.put("principal", config.getPrincipal());
-        }
-
-        if (!opts.isEmpty()) {
-            entry = new AppConfigurationEntry(getLoginModuleName(config.getRegistryType(), saslMechanism),
-                                              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-                                              opts);
-        }
-
-        return entry;
-    }
-
-    private static String getLoginModuleName(String registryType, SASLMechanism saslMechanism) {
-        String loginModuleName = null;
-
-        switch (saslMechanism) {
-            case Kerberos:
-                if (System.getProperty("java.vendor").contains("IBM")) {
-                    loginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
-                } else {
-                    loginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
-                }
-                break;
-            case Digest:
-                loginModuleName = digestLoginModules.get(registryType.toUpperCase());
-        }
-        return loginModuleName;
-    }
-
-    private static SASLMechanism getSASLMechanism(String authType) {
-        SASLMechanism result = SASLMechanism.Unsupported;
-        for (SASLMechanism at : SASLMechanism.values()) {
-            if (at.name().equalsIgnoreCase(authType)) {
-                result = at;
-                break;
-            }
-        }
-        return result;
-    }
-
-
-}
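
The deleted configuration decorator built JAAS entries like the following for Digest authentication against ZooKeeper; this standalone sketch hard-codes the credentials that the real class resolved through the AliasService:

    import java.util.HashMap;
    import java.util.Map;
    import javax.security.auth.login.AppConfigurationEntry;

    public class DigestJaasEntrySketch {
      public static AppConfigurationEntry digestEntry(String user, String password) {
        Map<String, String> opts = new HashMap<>();
        opts.put("username", user);
        opts.put("password", password);
        // Same login module the deleted class registered for ZooKeeper Digest auth.
        return new AppConfigurationEntry(
            "org.apache.zookeeper.server.auth.DigestLoginModule",
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
            opts);
      }
    }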

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientService.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientService.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientService.java
deleted file mode 100644
index c4add4a..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientService.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.zk;
-
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-public interface ZooKeeperClientService extends RemoteConfigurationRegistryClientService {
-
-    String TYPE = "ZooKeeper";
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java b/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java
deleted file mode 100644
index f30d3da..0000000
--- a/gateway-service-remoteconfig/src/main/java/org/apache/hadoop/gateway/service/config/remote/zk/ZooKeeperClientServiceProvider.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.service.config.remote.zk;
-
-import org.apache.hadoop.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider;
-
-
-public class ZooKeeperClientServiceProvider implements RemoteConfigurationRegistryClientServiceProvider {
-
-    @Override
-    public String getType() {
-        return ZooKeeperClientService.TYPE;
-    }
-
-    @Override
-    public ZooKeeperClientService newInstance() {
-        return new CuratorClientService();
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationMessages.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationMessages.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationMessages.java
new file mode 100644
index 0000000..057c8c5
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationMessages.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote;
+
+import org.apache.knox.gateway.i18n.messages.Message;
+import org.apache.knox.gateway.i18n.messages.MessageLevel;
+import org.apache.knox.gateway.i18n.messages.Messages;
+import org.apache.knox.gateway.i18n.messages.StackTrace;
+
+
+/**
+ * Logged messages for the remote configuration registry client service.
+ */
+@Messages(logger="org.apache.knox.gateway.service.config.remote")
+public interface RemoteConfigurationMessages {
+
+    @Message(level = MessageLevel.WARN,
+             text = "Multiple remote configuration registries are not currently supported if any of them requires authentication.")
+    void multipleRemoteRegistryConfigurations();
+
+    @Message(level = MessageLevel.ERROR, text = "Failed to resolve the credential alias {0}")
+    void unresolvedCredentialAlias(final String alias);
+
+    @Message(level = MessageLevel.ERROR, text = "An error occurred interacting with the remote configuration registry : {0}")
+    void errorInteractingWithRemoteConfigRegistry(@StackTrace(level = MessageLevel.DEBUG) Exception e);
+
+    @Message(level = MessageLevel.ERROR, text = "An error occurred handling the ACL for remote configuration {0} : {1}")
+    void errorHandlingRemoteConfigACL(final String path,
+                                      @StackTrace(level = MessageLevel.DEBUG) Exception e);
+
+    @Message(level = MessageLevel.ERROR, text = "An error occurred setting the ACL for remote configuration {0} : {1}")
+    void errorSettingEntryACL(final String path,
+                              @StackTrace(level = MessageLevel.DEBUG) Exception e);
+
+}
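
These generated-logger interfaces are driven by the @Messages/@Message annotations: MessagesFactory supplies an implementation at runtime, and callers log by invoking the declared methods. A minimal usage sketch (the alias value is illustrative):

    RemoteConfigurationMessages log = MessagesFactory.get(RemoteConfigurationMessages.class);
    log.unresolvedCredentialAlias("zkCredential");  // logs at ERROR, substituting the alias for {0}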

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java
new file mode 100644
index 0000000..f1719b6
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceFactory.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+import java.util.ServiceLoader;
+
+public class RemoteConfigurationRegistryClientServiceFactory {
+
+    public static RemoteConfigurationRegistryClientService newInstance(GatewayConfig config) {
+        RemoteConfigurationRegistryClientService rcs = null;
+
+        // Use the first provider discovered on the classpath
+        ServiceLoader<RemoteConfigurationRegistryClientServiceProvider> providers =
+                                             ServiceLoader.load(RemoteConfigurationRegistryClientServiceProvider.class);
+        for (RemoteConfigurationRegistryClientServiceProvider provider : providers) {
+            rcs = provider.newInstance();
+            if (rcs != null) {
+                break;
+            }
+        }
+
+        return rcs;
+    }
+
+}
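
Because the factory relies on java.util.ServiceLoader, each provider implementation must be registered in a standard provider-configuration file on the classpath; note that the first provider discovered wins. A sketch, assuming the restructured ZooKeeper provider class name:

    # META-INF/services/org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceProvider
    org.apache.knox.gateway.service.config.remote.zk.ZooKeeperClientServiceProvider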

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java
new file mode 100644
index 0000000..8f69e47
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryClientServiceProvider.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote;
+
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+public interface RemoteConfigurationRegistryClientServiceProvider {
+
+    String getType();
+
+    RemoteConfigurationRegistryClientService newInstance();
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java
new file mode 100644
index 0000000..cbebad7
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/RemoteConfigurationRegistryConfig.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote;
+
+public interface RemoteConfigurationRegistryConfig {
+
+    String getName();
+
+    String getRegistryType();
+
+    String getConnectionString();
+
+    String getNamespace();
+
+    boolean isSecureRegistry();
+
+    String getAuthType(); // digest, kerberos, etc.
+
+    String getPrincipal();
+
+    String getCredentialAlias();
+
+    String getKeytab();
+
+    boolean isUseTicketCache();
+
+    boolean isUseKeyTab();
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java
new file mode 100644
index 0000000..0b2f248
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/DefaultRemoteConfigurationRegistries.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The set of RemoteConfigurationRegistry configurations derived from gateway property name-value pairs.
+ */
+class DefaultRemoteConfigurationRegistries extends RemoteConfigurationRegistries {
+
+    private static final String PROPERTY_DELIM       = ";";
+    private static final String PROPERTY_VALUE_DELIM = "=";
+
+    private List<RemoteConfigurationRegistry> configuredRegistries = new ArrayList<>();
+
+    /**
+     * Derive the remote registry configurations from the specified GatewayConfig.
+     *
+     * @param gc The source GatewayConfig
+     */
+    DefaultRemoteConfigurationRegistries(GatewayConfig gc) {
+        List<String> configRegistryNames = gc.getRemoteRegistryConfigurationNames();
+        for (String configRegistryName : configRegistryNames) {
+            configuredRegistries.add(extractConfigForRegistry(gc, configRegistryName));
+        }
+    }
+
+    /**
+     * Extract the configuration for the specified registry configuration name.
+     *
+     * @param gc           The GatewayConfig from which to extract the registry config.
+     * @param registryName The name of the registry config.
+     *
+     * @return The resulting RemoteConfigurationRegistry object.
+     */
+    private static RemoteConfigurationRegistry extractConfigForRegistry(GatewayConfig gc, String registryName) {
+        RemoteConfigurationRegistry result = new RemoteConfigurationRegistry();
+
+        result.setName(registryName);
+
+        Map<String, String> properties = parsePropertyValue(gc.getRemoteRegistryConfiguration(registryName));
+
+        result.setRegistryType(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE));
+        result.setConnectionString(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS));
+        result.setNamespace(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_NAMESPACE));
+        result.setAuthType(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE));
+        result.setPrincipal(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL));
+        result.setCredentialAlias(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS));
+        result.setKeytab(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_KEYTAB));
+        result.setUseKeytab(Boolean.valueOf(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_KEYTAB)));
+        result.setUseTicketCache(Boolean.valueOf(properties.get(GatewayConfig.REMOTE_CONFIG_REGISTRY_USE_TICKET_CACHE)));
+
+        return result;
+    }
+
+    /**
+     * Parse the specified registry config properties String.
+     *
+     * @param value The property value content from GatewayConfig.
+     *
+     * @return A Map of the parsed properties and their respective values.
+     */
+    private static Map<String, String> parsePropertyValue(final String value) {
+        Map<String, String> result = new HashMap<>();
+
+        if (value != null) {
+            String[] props = value.split(PROPERTY_DELIM);
+            for (String prop : props) {
+                String[] split = prop.split(PROPERTY_VALUE_DELIM);
+                String propName  = split[0];
+                String propValue = (split.length > 1) ? split[1] : null;
+                result.put(propName, propValue);
+            }
+        }
+
+        return result;
+    }
+
+    @Override
+    List<RemoteConfigurationRegistry> getRegistryConfigurations() {
+        return configuredRegistries;
+    }
+
+}
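
The parsing above implies one gateway property value per registry, with '=' separating a name from its value and ';' separating pairs. A sketch of such a value, assuming the GatewayConfig constants resolve to keys like these (values illustrative):

    type=ZooKeeper;address=host1:2181,host2:2181;authType=Digest;principal=knox;credentialAlias=zkCredential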

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistries.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistries.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistries.java
new file mode 100644
index 0000000..16434aa
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistries.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.List;
+
+@XmlRootElement(name="remote-configuration-registries")
+class RemoteConfigurationRegistries {
+
+    private List<RemoteConfigurationRegistry> registryConfigurations = new ArrayList<>();
+
+    @XmlElement(name="remote-configuration-registry")
+    List<RemoteConfigurationRegistry> getRegistryConfigurations() {
+        return registryConfigurations;
+    }
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java
new file mode 100644
index 0000000..c32816e
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesAccessor.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+public class RemoteConfigurationRegistriesAccessor {
+
+    // System property for specifying a reference to an XML configuration external to the gateway config
+    private static final String XML_CONFIG_REFERENCE_SYSTEM_PROPERTY_NAME =
+                                                                "org.apache.knox.gateway.remote.registry.config.file";
+
+
+    public static List<RemoteConfigurationRegistryConfig> getRemoteRegistryConfigurations(GatewayConfig gatewayConfig) {
+        List<RemoteConfigurationRegistryConfig> result = new ArrayList<>();
+
+        boolean useReferencedFile = false;
+
+        // First check for the system property pointing to a valid XML config for the remote registries
+        String remoteConfigRegistryConfigFilename = System.getProperty(XML_CONFIG_REFERENCE_SYSTEM_PROPERTY_NAME);
+        if (remoteConfigRegistryConfigFilename != null) {
+            File remoteConfigRegistryConfigFile = new File(remoteConfigRegistryConfigFilename);
+            if (remoteConfigRegistryConfigFile.exists()) {
+                useReferencedFile = true;
+                // Parse the file, and build the registry config set
+                result.addAll(RemoteConfigurationRegistriesParser.getConfig(remoteConfigRegistryConfigFilename));
+            }
+        }
+
+        // If the system property was not set to a valid reference to another config file, then try to derive the
+        // registry configurations from the gateway config.
+        if (!useReferencedFile) {
+            RemoteConfigurationRegistries remoteConfigRegistries =
+                                                            new DefaultRemoteConfigurationRegistries(gatewayConfig);
+            result.addAll(remoteConfigRegistries.getRegistryConfigurations());
+        }
+
+        return result;
+    }
+
+}
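
To have the accessor read registry definitions from an external file rather than the gateway configuration, the JVM can be launched with the system property checked above (the path is illustrative):

    -Dorg.apache.knox.gateway.remote.registry.config.file=/etc/knox/conf/remote-registries.xml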

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java
new file mode 100644
index 0000000..f6347f8
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistriesParser.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+class RemoteConfigurationRegistriesParser {
+
+    static List<RemoteConfigurationRegistryConfig> getConfig(String configFilename) {
+        List<RemoteConfigurationRegistryConfig> result = new ArrayList<>();
+
+        File file = new File(configFilename);
+
+        try {
+            JAXBContext jaxbContext = JAXBContext.newInstance(RemoteConfigurationRegistries.class);
+            Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
+            RemoteConfigurationRegistries parsedContent = (RemoteConfigurationRegistries) jaxbUnmarshaller.unmarshal(file);
+            if (parsedContent != null) {
+                result.addAll(parsedContent.getRegistryConfigurations());
+            }
+        } catch (JAXBException e) {
+            // Parse failures are reported only to stderr; callers receive an empty list
+            e.printStackTrace();
+        }
+
+        return result;
+    }
+}
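
Callers in this package obtain the parsed configurations directly from the parser; a sketch (path illustrative):

    List<RemoteConfigurationRegistryConfig> configs =
        RemoteConfigurationRegistriesParser.getConfig("/etc/knox/conf/remote-registries.xml");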

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistry.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistry.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistry.java
new file mode 100644
index 0000000..1fdbd9e
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/config/RemoteConfigurationRegistry.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.config;
+
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+
+import javax.xml.bind.annotation.XmlElement;
+
+class RemoteConfigurationRegistry implements RemoteConfigurationRegistryConfig {
+
+    private String name;
+    private String type;
+    private String connectionString;
+    private String namespace;
+    private String authType;
+    private String principal;
+    private String credentialAlias;
+    private String keyTab;
+    private boolean useKeyTab;
+    private boolean useTicketCache;
+
+    RemoteConfigurationRegistry() {
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setRegistryType(String type) {
+        this.type = type;
+    }
+
+    public void setConnectionString(String connectionString) {
+        this.connectionString = connectionString;
+    }
+
+    public void setNamespace(String namespace) {
+        this.namespace = namespace;
+    }
+
+    public void setAuthType(String authType) {
+        this.authType = authType;
+    }
+
+    public void setPrincipal(String principal) {
+        this.principal = principal;
+    }
+
+    public void setCredentialAlias(String alias) {
+        this.credentialAlias = alias;
+    }
+
+    public void setUseTicketCache(boolean useTicketCache) {
+        this.useTicketCache = useTicketCache;
+    }
+
+    public void setUseKeytab(boolean useKeytab) {
+        this.useKeyTab = useKeytab;
+    }
+
+    public void setKeytab(String keytab) {
+        this.keyTab = keytab;
+    }
+
+    @XmlElement(name="name")
+    public String getName() {
+        return name;
+    }
+
+    @XmlElement(name="type")
+    public String getRegistryType() {
+        return type;
+    }
+
+    @XmlElement(name="auth-type")
+    public String getAuthType() {
+        return authType;
+    }
+
+    @XmlElement(name="principal")
+    public String getPrincipal() {
+        return principal;
+    }
+
+    @XmlElement(name="credential-alias")
+    public String getCredentialAlias() {
+        return credentialAlias;
+    }
+
+    @Override
+    @XmlElement(name="address")
+    public String getConnectionString() {
+        return connectionString;
+    }
+
+    @Override
+    @XmlElement(name="namespace")
+    public String getNamespace() {
+        return namespace;
+    }
+
+    @Override
+    @XmlElement(name="use-ticket-cache")
+    public boolean isUseTicketCache() {
+        return useTicketCache;
+    }
+
+    @Override
+    @XmlElement(name="use-key-tab")
+    public boolean isUseKeyTab() {
+        return useKeyTab;
+    }
+
+    @Override
+    @XmlElement(name="keytab")
+    public String getKeytab() {
+        return keyTab;
+    }
+
+    @Override
+    public boolean isSecureRegistry() {
+        return (getAuthType() != null);
+    }
+
+}
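
Taken together, the @XmlRootElement and @XmlElement annotations define the external XML format that RemoteConfigurationRegistriesParser unmarshals. A sketch of such a document (registry name, address, and credentials are illustrative):

    <remote-configuration-registries>
      <remote-configuration-registry>
        <name>sandbox-zookeeper-client</name>
        <type>ZooKeeper</type>
        <address>localhost:2181</address>
        <namespace>/knox</namespace>
        <auth-type>Digest</auth-type>
        <principal>knox</principal>
        <credential-alias>zkCredential</credential-alias>
      </remote-configuration-registry>
    </remote-configuration-registries>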

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/CuratorClientService.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/CuratorClientService.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/CuratorClientService.java
new file mode 100644
index 0000000..b97a2c6
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/CuratorClientService.java
@@ -0,0 +1,464 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.zk;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.ACLProvider;
+import org.apache.curator.framework.imps.DefaultACLProvider;
+import org.apache.curator.framework.recipes.cache.ChildData;
+import org.apache.curator.framework.recipes.cache.NodeCache;
+import org.apache.curator.framework.recipes.cache.NodeCacheListener;
+import org.apache.curator.framework.recipes.cache.PathChildrenCache;
+import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
+import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationMessages;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.EntryListener;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+import org.apache.knox.gateway.service.config.remote.config.RemoteConfigurationRegistriesAccessor;
+import org.apache.knox.gateway.services.ServiceLifecycleException;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.apache.zookeeper.data.Stat;
+
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * RemoteConfigurationRegistryClientService implementation that employs the Curator ZooKeeper client framework.
+ */
+class CuratorClientService implements ZooKeeperClientService {
+
+    private static final String LOGIN_CONTEXT_NAME_PROPERTY = ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY;
+
+    private static final String DEFAULT_LOGIN_CONTEXT_NAME = "Client";
+
+    private static final RemoteConfigurationMessages log =
+                                                        MessagesFactory.get(RemoteConfigurationMessages.class);
+
+    private Map<String, RemoteConfigurationRegistryClient> clients = new HashMap<>();
+
+    private AliasService aliasService = null;
+
+
+    @Override
+    public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
+
+        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
+
+        // Load the remote registry configurations
+        registryConfigs.addAll(RemoteConfigurationRegistriesAccessor.getRemoteRegistryConfigurations(config));
+
+        // Configure registry authentication
+        RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, aliasService);
+
+        if (registryConfigs.size() > 1) {
+            // Warn about current limit on number of supported client configurations
+            log.multipleRemoteRegistryConfigurations();
+        }
+
+        // Create the clients
+        for (RemoteConfigurationRegistryConfig registryConfig : registryConfigs) {
+            if (TYPE.equalsIgnoreCase(registryConfig.getRegistryType())) {
+                RemoteConfigurationRegistryClient registryClient = createClient(registryConfig);
+                clients.put(registryConfig.getName(), registryClient);
+            }
+        }
+    }
+
+    @Override
+    public void setAliasService(AliasService aliasService) {
+        this.aliasService = aliasService;
+    }
+
+    @Override
+    public void start() throws ServiceLifecycleException {
+    }
+
+    @Override
+    public void stop() throws ServiceLifecycleException {
+    }
+
+    @Override
+    public RemoteConfigurationRegistryClient get(String name) {
+        return clients.get(name);
+    }
+
+
+    private RemoteConfigurationRegistryClient createClient(RemoteConfigurationRegistryConfig config) {
+        ACLProvider aclProvider;
+        if (config.isSecureRegistry()) {
+            configureSasl(config);
+            aclProvider = new SASLOwnerACLProvider();
+        } else {
+            // Clear SASL system property
+            System.clearProperty(LOGIN_CONTEXT_NAME_PROPERTY);
+            aclProvider = new DefaultACLProvider();
+        }
+
+        CuratorFramework client = CuratorFrameworkFactory.builder()
+                                                         .connectString(config.getConnectionString())
+                                                         .retryPolicy(new ExponentialBackoffRetry(1000, 3))
+                                                         .aclProvider(aclProvider)
+                                                         .build();
+        client.start();
+
+        return (new ClientAdapter(client, config));
+    }
+
+
+    private void configureSasl(RemoteConfigurationRegistryConfig config) {
+        String registryName = config.getName();
+        if (registryName == null) {
+            registryName = DEFAULT_LOGIN_CONTEXT_NAME;
+        }
+        System.setProperty(LOGIN_CONTEXT_NAME_PROPERTY, registryName);
+    }
+
+
+    private static final class ClientAdapter implements RemoteConfigurationRegistryClient {
+
+        private static final String DEFAULT_ENCODING = "UTF-8";
+
+        private CuratorFramework delegate;
+
+        private RemoteConfigurationRegistryConfig config;
+
+        private Map<String, NodeCache> entryNodeCaches = new HashMap<>();
+
+        ClientAdapter(CuratorFramework delegate, RemoteConfigurationRegistryConfig config) {
+            this.delegate = delegate;
+            this.config = config;
+        }
+
+        @Override
+        public String getAddress() {
+            return config.getConnectionString();
+        }
+
+        @Override
+        public boolean isAuthenticationConfigured() {
+            return config.isSecureRegistry();
+        }
+
+        @Override
+        public boolean entryExists(String path) {
+            Stat s = null;
+            try {
+                s = delegate.checkExists().forPath(path);
+            } catch (Exception e) {
+                // Ignore
+            }
+            return (s != null);
+        }
+
+        @Override
+        public List<RemoteConfigurationRegistryClient.EntryACL> getACL(String path) {
+            List<RemoteConfigurationRegistryClient.EntryACL> acl = new ArrayList<>();
+            try {
+                List<ACL> zkACL = delegate.getACL().forPath(path);
+                if (zkACL != null) {
+                    for (ACL aclEntry : zkACL) {
+                        RemoteConfigurationRegistryClient.EntryACL entryACL = new ZooKeeperACLAdapter(aclEntry);
+                        acl.add(entryACL);
+                    }
+                }
+            } catch (Exception e) {
+                log.errorHandlingRemoteConfigACL(path, e);
+            }
+            return acl;
+        }
+
+        @Override
+        public void setACL(String path, List<EntryACL> entryACLs) {
+            // Translate the abstract ACLs into ZooKeeper ACLs
+            List<ACL> delegateACLs = new ArrayList<>();
+            for (EntryACL entryACL : entryACLs) {
+                String scheme = entryACL.getType();
+                String id = entryACL.getId();
+                int permissions = 0;
+                if (entryACL.canWrite()) {
+                    permissions = ZooDefs.Perms.ALL;
+                } else if (entryACL.canRead()){
+                    permissions = ZooDefs.Perms.READ;
+                }
+                delegateACLs.add(new ACL(permissions, new Id(scheme, id)));
+            }
+
+            try {
+                // Set the ACLs for the path
+                delegate.setACL().withACL(delegateACLs).forPath(path);
+            } catch (Exception e) {
+                log.errorSettingEntryACL(path, e);
+            }
+        }
+
+        @Override
+        public List<String> listChildEntries(String path) {
+            List<String> result = null;
+            try {
+                result = delegate.getChildren().forPath(path);
+            } catch (Exception e) {
+                log.errorInteractingWithRemoteConfigRegistry(e);
+            }
+            return result;
+        }
+
+        @Override
+        public void addChildEntryListener(String path, ChildEntryListener listener) throws Exception {
+            PathChildrenCache childCache = new PathChildrenCache(delegate, path, false);
+            childCache.getListenable().addListener(new ChildEntryListenerAdapter(this, listener));
+            childCache.start();
+        }
+
+        @Override
+        public void addEntryListener(String path, EntryListener listener) throws Exception {
+            NodeCache nodeCache = new NodeCache(delegate, path);
+            nodeCache.getListenable().addListener(new EntryListenerAdapter(this, nodeCache, listener));
+            nodeCache.start();
+            entryNodeCaches.put(path, nodeCache);
+        }
+
+        @Override
+        public void removeEntryListener(String path) throws Exception {
+            NodeCache nodeCache = entryNodeCaches.remove(path);
+            if (nodeCache != null) {
+                nodeCache.close();
+            }
+        }
+
+        @Override
+        public String getEntryData(String path) {
+            return getEntryData(path, DEFAULT_ENCODING);
+        }
+
+        @Override
+        public String getEntryData(String path, String encoding) {
+            String result = null;
+            try {
+                byte[] data = delegate.getData().forPath(path);
+                if (data != null) {
+                    result = new String(data, Charset.forName(encoding));
+                }
+            } catch (Exception e) {
+                log.errorInteractingWithRemoteConfigRegistry(e);
+            }
+            return result;
+        }
+
+        @Override
+        public void createEntry(String path) {
+            try {
+                if (delegate.checkExists().forPath(path) == null) {
+                    delegate.create().forPath(path);
+                }
+            } catch (Exception e) {
+                log.errorInteractingWithRemoteConfigRegistry(e);
+            }
+        }
+
+        @Override
+        public void createEntry(String path, String data) {
+            createEntry(path, data, DEFAULT_ENCODING);
+        }
+
+        @Override
+        public void createEntry(String path, String data, String encoding) {
+            try {
+                createEntry(path);
+                setEntryData(path, data, encoding);
+            } catch (Exception e) {
+                log.errorInteractingWithRemoteConfigRegistry(e);
+            }
+        }
+
+        @Override
+        public int setEntryData(String path, String data) {
+            return setEntryData(path, data, DEFAULT_ENCODING);
+        }
+
+        @Override
+        public int setEntryData(String path, String data, String encoding) {
+            int version = 0;
+            try {
+                Stat s = delegate.setData().forPath(path, data.getBytes(Charset.forName(encoding)));
+                if (s != null) {
+                    version = s.getVersion();
+                }
+            } catch (Exception e) {
+                log.errorInteractingWithRemoteConfigRegistry(e);
+            }
+            return version;
+        }
+
+        @Override
+        public void deleteEntry(String path) {
+            try {
+                delegate.delete().forPath(path);
+            } catch (Exception e) {
+                log.errorInteractingWithRemoteConfigRegistry(e);
+            }
+        }
+    }
+
+    /**
+     * ACLProvider granting all permissions to the SASL-authenticated creator of an entry.
+     */
+    private static class SASLOwnerACLProvider implements ACLProvider {
+
+        private final List<ACL> saslACL;
+
+        private SASLOwnerACLProvider() {
+            this.saslACL = ZooDefs.Ids.CREATOR_ALL_ACL; // All permissions for any authenticated user
+        }
+
+        @Override
+        public List<ACL> getDefaultAcl() {
+            return saslACL;
+        }
+
+        @Override
+        public List<ACL> getAclForPath(String path) {
+            return getDefaultAcl();
+        }
+    }
+
+
+    private static final class ChildEntryListenerAdapter implements PathChildrenCacheListener {
+
+        private RemoteConfigurationRegistryClient client;
+        private ChildEntryListener delegate;
+
+        ChildEntryListenerAdapter(RemoteConfigurationRegistryClient client, ChildEntryListener delegate) {
+            this.client = client;
+            this.delegate = delegate;
+        }
+
+        @Override
+        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent)
+                throws Exception {
+            ChildData childData = pathChildrenCacheEvent.getData();
+            if (childData != null) {
+                ChildEntryListener.Type eventType = adaptType(pathChildrenCacheEvent.getType());
+                if (eventType != null) {
+                    delegate.childEvent(client, eventType, childData.getPath());
+                }
+            }
+        }
+
+        private ChildEntryListener.Type adaptType(PathChildrenCacheEvent.Type type) {
+            ChildEntryListener.Type adapted = null;
+
+            switch(type) {
+                case CHILD_ADDED:
+                    adapted = ChildEntryListener.Type.ADDED;
+                    break;
+                case CHILD_REMOVED:
+                    adapted = ChildEntryListener.Type.REMOVED;
+                    break;
+                case CHILD_UPDATED:
+                    adapted = ChildEntryListener.Type.UPDATED;
+                    break;
+            }
+
+            return adapted;
+        }
+    }
+
+    private static final class EntryListenerAdapter implements NodeCacheListener {
+
+        private RemoteConfigurationRegistryClient client;
+        private EntryListener delegate;
+        private NodeCache nodeCache;
+
+        EntryListenerAdapter(RemoteConfigurationRegistryClient client, NodeCache nodeCache, EntryListener delegate) {
+            this.client = client;
+            this.nodeCache = nodeCache;
+            this.delegate = delegate;
+        }
+
+        @Override
+        public void nodeChanged() throws Exception {
+            String path = null;
+            byte[] data = null;
+
+            ChildData cd = nodeCache.getCurrentData();
+            if (cd != null) {
+                path = cd.getPath();
+                data = cd.getData();
+            }
+
+            if (path != null) {
+                delegate.entryChanged(client, path, data);
+            }
+        }
+    }
+
+    /**
+     * Adapter presenting a ZooKeeper ACL as a RemoteConfigurationRegistryClient.EntryACL.
+     */
+    private static final class ZooKeeperACLAdapter implements RemoteConfigurationRegistryClient.EntryACL {
+        private String type;
+        private String id;
+        private int permissions;
+
+        ZooKeeperACLAdapter(ACL acl) {
+            this.permissions = acl.getPerms();
+            this.type = acl.getId().getScheme();
+            this.id = acl.getId().getId();
+        }
+
+        @Override
+        public String getId() {
+            return id;
+        }
+
+        @Override
+        public String getType() {
+            return type;
+        }
+
+        @Override
+        public Object getPermissions() {
+            return permissions;
+        }
+
+        @Override
+        public boolean canRead() {
+            // Perms values are bitmask flags, so test the READ bit rather than comparing magnitudes
+            return (permissions & ZooDefs.Perms.READ) != 0;
+        }
+
+        @Override
+        public boolean canWrite() {
+            return (permissions & ZooDefs.Perms.WRITE) != 0;
+        }
+    }
+
+}
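
A sketch of how these pieces compose at runtime, assuming gatewayConfig and aliasService are in scope, that setAliasService is declared on the client service interface (the @Override above suggests it is), and with error handling elided; the registry name and entry path are illustrative:

    RemoteConfigurationRegistryClientService service =
        RemoteConfigurationRegistryClientServiceFactory.newInstance(gatewayConfig);
    service.setAliasService(aliasService);
    service.init(gatewayConfig, java.util.Collections.emptyMap());
    service.start();

    RemoteConfigurationRegistryClient client = service.get("sandbox-zookeeper-client");
    if (client != null && !client.entryExists("/knox/config/shared-providers")) {
        client.createEntry("/knox/config/shared-providers");
    }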

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java
new file mode 100644
index 0000000..f75634b
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfig.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.zk;
+
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationMessages;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.services.security.AliasServiceException;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Configuration decorator that adds SASL JAAS configuration to whatever JAAS config is already applied.
+ */
+class RemoteConfigurationRegistryJAASConfig extends Configuration {
+
+    // Underlying SASL mechanisms supported
+    enum SASLMechanism {
+        Unsupported,
+        Kerberos,
+        Digest
+    }
+
+    static final Map<String, String> digestLoginModules = new HashMap<>();
+    static {
+        digestLoginModules.put("ZOOKEEPER", "org.apache.zookeeper.server.auth.DigestLoginModule");
+    }
+
+    private static final RemoteConfigurationMessages log = MessagesFactory.get(RemoteConfigurationMessages.class);
+
+    // Cache the current JAAS configuration
+    private Configuration delegate = Configuration.getConfiguration();
+
+    private AliasService aliasService;
+
+    private Map<String, AppConfigurationEntry[]> contextEntries =  new HashMap<>();
+
+    static RemoteConfigurationRegistryJAASConfig configure(List<RemoteConfigurationRegistryConfig> configs, AliasService aliasService) {
+        return new RemoteConfigurationRegistryJAASConfig(configs, aliasService);
+    }
+
+    private RemoteConfigurationRegistryJAASConfig(List<RemoteConfigurationRegistryConfig> configs, AliasService aliasService) {
+        this.aliasService = aliasService;
+
+        // Populate context entries
+        for (RemoteConfigurationRegistryConfig config : configs) {
+            if (config.isSecureRegistry()) {
+                contextEntries.put(config.getName(), createEntries(config));
+            }
+        }
+
+        // If there is at least one context entry, then set this as the client configuration
+        if (!contextEntries.isEmpty()) {
+            // TODO: PJZ: ZooKeeper 3.6.0 will have per-client JAAS Configuration support; Upgrade ASAP!!
+            // For now, set this as the static JAAS configuration
+            Configuration.setConfiguration(this);
+        }
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+        // First, try the delegate's context entries
+        AppConfigurationEntry[] result = delegate.getAppConfigurationEntry(name);
+        if (result == null || result.length < 1) {
+            // Try our additional context entries
+            result = contextEntries.get(name);
+        }
+
+        return result;
+    }
+
+    private AppConfigurationEntry[] createEntries(RemoteConfigurationRegistryConfig config) {
+        AppConfigurationEntry[] result = null;
+
+        AppConfigurationEntry entry = createEntry(config);
+        if (entry != null) {
+            // Only supporting a single app config entry per configuration/context
+            result = new AppConfigurationEntry[1];
+            result[0] = entry;  // reuse the entry created above rather than resolving the credential twice
+        } else {
+            result = new AppConfigurationEntry[0];
+        }
+        return result;
+    }
+
+    private AppConfigurationEntry createEntry(RemoteConfigurationRegistryConfig config) {
+        AppConfigurationEntry entry = null;
+
+        Map<String, String> opts = new HashMap<>();
+        SASLMechanism saslMechanism = getSASLMechanism(config.getAuthType());
+        switch (saslMechanism) {
+            case Digest:
+                // Digest auth options
+                opts.put("username", config.getPrincipal());
+
+                char[] credential = null;
+                if (aliasService != null) {
+                    try {
+                        credential = aliasService.getPasswordFromAliasForGateway(config.getCredentialAlias());
+                    } catch (AliasServiceException e) {
+                        log.unresolvedCredentialAlias(config.getCredentialAlias());
+                    }
+                } else {
+                    throw new IllegalArgumentException("The AliasService is required to resolve credential aliases.");
+                }
+
+                if (credential != null) {
+                    opts.put("password", new String(credential));
+                }
+                break;
+            case Kerberos:
+                opts.put("isUseTicketCache", String.valueOf(config.isUseTicketCache()));
+                opts.put("isUseKeyTab", String.valueOf(config.isUseKeyTab()));
+                opts.put("keyTab", config.getKeytab());
+                opts.put("principal", config.getPrincipal());
+        }
+
+        if (!opts.isEmpty()) {
+            entry = new AppConfigurationEntry(getLoginModuleName(config.getRegistryType(), saslMechanism),
+                                              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+                                              opts);
+        }
+
+        return entry;
+    }
+
+    private static String getLoginModuleName(String registryType, SASLMechanism saslMechanism) {
+        String loginModuleName = null;
+
+        switch (saslMechanism) {
+            case Kerberos:
+                if (System.getProperty("java.vendor").contains("IBM")) {
+                    loginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
+                } else {
+                    loginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
+                }
+                break;
+            case Digest:
+                loginModuleName = digestLoginModules.get(registryType.toUpperCase());
+        }
+        return loginModuleName;
+    }
+
+    private static SASLMechanism getSASLMechanism(String authType) {
+        SASLMechanism result = SASLMechanism.Unsupported;
+        for (SASLMechanism at : SASLMechanism.values()) {
+            if (at.name().equalsIgnoreCase(authType)) {
+                result = at;
+                break;
+            }
+        }
+        return result;
+    }
+
+
+}
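
For a Digest-secured ZooKeeper registry, the AppConfigurationEntry built above is equivalent to the static jaas.conf stanza below, which is what this decorator saves operators from maintaining by hand (context name and credentials illustrative):

    sandbox-zookeeper-client {
        org.apache.zookeeper.server.auth.DigestLoginModule required
        username="knox"
        password="knox-secret";
    };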

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientService.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientService.java b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientService.java
new file mode 100644
index 0000000..17c93e0
--- /dev/null
+++ b/gateway-service-remoteconfig/src/main/java/org/apache/knox/gateway/service/config/remote/zk/ZooKeeperClientService.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.zk;
+
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+public interface ZooKeeperClientService extends RemoteConfigurationRegistryClientService {
+
+    String TYPE = "ZooKeeper";
+
+}

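Presumably the TYPE constant is what a configured registry type string is matched against when an implementation is selected; a trivial sketch of such a check, where the configuredType value is illustrative:

    // Illustrative: decide whether a registry definition targets ZooKeeper.
    String configuredType = "ZooKeeper"; // e.g. parsed from a registry definition
    boolean isZooKeeper = ZooKeeperClientService.TYPE.equalsIgnoreCase(configuredType);
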

[37/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
index c7b8df5,0000000..bc4fc31
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
@@@ -1,926 -1,0 +1,987 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.config.impl;
 +
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.lang3.StringUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.joda.time.Period;
 +import org.joda.time.format.PeriodFormatter;
 +import org.joda.time.format.PeriodFormatterBuilder;
 +
 +import java.io.File;
 +import java.net.InetSocketAddress;
 +import java.net.MalformedURLException;
 +import java.net.URL;
 +import java.net.UnknownHostException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.TimeUnit;
 +
 +/**
 + * The configuration for the Gateway.
 + *
 + * The Gateway configuration variables are described in gateway-default.xml
 + *
 + * The Gateway specific configuration is split into two layers:
 + *
 + * 1. gateway-default.xml - All the configuration variables that the
 + *    Gateway needs.  These are the defaults that ship with the app
 + *    and should only be changed by the app developers.
 + *
 + * 2. gateway-site.xml - The (possibly empty) configuration in which the
 + *    system administrator can set variables for their Hadoop cluster.
 + *
 + * To find the gateway configuration files the following process is used.
 + * First, if the GATEWAY_HOME system property contains a valid directory name,
 + * an attempt will be made to read the configuration files from that directory.
 + * Second, if the GATEWAY_HOME environment variable contains a valid directory name,
 + * an attempt will be made to read the configuration files from that directory.
 + * Third, an attempt will be made to load the configuration files from the directory
 + * specified via the "user.dir" system property.
 + * Fourth, an attempt will be made to load the configuration files from the classpath.
 + * Last, defaults will be used for all values.
 + *
 + * If GATEWAY_HOME isn't set via either the system property or environment variable then
 + * a value for this will be defaulted.  The default selected will be the directory that
 + * contained the last loaded configuration file that was not contained in a JAR.  If
 + * no such configuration file is loaded the value of the "user.dir" system property will be used
 + * as the value of GATEWAY_HOME.  This is important to consider for any relative file names as they
 + * will be resolved relative to the value of GATEWAY_HOME.  One such relative value is the
 + * name of the directory containing cluster topologies.  This value defaults to "clusters".
 + */
 +public class GatewayConfigImpl extends Configuration implements GatewayConfig {
 +
 +  private static final String GATEWAY_DEFAULT_TOPOLOGY_NAME_PARAM = "default.app.topology.name";
 +  private static final String GATEWAY_DEFAULT_TOPOLOGY_NAME = null;
 +
 +  private static final GatewayMessages log = MessagesFactory.get( GatewayMessages.class );
 +
 +  private static final String GATEWAY_CONFIG_DIR_PREFIX = "conf";
 +
 +  private static final String GATEWAY_CONFIG_FILE_PREFIX = "gateway";
 +
 +  private static final String DEFAULT_STACKS_SERVICES_DIR = "services";
 +
 +  private static final String DEFAULT_APPLICATIONS_DIR = "applications";
 +
 +  public static final String[] GATEWAY_CONFIG_FILENAMES = {
 +      GATEWAY_CONFIG_DIR_PREFIX + "/" + GATEWAY_CONFIG_FILE_PREFIX + "-default.xml",
 +      GATEWAY_CONFIG_DIR_PREFIX + "/" + GATEWAY_CONFIG_FILE_PREFIX + "-site.xml"
 +  };
 +
 +//  private static final String[] HADOOP_CONF_FILENAMES = {
 +//      "core-default.xml",
 +//      "core-site.xml"
 +////      "hdfs-default.xml",
 +////      "hdfs-site.xml",
 +////      "mapred-default.xml",
 +////      "mapred-site.xml"
 +//  };
 +
 +//  private static final String[] HADOOP_PREFIX_VARS = {
 +//      "HADOOP_PREFIX",
 +//      "HADOOP_HOME"
 +//  };
 +
 +  public static final String HTTP_HOST = GATEWAY_CONFIG_FILE_PREFIX + ".host";
 +  public static final String HTTP_PORT = GATEWAY_CONFIG_FILE_PREFIX + ".port";
 +  public static final String HTTP_PATH = GATEWAY_CONFIG_FILE_PREFIX + ".path";
 +  public static final String DEPLOYMENT_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".deployment.dir";
 +  public static final String SECURITY_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".security.dir";
 +  public static final String DATA_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".data.dir";
 +  public static final String STACKS_SERVICES_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".services.dir";
 +  public static final String GLOBAL_RULES_SERVICES = GATEWAY_CONFIG_FILE_PREFIX + ".global.rules.services";
 +  public static final String APPLICATIONS_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".applications.dir";
 +  public static final String HADOOP_CONF_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".hadoop.conf.dir";
 +  public static final String FRONTEND_URL = GATEWAY_CONFIG_FILE_PREFIX + ".frontend.url";
 +  private static final String TRUST_ALL_CERTS = GATEWAY_CONFIG_FILE_PREFIX + ".trust.all.certs";
 +  private static final String CLIENT_AUTH_NEEDED = GATEWAY_CONFIG_FILE_PREFIX + ".client.auth.needed";
 +  private static final String CLIENT_AUTH_WANTED = GATEWAY_CONFIG_FILE_PREFIX + ".client.auth.wanted";
 +  private static final String TRUSTSTORE_PATH = GATEWAY_CONFIG_FILE_PREFIX + ".truststore.path";
 +  private static final String TRUSTSTORE_TYPE = GATEWAY_CONFIG_FILE_PREFIX + ".truststore.type";
 +  private static final String KEYSTORE_TYPE = GATEWAY_CONFIG_FILE_PREFIX + ".keystore.type";
 +  private static final String XFORWARDED_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".xforwarded.enabled";
 +  private static final String EPHEMERAL_DH_KEY_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".jdk.tls.ephemeralDHKeySize";
 +  private static final String HTTP_CLIENT_MAX_CONNECTION = GATEWAY_CONFIG_FILE_PREFIX + ".httpclient.maxConnections";
 +  private static final String HTTP_CLIENT_CONNECTION_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".httpclient.connectionTimeout";
 +  private static final String HTTP_CLIENT_SOCKET_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".httpclient.socketTimeout";
 +  private static final String THREAD_POOL_MAX = GATEWAY_CONFIG_FILE_PREFIX + ".threadpool.max";
 +  public static final String HTTP_SERVER_REQUEST_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.requestBuffer";
 +  public static final String HTTP_SERVER_REQUEST_HEADER_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.requestHeaderBuffer";
 +  public static final String HTTP_SERVER_RESPONSE_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.responseBuffer";
 +  public static final String HTTP_SERVER_RESPONSE_HEADER_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.responseHeaderBuffer";
 +  public static final String DEPLOYMENTS_BACKUP_VERSION_LIMIT = GATEWAY_CONFIG_FILE_PREFIX + ".deployment.backup.versionLimit";
 +  public static final String DEPLOYMENTS_BACKUP_AGE_LIMIT = GATEWAY_CONFIG_FILE_PREFIX + ".deployment.backup.ageLimit";
 +  public static final String METRICS_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".metrics.enabled";
 +  public static final String JMX_METRICS_REPORTING_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".jmx.metrics.reporting.enabled";
 +  public static final String GRAPHITE_METRICS_REPORTING_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.enabled";
 +  public static final String GRAPHITE_METRICS_REPORTING_HOST = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.host";
 +  public static final String GRAPHITE_METRICS_REPORTING_PORT = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.port";
 +  public static final String GRAPHITE_METRICS_REPORTING_FREQUENCY = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.frequency";
 +  public static final String GATEWAY_IDLE_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".idle.timeout";
 +  public static final String REMOTE_IP_HEADER_NAME = GATEWAY_CONFIG_FILE_PREFIX + ".remote.ip.header.name";
 +
 +  /* @since 0.10 Websocket config variables */
 +  public static final String WEBSOCKET_FEATURE_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.feature.enabled";
 +  public static final String WEBSOCKET_MAX_TEXT_MESSAGE_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.text.size";
 +  public static final String WEBSOCKET_MAX_BINARY_MESSAGE_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.binary.size";
 +  public static final String WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.text.buffer.size";
 +  public static final String WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.binary.buffer.size";
 +  public static final String WEBSOCKET_INPUT_BUFFER_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.input.buffer.size";
 +  public static final String WEBSOCKET_ASYNC_WRITE_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.async.write.timeout";
 +  public static final String WEBSOCKET_IDLE_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.idle.timeout";
 +
 +  /**
 +   * Properties for the gateway port mapping feature
 +   */
 +  public static final String GATEWAY_PORT_MAPPING_PREFIX = GATEWAY_CONFIG_FILE_PREFIX + ".port.mapping.";
 +  public static final String GATEWAY_PORT_MAPPING_REGEX = GATEWAY_CONFIG_FILE_PREFIX + "\\.port\\.mapping\\..*";
 +  public static final String GATEWAY_PORT_MAPPING_ENABLED = GATEWAY_PORT_MAPPING_PREFIX + "enabled";
 +
 +  /**
 +   * Comma-separated list of MIME types to be compressed by Knox on the way out.
 +   *
 +   * @since 0.12
 +   */
 +  public static final String MIME_TYPES_TO_COMPRESS = GATEWAY_CONFIG_FILE_PREFIX
 +      + ".gzip.compress.mime.types";
 +
++  public static final String CLUSTER_CONFIG_MONITOR_PREFIX = GATEWAY_CONFIG_FILE_PREFIX + ".cluster.config.monitor.";
++  public static final String CLUSTER_CONFIG_MONITOR_INTERVAL_SUFFIX = ".interval";
++  public static final String CLUSTER_CONFIG_MONITOR_ENABLED_SUFFIX = ".enabled";
++
++
 +  // These config property names are not inline with the convention of using the
 +  // GATEWAY_CONFIG_FILE_PREFIX as is done by those above. These are left for
 +  // backward compatibility. 
 +  // LET'S NOT CONTINUE THIS PATTERN BUT LEAVE THEM FOR NOW.
 +  private static final String SSL_ENABLED = "ssl.enabled";
 +  private static final String SSL_EXCLUDE_PROTOCOLS = "ssl.exclude.protocols";
 +  private static final String SSL_INCLUDE_CIPHERS = "ssl.include.ciphers";
 +  private static final String SSL_EXCLUDE_CIPHERS = "ssl.exclude.ciphers";
 +  // END BACKWARD COMPATIBLE BLOCK
 +  
 +  public static final String DEFAULT_HTTP_PORT = "8888";
 +  public static final String DEFAULT_HTTP_PATH = "gateway";
 +  public static final String DEFAULT_DEPLOYMENT_DIR = "deployments";
 +  public static final String DEFAULT_SECURITY_DIR = "security";
 +  public static final String DEFAULT_DATA_DIR = "data";
++  private static final String PROVIDERCONFIG_DIR_NAME = "shared-providers";
++  private static final String DESCRIPTORS_DIR_NAME = "descriptors";
 +
 +  /* Websocket defaults */
 +  public static final boolean DEFAULT_WEBSOCKET_FEATURE_ENABLED = false;
 +  public static final int DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE = Integer.MAX_VALUE;
 +  public static final int DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE = Integer.MAX_VALUE;
 +  public static final int DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE = 32768;
 +  public static final int DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE = 32768;
 +  public static final int DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE = 4096;
 +  public static final int DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT = 60000;
 +  public static final int DEFAULT_WEBSOCKET_IDLE_TIMEOUT = 300000;
 +
 +  public static final boolean DEFAULT_GATEWAY_PORT_MAPPING_ENABLED = true;
 +
 +  /**
 +   * Default list of MIME types to be compressed.
 +   * @since 0.12
 +   */
 +  public static final String DEFAULT_MIME_TYPES_TO_COMPRESS = "text/html, text/plain, text/xml, text/css, "
 +      + "application/javascript, application/x-javascript, text/javascript";
 +
 +  public static final String COOKIE_SCOPING_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".scope.cookies.feature.enabled";
 +  public static final boolean DEFAULT_COOKIE_SCOPING_FEATURE_ENABLED = false;
 +  private static final String CRYPTO_ALGORITHM = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.algorithm";
 +  private static final String CRYPTO_PBE_ALGORITHM = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.pbe.algorithm";
 +  private static final String CRYPTO_TRANSFORMATION = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.transformation";
 +  private static final String CRYPTO_SALTSIZE = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.salt.size";
 +  private static final String CRYPTO_ITERATION_COUNT = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.iteration.count";
 +  private static final String CRYPTO_KEY_LENGTH = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.key.length";
 +  public static final String SERVER_HEADER_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".server.header.enabled";
 +
++  /* @since 0.15 Remote configuration monitoring */
++  static final String CONFIG_REGISTRY_PREFIX = GATEWAY_CONFIG_FILE_PREFIX + ".remote.config.registry";
++  static final String REMOTE_CONFIG_MONITOR_CLIENT_NAME = GATEWAY_CONFIG_FILE_PREFIX + ".remote.config.monitor.client";
++
 +  private static List<String> DEFAULT_GLOBAL_RULES_SERVICES;
 +
 +
 +  public GatewayConfigImpl() {
 +    init();
 +  }
 +
 +  private String getVar( String variableName, String defaultValue ) {
 +    String value = get( variableName );
 +    if( value == null ) {
 +      value = System.getProperty( variableName );
 +    }
 +    if( value == null ) {
 +      value = System.getenv( variableName );
 +    }
 +    if( value == null ) {
 +      value = defaultValue;
 +    }
 +    return value;
 +  }
 +
 +  private String getGatewayHomeDir() {
 +    String home = get(
 +        GATEWAY_HOME_VAR,
 +        System.getProperty(
 +            GATEWAY_HOME_VAR,
 +            System.getenv( GATEWAY_HOME_VAR ) ) );
 +    return home;
 +  }
 +
 +  private void setGatewayHomeDir( String dir ) {
 +    set( GATEWAY_HOME_VAR, dir );
 +  }
 +
 +  @Override
 +  public String getGatewayConfDir() {
 +    String value = getVar( GATEWAY_CONF_HOME_VAR, getGatewayHomeDir() + File.separator + "conf"  );
 +    return FilenameUtils.normalize(value);
 +  }
 +
 +  @Override
 +  public String getGatewayDataDir() {
 +    String systemValue =
 +        System.getProperty(GATEWAY_DATA_HOME_VAR, System.getenv(GATEWAY_DATA_HOME_VAR));
 +    String dataDir = null;
 +    if (systemValue != null) {
 +      dataDir = systemValue;
 +    } else {
 +      dataDir = get(DATA_DIR, getGatewayHomeDir() + File.separator + DEFAULT_DATA_DIR);
 +    }
-     return dataDir;
++    return FilenameUtils.normalize(dataDir);
 +  }
 +
 +  @Override
 +  public String getGatewayServicesDir() {
 +    return get(STACKS_SERVICES_DIR, getGatewayDataDir() + File.separator + DEFAULT_STACKS_SERVICES_DIR);
 +  }
 +
 +  @Override
 +  public String getGatewayApplicationsDir() {
 +    return get(APPLICATIONS_DIR, getGatewayDataDir() + File.separator + DEFAULT_APPLICATIONS_DIR);
 +  }
 +
 +  @Override
 +  public String getHadoopConfDir() {
 +    return get( HADOOP_CONF_DIR );
 +  }
 +
 +  private void init() {
 +    // Load environment variables.
 +    for( Map.Entry<String, String> e : System.getenv().entrySet() ) {
 +      set( "env." + e.getKey(), e.getValue() );
 +    }
 +    // Load system properties.
 +    for( Map.Entry<Object, Object> p : System.getProperties().entrySet() ) {
 +      set( "sys." + p.getKey().toString(), p.getValue().toString() );
 +    }
 +
 +    URL lastFileUrl = null;
 +    for( String fileName : GATEWAY_CONFIG_FILENAMES ) {
 +      lastFileUrl = loadConfig( fileName, lastFileUrl );
 +    }
 +    //set default services list
 +    setDefaultGlobalRulesServices();
 +
 +    initGatewayHomeDir( lastFileUrl );
 +
 +    // log whether the scoping cookies to the gateway.path feature is enabled
 +    log.cookieScopingFeatureEnabled(isCookieScopingToPathEnabled());
 +  }
 +
 +  private void setDefaultGlobalRulesServices() {
 +    DEFAULT_GLOBAL_RULES_SERVICES = new ArrayList<>();
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("NAMENODE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("JOBTRACKER");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("WEBHDFS");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("WEBHCAT");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("OOZIE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("WEBHBASE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("HIVE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("RESOURCEMANAGER");
 +  }
 +
 +  private void initGatewayHomeDir( URL lastFileUrl ) {
 +    String home = System.getProperty( GATEWAY_HOME_VAR );
 +    if( home != null ) {
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "system property", home );
 +      return;
 +    }
 +    home = System.getenv( GATEWAY_HOME_VAR );
 +    if( home != null ) {
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "environment variable", home );
 +      return;
 +    }
 +    if( lastFileUrl != null ) {
 +      File file = new File( lastFileUrl.getFile() ).getAbsoluteFile();
 +      File dir = file.getParentFile().getParentFile(); // Move up two levels to get to parent of conf.
 +      if( dir.exists() && dir.canRead() )
 +        home = dir.getAbsolutePath();
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "configuration file location", home );
 +      return;
 +    }
 +    home = System.getProperty( "user.dir" );
 +    if( home != null ) {
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "user.dir system property", home );
 +      return;
 +    }
 +  }
 +
 +  // 1. GATEWAY_HOME system property
 +  // 2. GATEWAY_HOME environment variable
 +  // 3. user.dir system property
 +  // 4. class path
 +  private URL loadConfig( String fileName, URL lastFileUrl ) {
 +    lastFileUrl = loadConfigFile( System.getProperty( GATEWAY_HOME_VAR ), fileName );
 +    if( lastFileUrl == null ) {
 +      lastFileUrl = loadConfigFile( System.getenv( GATEWAY_HOME_VAR ), fileName );
 +    }
 +    if( lastFileUrl == null ) {
 +      lastFileUrl = loadConfigFile( System.getProperty( "user.dir" ), fileName );
 +    }
 +    if( lastFileUrl == null ) {
 +      lastFileUrl = loadConfigResource( fileName );
 +    }
 +    if( lastFileUrl != null && !"file".equals( lastFileUrl.getProtocol() ) ) {
 +      lastFileUrl = null;
 +    }
 +    return lastFileUrl;
 +  }
 +
 +  private URL loadConfigFile( String dir, String file ) {
 +    URL url = null;
 +    if( dir != null ) {
 +      File f = new File( dir, file );
 +      if( f.exists() ) {
 +        String path = f.getAbsolutePath();
 +        try {
 +          url = f.toURI().toURL();
 +          addResource( new Path( path ) );
 +          log.loadingConfigurationFile( path );
 +        } catch ( MalformedURLException e ) {
 +          log.failedToLoadConfig( path, e );
 +        }
 +      }
 +    }
 +    return url;
 +  }
 +
 +  private URL loadConfigResource( String file ) {
 +    URL url = getResource( file );
 +    if( url != null ) {
 +      log.loadingConfigurationResource( url.toExternalForm() );
 +      addResource( url );
 +    }
 +    return url;
 +  }
 +
 +  @Override
 +  public String getGatewayHost() {
 +    String host = get( HTTP_HOST, "0.0.0.0" );
 +    return host;
 +  }
 +
 +  @Override
 +  public int getGatewayPort() {
 +    return Integer.parseInt( get( HTTP_PORT, DEFAULT_HTTP_PORT ) );
 +  }
 +
 +  @Override
 +  public String getGatewayPath() {
 +    return get( HTTP_PATH, DEFAULT_HTTP_PATH );
 +  }
 +
 +  @Override
++  public String getGatewayProvidersConfigDir() {
++    return getGatewayConfDir() + File.separator + PROVIDERCONFIG_DIR_NAME;
++  }
++
++  @Override
++  public String getGatewayDescriptorsDir() {
++    return getGatewayConfDir() + File.separator + DESCRIPTORS_DIR_NAME;
++  }
++
++  @Override
 +  public String getGatewayTopologyDir() {
 +    return getGatewayConfDir() + File.separator + "topologies";
 +  }
 +
 +  @Override
 +  public String getGatewayDeploymentDir() {
 +    return get(DEPLOYMENT_DIR, getGatewayDataDir() + File.separator + DEFAULT_DEPLOYMENT_DIR);
 +  }
 +
 +  @Override
 +  public String getGatewaySecurityDir() {
 +    return get(SECURITY_DIR, getGatewayDataDir() + File.separator + DEFAULT_SECURITY_DIR);
 +  }
 +
 +  @Override
 +  public InetSocketAddress getGatewayAddress() throws UnknownHostException {
 +    String host = getGatewayHost();
 +    int port = getGatewayPort();
 +    InetSocketAddress address = new InetSocketAddress( host, port );
 +    return address;
 +  }
 +
 +  @Override
 +  public boolean isSSLEnabled() {
 +    String enabled = get( SSL_ENABLED, "true" );
 +    
 +    return "true".equals(enabled);
 +  }
 +
 +  @Override
 +  public boolean isHadoopKerberosSecured() {
 +    String hadoopKerberosSecured = get( HADOOP_KERBEROS_SECURED, "false" );
 +    return "true".equals(hadoopKerberosSecured);
 +  }
 +
 +  @Override
 +  public String getKerberosConfig() {
 +    return get( KRB5_CONFIG ) ;
 +  }
 +
 +  @Override
 +  public boolean isKerberosDebugEnabled() {
 +    String kerberosDebugEnabled = get( KRB5_DEBUG, "false" );
 +    return "true".equals(kerberosDebugEnabled);
 +  }
 +  
 +  @Override
 +  public String getKerberosLoginConfig() {
 +    return get( KRB5_LOGIN_CONFIG );
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getDefaultTopologyName()
 +   */
 +  @Override
 +  public String getDefaultTopologyName() {
 +    String name = get(GATEWAY_DEFAULT_TOPOLOGY_NAME_PARAM);
 +    return name != null ? name : GATEWAY_DEFAULT_TOPOLOGY_NAME;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getDefaultAppRedirectPath()
 +   */
 +  @Override
 +  public String getDefaultAppRedirectPath() {
 +    String defTopo = getDefaultTopologyName();
 +    if( defTopo == null ) {
 +      return null;
 +    } else {
 +      return "/" + getGatewayPath() + "/" + defTopo;
 +    }
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getFrontendUrl()
 +   */
 +  @Override
 +  public String getFrontendUrl() {
 +    String url = get( FRONTEND_URL, null );
 +    return url;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getExcludedSSLProtocols()
 +   */
 +  @Override
 +  public List<String> getExcludedSSLProtocols() {
 +    List<String> protocols = null;
 +    String value = get(SSL_EXCLUDE_PROTOCOLS);
 +    if (!"none".equals(value)) {
 +      protocols = Arrays.asList(value.split("\\s*,\\s*"));
 +    }
 +    return protocols;
 +  }
 +
 +  @Override
 +  public List<String> getIncludedSSLCiphers() {
 +    List<String> list = null;
 +    String value = get(SSL_INCLUDE_CIPHERS);
 +    if (value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim())) {
 +      list = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return list;
 +  }
 +
 +  @Override
 +  public List<String> getExcludedSSLCiphers() {
 +    List<String> list = null;
 +    String value = get(SSL_EXCLUDE_CIPHERS);
 +    if (value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim())) {
 +      list = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return list;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#isClientAuthNeeded()
 +   */
 +  @Override
 +  public boolean isClientAuthNeeded() {
 +    String clientAuthNeeded = get( CLIENT_AUTH_NEEDED, "false" );
 +    return "true".equals(clientAuthNeeded);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.config.GatewayConfig#isClientAuthWanted()
 +   */
 +  @Override
 +  public boolean isClientAuthWanted() {
 +    String clientAuthWanted = get( CLIENT_AUTH_WANTED, "false" );
 +    return "true".equals(clientAuthWanted);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getTruststorePath() {
 +    return get( TRUSTSTORE_PATH, null);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTrustAllCerts()
 +   */
 +  @Override
 +  public boolean getTrustAllCerts() {
 +    String trustAllCerts = get( TRUST_ALL_CERTS, "false" );
 +    return "true".equals(trustAllCerts);
 +  }
 +  
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getTruststoreType() {
 +    return get( TRUSTSTORE_TYPE, "JKS");
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getKeystoreType() {
 +    return get( KEYSTORE_TYPE, "JKS");
 +  }
 +
 +  @Override
 +  public boolean isXForwardedEnabled() {
 +    String xForwardedEnabled = get( XFORWARDED_ENABLED, "true" );
 +    return "true".equals(xForwardedEnabled);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getEphemeralDHKeySize()
 +   */
 +  @Override
 +  public String getEphemeralDHKeySize() {
 +    return get( EPHEMERAL_DH_KEY_SIZE, "2048");
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getHttpClientMaxConnections()
 +   */
 +  @Override
 +  public int getHttpClientMaxConnections() {
 +    return getInt( HTTP_CLIENT_MAX_CONNECTION, 32 );
 +  }
 +
 +  @Override
 +  public int getHttpClientConnectionTimeout() {
 +    int t = -1;
-     String s = get( HTTP_CLIENT_CONNECTION_TIMEOUT, null );
++    String s = get( HTTP_CLIENT_CONNECTION_TIMEOUT, String.valueOf(TimeUnit.SECONDS.toMillis(20)));
 +    if ( s != null ) {
 +      try {
 +        t = (int)parseNetworkTimeout( s );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return t;
 +  }
 +
 +  @Override
 +  public int getHttpClientSocketTimeout() {
 +    int t = -1;
-     String s = get( HTTP_CLIENT_SOCKET_TIMEOUT, null );
++    String s = get( HTTP_CLIENT_SOCKET_TIMEOUT, String.valueOf(TimeUnit.SECONDS.toMillis(20)) );
 +    if ( s != null ) {
 +      try {
 +        t = (int)parseNetworkTimeout( s );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return t;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getThreadPoolMax()
 +   */
 +  @Override
 +  public int getThreadPoolMax() {
 +    int i = getInt( THREAD_POOL_MAX, 254 );
 +    // Testing has shown that a value lower than 5 prevents Jetty from servicing requests.
 +    if( i < 5 ) {
 +      i = 5;
 +    }
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerRequestBuffer() {
 +    int i = getInt( HTTP_SERVER_REQUEST_BUFFER, 16 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerRequestHeaderBuffer() {
 +    int i = getInt( HTTP_SERVER_REQUEST_HEADER_BUFFER, 8 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerResponseBuffer() {
 +    int i = getInt( HTTP_SERVER_RESPONSE_BUFFER, 32 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerResponseHeaderBuffer() {
 +    int i = getInt( HTTP_SERVER_RESPONSE_HEADER_BUFFER, 8 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getGatewayDeploymentsBackupVersionLimit() {
 +    int i = getInt( DEPLOYMENTS_BACKUP_VERSION_LIMIT, 5 );
 +    if( i < 0 ) {
 +      i = -1;
 +    }
 +    return i;
 +  }
 +
 +  @Override
 +  public long getGatewayIdleTimeout() {
 +    return getLong(GATEWAY_IDLE_TIMEOUT, 300000L);
 +  }
 +
 +  @Override
 +  public long getGatewayDeploymentsBackupAgeLimit() {
 +    PeriodFormatter f = new PeriodFormatterBuilder().appendDays().toFormatter();
 +    String s = get( DEPLOYMENTS_BACKUP_AGE_LIMIT, "-1" );
 +    long d;
 +    try {
 +      Period p = Period.parse( s, f );
 +      d = p.toStandardDuration().getMillis();
 +      if( d < 0 ) {
 +        d = -1;
 +      }
 +    } catch( Exception e ) {
 +      d = -1;
 +    }
 +    return d;
 +  }
 +
 +  @Override
 +  public String getSigningKeystoreName() {
 +    return get(SIGNING_KEYSTORE_NAME);
 +  }
 +
 +  @Override
 +  public String getSigningKeyAlias() {
 +    return get(SIGNING_KEY_ALIAS);
 +  }
 +
 +  @Override
 +  public List<String> getGlobalRulesServices() {
 +    String value = get( GLOBAL_RULES_SERVICES );
 +    if ( value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim()) ) {
 +      return Arrays.asList( value.trim().split("\\s*,\\s*") );
 +    }
 +    return DEFAULT_GLOBAL_RULES_SERVICES;
 +  }
 +
 +  @Override
 +  public boolean isMetricsEnabled() {
 +    String metricsEnabled = get( METRICS_ENABLED, "false" );
 +    return "true".equals(metricsEnabled);
 +  }
 +
 +  @Override
 +  public boolean isJmxMetricsReportingEnabled() {
 +    String enabled = get( JMX_METRICS_REPORTING_ENABLED, "false" );
 +    return "true".equals(enabled);
 +  }
 +
 +  @Override
 +  public boolean isGraphiteMetricsReportingEnabled() {
 +    String enabled = get( GRAPHITE_METRICS_REPORTING_ENABLED, "false" );
 +    return "true".equals(enabled);
 +  }
 +
 +  @Override
 +  public String getGraphiteHost() {
 +    String host = get( GRAPHITE_METRICS_REPORTING_HOST, "localhost" );
 +    return host;
 +  }
 +
 +  @Override
 +  public int getGraphitePort() {
 +    int i = getInt( GRAPHITE_METRICS_REPORTING_PORT, 32772 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getGraphiteReportingFrequency() {
 +    int i = getInt( GRAPHITE_METRICS_REPORTING_FREQUENCY, 1 );
 +    return i;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#isWebsocketEnabled()
 +   */
 +  @Override
 +  public boolean isWebsocketEnabled() {
 +    final String result = get( WEBSOCKET_FEATURE_ENABLED, Boolean.toString(DEFAULT_WEBSOCKET_FEATURE_ENABLED));
 +    return Boolean.parseBoolean(result);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxTextMessageSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxTextMessageSize() {
 +    return getInt( WEBSOCKET_MAX_TEXT_MESSAGE_SIZE, DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxBinaryMessageSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxBinaryMessageSize() {
 +    return getInt( WEBSOCKET_MAX_BINARY_MESSAGE_SIZE, DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxTextMessageBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxTextMessageBufferSize() {
 +    return getInt( WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE, DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxBinaryMessageBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxBinaryMessageBufferSize() {
 +    return getInt( WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE, DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketInputBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketInputBufferSize() {
 +    return getInt( WEBSOCKET_INPUT_BUFFER_SIZE, DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketAsyncWriteTimeout()
 +   */
 +  @Override
 +  public int getWebsocketAsyncWriteTimeout() {
 +    return getInt( WEBSOCKET_ASYNC_WRITE_TIMEOUT, DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketIdleTimeout()
 +   */
 +  @Override
 +  public int getWebsocketIdleTimeout() {
 +    return getInt( WEBSOCKET_IDLE_TIMEOUT, DEFAULT_WEBSOCKET_IDLE_TIMEOUT);
 +  }
 +
 +  /*
 +   * (non-Javadoc)
 +   *
 +   * @see
 +   * GatewayConfig#getMimeTypesToCompress()
 +   */
 +  @Override
 +  public List<String> getMimeTypesToCompress() {
 +    List<String> mimeTypes = null;
 +    String value = get(MIME_TYPES_TO_COMPRESS, DEFAULT_MIME_TYPES_TO_COMPRESS);
 +    if (value != null && !value.isEmpty()) {
 +      mimeTypes = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return mimeTypes;
 +  }
 +
 +  /**
 +   * Map of topology names to their ports.
 +   *
 +   * @return an unmodifiable map of topology name to port
 +   */
 +  @Override
 +  public Map<String, Integer> getGatewayPortMappings() {
 +
 +    final Map<String, Integer> result = new ConcurrentHashMap<String, Integer>();
 +    final Map<String, String> properties = getValByRegex(GATEWAY_PORT_MAPPING_REGEX);
 +
 +    // Convert port no. from string to int
 +    for(final Map.Entry<String, String> e : properties.entrySet()) {
 +      // ignore the GATEWAY_PORT_MAPPING_ENABLED property
 +      if(!e.getKey().equalsIgnoreCase(GATEWAY_PORT_MAPPING_ENABLED)) {
 +        // extract the topology name and use it as a key
 +        result.put(StringUtils.substringAfter(e.getKey(), GATEWAY_PORT_MAPPING_PREFIX), Integer.parseInt(e.getValue()) );
 +      }
 +
 +    }
 +
 +    return Collections.unmodifiableMap(result);
 +  }
 +
 +  /**
 +   * Is the port mapping feature enabled?
 +   *
 +   * @return true if the port mapping feature is enabled
 +   */
 +  @Override
 +  public boolean isGatewayPortMappingEnabled() {
 +    final String result = get( GATEWAY_PORT_MAPPING_ENABLED, Boolean.toString(DEFAULT_GATEWAY_PORT_MAPPING_ENABLED));
 +    return Boolean.parseBoolean(result);
 +  }
 +
 +  private static long parseNetworkTimeout(String s ) {
 +    PeriodFormatter f = new PeriodFormatterBuilder()
 +        .appendMinutes().appendSuffix("m"," min")
 +        .appendSeconds().appendSuffix("s"," sec")
 +        .appendMillis().toFormatter();
 +    Period p = Period.parse( s, f );
 +    return p.toStandardDuration().getMillis();
 +  }
 +
 +  @Override
 +  public boolean isCookieScopingToPathEnabled() {
 +    final boolean result = Boolean.parseBoolean(get(COOKIE_SCOPING_ENABLED,
 +            Boolean.toString(DEFAULT_COOKIE_SCOPING_FEATURE_ENABLED)));
 +    return result;
 +  }
 +
 +  @Override
 +  public String getHeaderNameForRemoteAddress() {
 +    String value = getVar(REMOTE_IP_HEADER_NAME, "X-Forwarded-For");
 +    return value;
 +  }
 +
 +  @Override
 +  public String getAlgorithm() {
 +	return getVar(CRYPTO_ALGORITHM, null);
 +  }
 +
 +  @Override
 +  public String getPBEAlgorithm() {
 +	return getVar(CRYPTO_PBE_ALGORITHM, null);
 +  }
 +
 +  @Override
 +  public String getTransformation() {
 +	return getVar(CRYPTO_TRANSFORMATION, null);
 +  }
 +
 +  @Override
 +  public String getSaltSize() {
 +	return getVar(CRYPTO_SALTSIZE, null);
 +  }
 +
 +  @Override
 +  public String getIterationCount() {
 +	return getVar(CRYPTO_ITERATION_COUNT, null);
 +  }
 +
 +  @Override
 +  public String getKeyLength() {
 +	return getVar(CRYPTO_KEY_LENGTH, null);
 +  }
 +
 +  @Override
 +  public boolean isGatewayServerHeaderEnabled() {
 +    return Boolean.parseBoolean(getVar(SERVER_HEADER_ENABLED, "true"));
 +  }
++
++  @Override
++  public int getClusterMonitorPollingInterval(String type) {
++    return getInt(CLUSTER_CONFIG_MONITOR_PREFIX + type.toLowerCase() + CLUSTER_CONFIG_MONITOR_INTERVAL_SUFFIX, -1);
++  }
++  
++  @Override
++  public boolean isClusterMonitorEnabled(String type) {
++    return getBoolean(CLUSTER_CONFIG_MONITOR_PREFIX + type.toLowerCase() + CLUSTER_CONFIG_MONITOR_ENABLED_SUFFIX, true);
++  }
++
++  @Override
++  public List<String> getRemoteRegistryConfigurationNames() {
++    List<String> result = new ArrayList<>();
++
++    // Iterate over all the properties in this configuration
++    for (Map.Entry<String, String> entry : this) {
++      String propertyName = entry.getKey();
++
++      // Search for all the remote config registry properties
++      if (propertyName.startsWith(CONFIG_REGISTRY_PREFIX)) {
++        String registryName = propertyName.substring(CONFIG_REGISTRY_PREFIX.length() + 1);
++        result.add(registryName);
++      }
++    }
++
++    return result;
++  }
++
++  @Override
++  public String getRemoteRegistryConfiguration(String name) {
++    return get(CONFIG_REGISTRY_PREFIX + "." + name );
++  }
++
++  @Override
++  public String getRemoteConfigurationMonitorClientName() {
++    return get(REMOTE_CONFIG_MONITOR_CLIENT_NAME);
++  }
++
 +}

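A short sketch of how the new remote-registry getters behave, assuming an illustrative gateway-site.xml property named "sandbox" (the name and value below are examples, not shipped defaults):

    // Assumed gateway-site.xml entry (illustrative):
    //   <property>
    //     <name>gateway.remote.config.registry.sandbox</name>
    //     <value>type=ZooKeeper;address=localhost:2181</value>
    //   </property>
    GatewayConfig config = new GatewayConfigImpl();
    for (String name : config.getRemoteRegistryConfigurationNames()) {
        // "sandbox" is recovered by stripping CONFIG_REGISTRY_PREFIX plus the trailing dot.
        String registryConfig = config.getRemoteRegistryConfiguration(name);
        System.out.println(name + " -> " + registryConfig); // sandbox -> type=ZooKeeper;...
    }
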
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
index 3f29930,0000000..a1ed549
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/CLIGatewayServices.java
@@@ -1,143 -1,0 +1,153 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services;
 +
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.deploy.DeploymentContext;
 +import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
 +import org.apache.knox.gateway.descriptor.ResourceDescriptor;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
++import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceFactory;
++import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
 +import org.apache.knox.gateway.services.security.impl.DefaultAliasService;
 +import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;
 +import org.apache.knox.gateway.services.security.impl.DefaultKeystoreService;
 +import org.apache.knox.gateway.services.security.impl.CLIMasterService;
 +import org.apache.knox.gateway.topology.Provider;
 +
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class CLIGatewayServices implements GatewayServices {
 +
 +  private static GatewayMessages log = MessagesFactory.get( GatewayMessages.class );
 +
 +  private Map<String,Service> services = new HashMap<>();
 +  private CLIMasterService ms = null;
 +  private DefaultKeystoreService ks = null;
 +
 +  public CLIGatewayServices() {
 +    super();
 +  }
 +
 +  public void init(GatewayConfig config, Map<String,String> options) throws ServiceLifecycleException {
 +    ms = new CLIMasterService();
 +    ms.init(config, options);
 +    services.put("MasterService", ms);
 +
 +    ks = new DefaultKeystoreService();
 +    ks.setMasterService(ms);
 +    ks.init(config, options);
 +    services.put(KEYSTORE_SERVICE, ks);
 +    
 +    DefaultAliasService alias = new DefaultAliasService();
 +    alias.setKeystoreService(ks);
 +    alias.init(config, options);
 +    services.put(ALIAS_SERVICE, alias);
 +
 +    DefaultCryptoService crypto = new DefaultCryptoService();
 +    crypto.setKeystoreService(ks);
 +    crypto.setAliasService(alias);
 +    crypto.init(config, options);
 +    services.put(CRYPTO_SERVICE, crypto);
 +
 +    DefaultTopologyService tops = new DefaultTopologyService();
 +    tops.init(  config, options  );
 +    services.put(TOPOLOGY_SERVICE, tops);
++
++    RemoteConfigurationRegistryClientService registryClientService =
++                                                    RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
++    registryClientService.setAliasService(alias);
++    registryClientService.init(config, options);
++    services.put(REMOTE_REGISTRY_CLIENT_SERVICE, registryClientService);
 +  }
 +  
 +  public void start() throws ServiceLifecycleException {
 +    ms.start();
 +
 +    ks.start();
 +
 +    DefaultAliasService alias = (DefaultAliasService) services.get(ALIAS_SERVICE);
 +    alias.start();
 +
 +    DefaultTopologyService tops = (DefaultTopologyService)services.get(TOPOLOGY_SERVICE);
 +    tops.start();
++
++    (services.get(REMOTE_REGISTRY_CLIENT_SERVICE)).start();
 +  }
 +
 +  public void stop() throws ServiceLifecycleException {
 +    ms.stop();
 +
 +    ks.stop();
 +
 +    DefaultAliasService alias = (DefaultAliasService) services.get(ALIAS_SERVICE);
 +    alias.stop();
 +
 +    DefaultTopologyService tops = (DefaultTopologyService)services.get(TOPOLOGY_SERVICE);
 +    tops.stop();
 +  }
 +  
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.GatewayServices#getServiceNames()
 +   */
 +  @Override
 +  public Collection<String> getServiceNames() {
 +    return services.keySet();
 +  }
 +  
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.GatewayServices#getService(java.lang.String)
 +   */
 +  @Override
 +  public <T> T getService(String serviceName) {
 +    return (T)services.get( serviceName );
 +  }
 +
 +  @Override
 +  public String getRole() {
 +    return "Services";
 +  }
 +
 +  @Override
 +  public String getName() {
 +    return "GatewayServices";
 +  }
 +
 +  @Override
 +  public void initializeContribution(DeploymentContext context) {
 +  }
 +
 +  @Override
 +  public void contributeProvider(DeploymentContext context, Provider provider) {
 +  }
 +
 +  @Override
 +  public void contributeFilter(DeploymentContext context, Provider provider,
 +      org.apache.knox.gateway.topology.Service service,
 +      ResourceDescriptor resource, List<FilterParamDescriptor> params) {
 +  }
 +
 +  @Override
 +  public void finalizeContribution(DeploymentContext context) {
 +  }
 +}

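Given the registration above, CLI code can retrieve the new client service by role; a brief usage sketch, assuming config and options are prepared by the caller, that init/start can throw ServiceLifecycleException, and that the role-name constant is declared on GatewayServices (as its unqualified use above suggests):

    GatewayServices services = new CLIGatewayServices();
    services.init(config, options);
    services.start();
    RemoteConfigurationRegistryClientService remote =
        services.getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
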
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/services/DefaultGatewayServices.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/DefaultGatewayServices.java
index c2acd54,0000000..7542d75
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/DefaultGatewayServices.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/DefaultGatewayServices.java
@@@ -1,223 -1,0 +1,245 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services;
 +
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.deploy.DeploymentContext;
 +import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
 +import org.apache.knox.gateway.descriptor.ResourceDescriptor;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
++import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceFactory;
++import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.registry.impl.DefaultServiceDefinitionRegistry;
 +import org.apache.knox.gateway.services.metrics.impl.DefaultMetricsService;
++import org.apache.knox.gateway.services.topology.impl.DefaultClusterConfigurationMonitorService;
 +import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
 +import org.apache.knox.gateway.services.hostmap.impl.DefaultHostMapperService;
 +import org.apache.knox.gateway.services.registry.impl.DefaultServiceRegistryService;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +import org.apache.knox.gateway.services.security.SSLService;
 +import org.apache.knox.gateway.services.security.impl.DefaultAliasService;
 +import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;
 +import org.apache.knox.gateway.services.security.impl.DefaultKeystoreService;
 +import org.apache.knox.gateway.services.security.impl.DefaultMasterService;
 +import org.apache.knox.gateway.services.security.impl.JettySSLService;
 +import org.apache.knox.gateway.services.token.impl.DefaultTokenAuthorityService;
 +import org.apache.knox.gateway.topology.Provider;
 +
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class DefaultGatewayServices implements GatewayServices {
 +
 +  private static GatewayMessages log = MessagesFactory.get( GatewayMessages.class );
 +
 +  private Map<String,Service> services = new HashMap<>();
 +  private DefaultMasterService ms = null;
 +  private DefaultKeystoreService ks = null;
 +
 +  public DefaultGatewayServices() {
 +    super();
 +  }
 +
 +  public void init(GatewayConfig config, Map<String,String> options) throws ServiceLifecycleException {
 +    ms = new DefaultMasterService();
 +    ms.init(config, options);
 +    services.put("MasterService", ms);
 +
 +    ks = new DefaultKeystoreService();
 +    ks.setMasterService(ms);
 +    ks.init(config, options);
 +    services.put(KEYSTORE_SERVICE, ks);
 +    
 +    DefaultAliasService alias = new DefaultAliasService();
 +    alias.setKeystoreService(ks);
 +    alias.setMasterService(ms);
 +    alias.init(config, options);
 +    services.put(ALIAS_SERVICE, alias);
 +
 +    DefaultCryptoService crypto = new DefaultCryptoService();
 +    crypto.setKeystoreService(ks);
 +    crypto.setAliasService(alias);
 +    crypto.init(config, options);
 +    services.put(CRYPTO_SERVICE, crypto);
 +    
 +    DefaultTokenAuthorityService ts = new DefaultTokenAuthorityService();
 +    ts.setAliasService(alias);
 +    ts.setKeystoreService(ks);
 +    ts.init(config, options);
 +    // probably should not allow the token service to be looked up?
 +    services.put(TOKEN_SERVICE, ts);
 +    
 +    JettySSLService ssl = new JettySSLService();
 +    ssl.setAliasService(alias);
 +    ssl.setKeystoreService(ks);
 +    ssl.setMasterService(ms);
 +    ssl.init(config, options);
 +    services.put(SSL_SERVICE, ssl);
 +
 +    DefaultServiceRegistryService sr = new DefaultServiceRegistryService();
 +    sr.setCryptoService( crypto );
 +    sr.init( config, options );
 +    services.put( SERVICE_REGISTRY_SERVICE, sr );
 +
 +    DefaultHostMapperService hm = new DefaultHostMapperService();
 +    hm.init( config, options );
 +    services.put( HOST_MAPPING_SERVICE, hm );
 +
 +    DefaultServerInfoService sis = new DefaultServerInfoService();
 +    sis.init( config, options );
 +    services.put( SERVER_INFO_SERVICE, sis );
 +
++    RemoteConfigurationRegistryClientService registryClientService =
++                                                    RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
++    registryClientService.setAliasService(alias);
++    registryClientService.init(config, options);
++    services.put(REMOTE_REGISTRY_CLIENT_SERVICE, registryClientService);
++
++    DefaultClusterConfigurationMonitorService ccs = new DefaultClusterConfigurationMonitorService();
++    ccs.setAliasService(alias);
++    ccs.init(config, options);
++    services.put(CLUSTER_CONFIGURATION_MONITOR_SERVICE, ccs);
++
 +    DefaultTopologyService tops = new DefaultTopologyService();
 +    tops.setAliasService(alias);
 +    tops.init(  config, options  );
 +    services.put(  TOPOLOGY_SERVICE, tops  );
 +
 +    DefaultServiceDefinitionRegistry sdr = new DefaultServiceDefinitionRegistry();
 +    sdr.init( config, options );
 +    services.put( SERVICE_DEFINITION_REGISTRY, sdr );
 +
 +    DefaultMetricsService metricsService = new DefaultMetricsService();
 +    metricsService.init( config, options );
 +    services.put( METRICS_SERVICE, metricsService );
 +  }
-   
++
 +  public void start() throws ServiceLifecycleException {
 +    ms.start();
 +
 +    ks.start();
 +
 +    DefaultAliasService alias = (DefaultAliasService) services.get(ALIAS_SERVICE);
 +    alias.start();
 +
 +    SSLService ssl = (SSLService) services.get(SSL_SERVICE);
 +    ssl.start();
 +
 +    ServerInfoService sis = (ServerInfoService) services.get(SERVER_INFO_SERVICE);
 +    sis.start();
 +
++    RemoteConfigurationRegistryClientService clientService =
++                            (RemoteConfigurationRegistryClientService)services.get(REMOTE_REGISTRY_CLIENT_SERVICE);
++    clientService.start();
++
++    (services.get(CLUSTER_CONFIGURATION_MONITOR_SERVICE)).start();
++
 +    DefaultTopologyService tops = (DefaultTopologyService)services.get(TOPOLOGY_SERVICE);
 +    tops.start();
 +
 +    DefaultMetricsService metricsService = (DefaultMetricsService) services.get(METRICS_SERVICE);
 +    metricsService.start();
 +  }
 +
 +  public void stop() throws ServiceLifecycleException {
 +    ms.stop();
 +
 +    ks.stop();
 +
++    (services.get(CLUSTER_CONFIGURATION_MONITOR_SERVICE)).stop();
++
 +    DefaultAliasService alias = (DefaultAliasService) services.get(ALIAS_SERVICE);
 +    alias.stop();
 +
 +    SSLService ssl = (SSLService) services.get(SSL_SERVICE);
 +    ssl.stop();
 +
 +    ServerInfoService sis = (ServerInfoService) services.get(SERVER_INFO_SERVICE);
 +    sis.stop();
 +
 +    DefaultTopologyService tops = (DefaultTopologyService)services.get(TOPOLOGY_SERVICE);
 +    tops.stop();
 +
 +    DefaultMetricsService metricsService = (DefaultMetricsService) services.get(METRICS_SERVICE);
 +    metricsService.stop();
 +
 +  }
 +  
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.GatewayServices#getServiceNames()
 +   */
 +  @Override
 +  public Collection<String> getServiceNames() {
 +    return services.keySet();
 +  }
 +  
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.GatewayServices#getService(java.lang.String)
 +   */
 +  @Override
 +  public <T> T getService(String serviceName) {
 +    return (T)services.get(serviceName);
 +  }
 +
 +  @Override
 +  public String getRole() {
 +    return "Services";
 +  }
 +
 +  @Override
 +  public String getName() {
 +    return "GatewayServices";
 +  }
 +
 +  @Override
 +  public void initializeContribution(DeploymentContext context) {
 +    // setup credential store as appropriate
 +    String clusterName = context.getTopology().getName();
 +    try {
 +      if (!ks.isCredentialStoreForClusterAvailable(clusterName)) {
 +        log.creatingCredentialStoreForCluster(clusterName);
 +        ks.createCredentialStoreForCluster(clusterName);
 +      }
 +      else {
 +        log.credentialStoreForClusterFoundNotCreating(clusterName);
 +      }
 +    } catch (KeystoreServiceException e) {
 +      throw new RuntimeException("Credential store was found but was unable to be loaded - the provided (or persisted) master secret may not match the password for the credential store.", e);
 +    }
 +  }
 +
 +  @Override
 +  public void contributeProvider(DeploymentContext context, Provider provider) {
 +  }
 +
 +  @Override
 +  public void contributeFilter(DeploymentContext context, Provider provider,
 +      org.apache.knox.gateway.topology.Service service,
 +      ResourceDescriptor resource, List<FilterParamDescriptor> params) {
 +  }
 +
 +  @Override
 +  public void finalizeContribution(DeploymentContext context) {
 +    // Tell the provider the location of the descriptor.
 +    context.getWebAppDescriptor().createListener().listenerClass( GatewayServicesContextListener.class.getName() );
 +    context.getWebAppDescriptor().createListener().listenerClass(GatewayMetricsServletContextListener.class.getName());
 +  }
 +}

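The cluster configuration monitor service registered above is governed by the per-type properties added to GatewayConfigImpl in this same commit (gateway.cluster.config.monitor.<type>.interval and .enabled); a hedged sketch of consuming them, where the "ambari" type name is illustrative:

    String monitorType = "ambari"; // illustrative monitor type name
    if (config.isClusterMonitorEnabled(monitorType)) {        // defaults to true
        int interval = config.getClusterMonitorPollingInterval(monitorType); // -1 when unset
        // schedule polling of the cluster configuration at this interval ...
    }
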

[35/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
index 9071a1d,0000000..9a87dd0
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/util/KnoxCLI.java
@@@ -1,1777 -1,0 +1,2154 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.util;
 +
 +import java.io.BufferedReader;
 +import java.io.Console;
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.InputStreamReader;
 +import java.io.PrintStream;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.security.cert.Certificate;
 +import java.util.Arrays;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +import java.util.UUID;
 +import javax.net.ssl.SSLContext;
 +import javax.net.ssl.SSLException;
 +
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.knox.gateway.GatewayCommandLine;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentFactory;
 +import org.apache.knox.gateway.services.CLIGatewayServices;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
++import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
++import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.KeystoreService;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.services.security.impl.X509CertificateUtil;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.apache.http.client.ClientProtocolException;
 +import org.apache.http.client.methods.CloseableHttpResponse;
 +import org.apache.http.client.methods.HttpGet;
 +import org.apache.http.conn.ssl.SSLContexts;
 +import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
 +import org.apache.http.impl.client.CloseableHttpClient;
 +import org.apache.http.impl.client.HttpClients;
 +import org.apache.log4j.PropertyConfigurator;
 +import org.apache.shiro.SecurityUtils;
 +import org.apache.shiro.authc.AuthenticationException;
 +import org.apache.shiro.authc.UsernamePasswordToken;
 +import org.apache.shiro.config.ConfigurationException;
 +import org.apache.shiro.config.Ini;
 +import org.apache.shiro.config.IniSecurityManagerFactory;
 +import org.apache.shiro.subject.Subject;
 +import org.apache.shiro.util.Factory;
 +import org.apache.shiro.util.ThreadContext;
 +import org.eclipse.persistence.oxm.MediaType;
 +import org.jboss.shrinkwrap.api.exporter.ExplodedExporter;
 +import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
++
 +/**
 + *
 + */
 +public class KnoxCLI extends Configured implements Tool {
 +
 +  private static final String USAGE_PREFIX = "KnoxCLI {cmd} [options]";
 +  static final private String COMMANDS =
 +      "   [--help]\n" +
 +      "   [" + VersionCommand.USAGE + "]\n" +
 +      "   [" + MasterCreateCommand.USAGE + "]\n" +
 +      "   [" + CertCreateCommand.USAGE + "]\n" +
 +      "   [" + CertExportCommand.USAGE + "]\n" +
 +      "   [" + AliasCreateCommand.USAGE + "]\n" +
 +      "   [" + AliasDeleteCommand.USAGE + "]\n" +
 +      "   [" + AliasListCommand.USAGE + "]\n" +
 +      "   [" + RedeployCommand.USAGE + "]\n" +
 +      "   [" + ListTopologiesCommand.USAGE + "]\n" +
 +      "   [" + ValidateTopologyCommand.USAGE + "]\n" +
 +      "   [" + LDAPAuthCommand.USAGE + "]\n" +
 +      "   [" + LDAPSysBindCommand.USAGE + "]\n" +
-       "   [" + ServiceTestCommand.USAGE + "]\n";
++      "   [" + ServiceTestCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryClientsListCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryUploadProviderConfigCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryUploadDescriptorCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryDeleteProviderConfigCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryDeleteDescriptorCommand.USAGE + "]\n" +
++      "   [" + RemoteRegistryGetACLCommand.USAGE + "]\n";
 +
 +  /** allows stdout to be captured if necessary */
 +  public PrintStream out = System.out;
 +  /** allows stderr to be captured if necessary */
 +  public PrintStream err = System.err;
 +
 +  private static GatewayServices services = new CLIGatewayServices();
 +  private Command command;
 +  private String value = null;
 +  private String cluster = null;
 +  private String path = null;
 +  private String generate = "false";
 +  private String hostname = null;
 +  private String port = null;
 +  private boolean force = false;
 +  private boolean debug = false;
 +  private String user = null;
 +  private String pass = null;
 +  private boolean groups = false;
 +
++  private String remoteRegistryClient = null;
++  private String remoteRegistryEntryName = null;
++
 +  // For testing only
 +  private String master = null;
 +  private String type = null;
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.hadoop.util.Tool#run(java.lang.String[])
 +   */
 +  @Override
 +  public int run(String[] args) throws Exception {
 +    int exitCode = 0;
 +    try {
 +      exitCode = init(args);
 +      if (exitCode != 0) {
 +        return exitCode;
 +      }
 +      if (command != null && command.validate()) {
 +        initializeServices( command instanceof MasterCreateCommand );
 +        command.execute();
 +      } else if (!(command instanceof MasterCreateCommand)){
 +        out.println("ERROR: Invalid Command" + "\n" + "Unrecognized option:" +
 +            args[0] + "\n" +
 +            "A fatal exception has occurred. Program will exit.");
 +        exitCode = -2;
 +      }
 +    } catch (ServiceLifecycleException sle) {
 +      out.println("ERROR: Internal Error: Please refer to the knoxcli.log " +
 +          "file for details. " + sle.getMessage());
 +    } catch (Exception e) {
 +      e.printStackTrace( err );
 +      err.flush();
 +      return -3;
 +    }
 +    return exitCode;
 +  }
 +
 +  GatewayServices getGatewayServices() {
 +    return services;
 +  }
 +
 +  private void initializeServices(boolean persisting) throws ServiceLifecycleException {
 +    GatewayConfig config = getGatewayConfig();
 +    Map<String,String> options = new HashMap<>();
 +    options.put(GatewayCommandLine.PERSIST_LONG, Boolean.toString(persisting));
 +    if (master != null) {
 +      options.put("master", master);
 +    }
 +    services.init(config, options);
 +  }
 +
 +  /**
 +   * Parse the command line arguments and initialize the data
 +   * <pre>
 +   * % knoxcli version
 +   * % knoxcli list-topologies
 +   * % knoxcli master-create keyName [--size size] [--generate]
 +   * % knoxcli create-alias alias [--cluster clustername] [--generate] [--value v]
 +   * % knoxcli list-alias [--cluster clustername]
 +   * % knoxcli delete-alias alias [--cluster clustername]
 +   * % knoxcli create-cert alias [--hostname h]
 +   * % knoxcli redeploy [--cluster clustername]
 +   * % knoxcli validate-topology [--cluster clustername] | [--path <path/to/file>]
 +   * % knoxcli user-auth-test [--cluster clustername] [--u username] [--p password]
 +   * % knoxcli system-user-auth-test [--cluster clustername] [--d]
 +   * % knoxcli service-test [--u user] [--p password] [--cluster clustername] [--hostname name] [--port port]
-    *
++   * % knoxcli list-registry-clients
++   * % knoxcli get-registry-acl entryName --registry-client name
++   * % knoxcli upload-provider-config filePath --registry-client name [--entry-name entryName]
++   * % knoxcli upload-descriptor filePath --registry-client name [--entry-name entryName]
++   * % knoxcli delete-provider-config providerConfig --registry-client name
++   * % knoxcli delete-descriptor descriptor --registry-client name
 +   * </pre>
 +   * @param args
 +   * @return
 +   * @throws IOException
 +   */
 +  private int init(String[] args) throws IOException {
 +    if (args.length == 0) {
 +      printKnoxShellUsage();
 +      return -1;
 +    }
 +    for (int i = 0; i < args.length; i++) { // parse command line
 +      if (args[i].equals("create-master")) {
 +        command = new MasterCreateCommand();
 +        if ((args.length > i + 1) && args[i + 1].equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("delete-alias")) {
 +        String alias = null;
 +        if (args.length >= 2) {
 +          alias = args[++i];
 +        }
 +        command = new AliasDeleteCommand(alias);
 +        if (alias == null || alias.equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("create-alias")) {
 +        String alias = null;
 +        if (args.length >= 2) {
 +          alias = args[++i];
 +        }
 +        command = new AliasCreateCommand(alias);
 +        if (alias == null || alias.equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("create-cert")) {
 +        command = new CertCreateCommand();
 +        if ((args.length > i + 1) && args[i + 1].equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("export-cert")) {
 +        command = new CertExportCommand();
 +        if ((args.length > i + 1) && args[i + 1].equals("--help")) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      }else if(args[i].equals("user-auth-test")) {
 +        if(i + 1 >= args.length) {
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new LDAPAuthCommand();
 +        }
 +      } else if(args[i].equals("system-user-auth-test")) {
 +        if (i + 1 >= args.length){
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new LDAPSysBindCommand();
 +        }
 +      } else if (args[i].equals("list-alias")) {
 +        command = new AliasListCommand();
 +      } else if (args[i].equals("--value")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.value = args[++i];
 +        if ( command instanceof MasterCreateCommand ) {
 +          this.master = this.value;
 +        }
 +      } else if ( args[i].equals("version") ) {
 +        command = new VersionCommand();
 +      } else if ( args[i].equals("redeploy") ) {
 +        command = new RedeployCommand();
 +      } else if ( args[i].equals("validate-topology") ) {
 +        if(i + 1 >= args.length) {
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new ValidateTopologyCommand();
 +        }
 +      } else if( args[i].equals("list-topologies") ){
 +        command = new ListTopologiesCommand();
 +      }else if ( args[i].equals("--cluster") || args[i].equals("--topology") ) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.cluster = args[++i];
 +      } else if (args[i].equals("service-test")) {
-         if( i + 1 >= args[i].length()) {
++        if( i + 1 >= args.length) {
 +          printKnoxShellUsage();
 +          return -1;
 +        } else {
 +          command = new ServiceTestCommand();
 +        }
 +      } else if (args[i].equals("--generate")) {
 +        if ( command instanceof MasterCreateCommand ) {
 +          this.master = UUID.randomUUID().toString();
 +        } else {
 +          this.generate = "true";
 +        }
 +      } else if(args[i].equals("--type")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.type = args[++i];
 +      } else if(args[i].equals("--path")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.path = args[++i];
 +      }else if (args[i].equals("--hostname")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.hostname = args[++i];
 +      } else if (args[i].equals("--port")) {
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.port = args[++i];
 +      } else if (args[i].equals("--master")) {
 +        // For testing only
 +        if( i+1 >= args.length || args[i+1].startsWith( "-" ) ) {
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +        this.master = args[++i];
 +      } else if (args[i].equals("--force")) {
 +        this.force = true;
 +      } else if (args[i].equals("--help")) {
 +        printKnoxShellUsage();
 +        return -1;
 +      } else if(args[i].equals("--d")) {
 +        this.debug = true;
 +      } else if(args[i].equals("--u")) {
 +        if(i + 1 < args.length) {
 +          this.user = args[++i];
 +        } else{
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if(args[i].equals("--p")) {
 +        if(i + 1 < args.length) {
 +          this.pass = args[++i];
 +        } else{
 +          printKnoxShellUsage();
 +          return -1;
 +        }
 +      } else if (args[i].equals("--g")) {
 +        this.groups = true;
++      } else if (args[i].equals("list-registry-clients")) {
++        command = new RemoteRegistryClientsListCommand();
++      } else if (args[i].equals("--registry-client")) {
++        if (i + 1 >= args.length || args[i + 1].startsWith("-")) {
++          printKnoxShellUsage();
++          return -1;
++        }
++        this.remoteRegistryClient = args[++i];
++      } else if (args[i].equalsIgnoreCase("upload-provider-config")) {
++        String fileName;
++        if (i < (args.length - 1)) {
++          fileName = args[++i];
++          command = new RemoteRegistryUploadProviderConfigCommand(fileName);
++        } else {
++          printKnoxShellUsage();
++          return -1;
++        }
++      } else if (args[i].equals("upload-descriptor")) {
++        String fileName;
++        if (i < (args.length - 1)) {
++          fileName = args[++i];
++          command = new RemoteRegistryUploadDescriptorCommand(fileName);
++        } else {
++          printKnoxShellUsage();
++          return -1;
++        }
++      } else if (args[i].equals("--entry-name")) {
++        if (i < (args.length - 1)) {
++          remoteRegistryEntryName = args[++i];
++        } else {
++          printKnoxShellUsage();
++          return -1;
++        }
++      } else if (args[i].equals("delete-descriptor")) {
++        if (i < (args.length - 1)) {
++          String entry = args[++i];
++          command = new RemoteRegistryDeleteDescriptorCommand(entry);
++        } else {
++          printKnoxShellUsage();
++          return -1;
++        }
++      } else if (args[i].equals("delete-provider-config")) {
++        if (i < (args.length - 1)) {
++          String entry = args[++i];
++          command = new RemoteRegistryDeleteProviderConfigCommand(entry);
++        } else {
++          printKnoxShellUsage();
++          return -1;
++        }
++      } else if (args[i].equalsIgnoreCase("get-registry-acl")) {
++        if (i < (args.length - 1)) {
++          String entry = args[++i];
++          command = new RemoteRegistryGetACLCommand(entry);
++        } else {
++          printKnoxShellUsage();
++          return -1;
++        }
 +      } else {
 +        printKnoxShellUsage();
 +        //ToolRunner.printGenericCommandUsage(System.err);
 +        return -1;
 +      }
 +    }
 +    return 0;
 +  }
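
Aside: the parser above maps each argument token to a Command plus the shared option fields. As a hedged sketch (the class name KnoxCliSmoke is illustrative, not part of this patch), the CLI can be driven programmatically the same way bin/knoxcli.sh ultimately does, via Hadoop's ToolRunner:

    import org.apache.hadoop.util.ToolRunner;
    import org.apache.knox.gateway.util.KnoxCLI;

    public class KnoxCliSmoke {
      public static void main(String[] args) throws Exception {
        // Equivalent of: knoxcli.sh list-registry-clients
        int rc = ToolRunner.run(new KnoxCLI(), new String[] { "list-registry-clients" });
        System.exit(rc);
      }
    }
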
 +
 +  private void printKnoxShellUsage() {
 +    out.println( USAGE_PREFIX + "\n" + COMMANDS );
 +    if ( command != null ) {
 +      out.println(command.getUsage());
 +    } else {
 +      char[] chars = new char[79];
 +      Arrays.fill( chars, '=' );
 +      String div = new String( chars );
 +
 +      out.println( div );
 +      out.println( VersionCommand.USAGE + "\n\n" + VersionCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( MasterCreateCommand.USAGE + "\n\n" + MasterCreateCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( CertCreateCommand.USAGE + "\n\n" + CertCreateCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( CertExportCommand.USAGE + "\n\n" + CertExportCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( AliasCreateCommand.USAGE + "\n\n" + AliasCreateCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( AliasDeleteCommand.USAGE + "\n\n" + AliasDeleteCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( AliasListCommand.USAGE + "\n\n" + AliasListCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println( RedeployCommand.USAGE + "\n\n" + RedeployCommand.DESC );
 +      out.println();
 +      out.println( div );
 +      out.println(ValidateTopologyCommand.USAGE + "\n\n" + ValidateTopologyCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(ListTopologiesCommand.USAGE + "\n\n" + ListTopologiesCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(LDAPAuthCommand.USAGE + "\n\n" + LDAPAuthCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(LDAPSysBindCommand.USAGE + "\n\n" + LDAPSysBindCommand.DESC);
 +      out.println();
 +      out.println( div );
 +      out.println(ServiceTestCommand.USAGE + "\n\n" + ServiceTestCommand.DESC);
 +      out.println();
 +      out.println( div );
++      out.println(RemoteRegistryClientsListCommand.USAGE + "\n\n" + RemoteRegistryClientsListCommand.DESC);
++      out.println();
++      out.println( div );
++      out.println(RemoteRegistryGetACLCommand.USAGE + "\n\n" + RemoteRegistryGetACLCommand.DESC);
++      out.println();
++      out.println( div );
++      out.println(RemoteRegistryUploadProviderConfigCommand.USAGE + "\n\n" + RemoteRegistryUploadProviderConfigCommand.DESC);
++      out.println();
++      out.println( div );
++      out.println(RemoteRegistryUploadDescriptorCommand.USAGE + "\n\n" + RemoteRegistryUploadDescriptorCommand.DESC);
++      out.println();
++      out.println( div );
++      out.println(RemoteRegistryDeleteProviderConfigCommand.USAGE + "\n\n" + RemoteRegistryDeleteProviderConfigCommand.DESC);
++      out.println();
++      out.println( div );
++      out.println(RemoteRegistryDeleteDescriptorCommand.USAGE + "\n\n" + RemoteRegistryDeleteDescriptorCommand.DESC);
++      out.println();
++      out.println( div );
 +    }
 +  }
 +
 +  private abstract class Command {
 +
 +    public boolean validate() {
 +      return true;
 +    }
 +
 +    protected Service getService(String serviceName) {
 +      Service service = null;
 +
 +      return service;
 +    }
 +
 +    public abstract void execute() throws Exception;
 +
 +    public abstract String getUsage();
 +
 +    protected AliasService getAliasService() {
 +      AliasService as = services.getService(GatewayServices.ALIAS_SERVICE);
 +      return as;
 +    }
 +
 +    protected KeystoreService getKeystoreService() {
 +      KeystoreService ks = services.getService(GatewayServices.KEYSTORE_SERVICE);
 +      return ks;
 +    }
 +
 +    protected TopologyService getTopologyService()  {
 +      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +      return ts;
 +    }
++
++    protected RemoteConfigurationRegistryClientService getRemoteConfigRegistryClientService() {
++      return services.getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
++    }
++
 +  }
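
The RemoteRegistry* commands added in this patch all follow this contract. A hedged sketch of the pattern (ExampleCommand is hypothetical; the get(name) lookup on the client service is an assumption based on its use below): subclass Command, publish USAGE/DESC, and resolve services through the protected accessors.

    private class ExampleCommand extends Command {
      public static final String USAGE = "example [--registry-client name]";
      public static final String DESC = "Hypothetical command illustrating the Command contract.";

      @Override
      public void execute() throws Exception {
        // Resolve the client service registered by CLIGatewayServices,
        // then look up the named client (assumed lookup method).
        RemoteConfigurationRegistryClientService svc = getRemoteConfigRegistryClientService();
        RemoteConfigurationRegistryClient client = svc.get(remoteRegistryClient);
        // ... command-specific work against client ...
      }

      @Override
      public String getUsage() {
        return USAGE + ":\n\n" + DESC;
      }
    }
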
 +
 + private class AliasListCommand extends Command {
 +
 +  public static final String USAGE = "list-alias [--cluster clustername]";
 +  public static final String DESC = "The list-alias command lists all of the aliases\n" +
 +                                    "for the given hadoop --cluster. The default\n" +
 +                                    "--cluster being the gateway itself.";
 +
 +   /* (non-Javadoc)
 +    * @see KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     AliasService as = getAliasService();
 +      KeystoreService keystoreService = getKeystoreService();
 +
 +     if (cluster == null) {
 +       cluster = "__gateway";
 +     }
 +      boolean credentialStoreForClusterAvailable =
 +          keystoreService.isCredentialStoreForClusterAvailable(cluster);
 +      if (credentialStoreForClusterAvailable) {
 +        out.println("Listing aliases for: " + cluster);
 +        List<String> aliases = as.getAliasesForCluster(cluster);
 +        for (String alias : aliases) {
 +          out.println(alias);
 +        }
 +        out.println("\n" + aliases.size() + " items.");
 +      } else {
 +        out.println("Invalid cluster name provided: " + cluster);
 +      }
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#getUsage()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 + }
 +
 + public class CertExportCommand extends Command {
 +
 +   public static final String USAGE = "export-cert";
 +   public static final String DESC = "The export-cert command exports the public certificate\n" +
 +                                     "from the a gateway.jks keystore with the alias of gateway-identity.";
 +   private static final String GATEWAY_CREDENTIAL_STORE_NAME = "__gateway";
 +   private static final String GATEWAY_IDENTITY_PASSPHRASE = "gateway-identity-passphrase";
 +
 +    public CertExportCommand() {
 +    }
 +
 +    private GatewayConfig getGatewayConfig() {
 +      GatewayConfig result;
 +      Configuration conf = getConf();
 +      if( conf != null && conf instanceof GatewayConfig ) {
 +        result = (GatewayConfig)conf;
 +      } else {
 +        result = new GatewayConfigImpl();
 +      }
 +      return result;
 +    }
 +
 +    /* (non-Javadoc)
-      * @see KnoxCLI.Command#execute()
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +     */
 +    @Override
 +    public void execute() throws Exception {
 +      KeystoreService ks = getKeystoreService();
 +
 +      AliasService as = getAliasService();
 +
 +      if (ks != null) {
 +        try {
 +          if (!ks.isKeystoreForGatewayAvailable()) {
 +            out.println("No keystore has been created for the gateway. Please use the create-cert command or populate with a CA signed cert of your own.");
 +          }
 +          char[] passphrase = as.getPasswordFromAliasForCluster(GATEWAY_CREDENTIAL_STORE_NAME, GATEWAY_IDENTITY_PASSPHRASE);
 +          if (passphrase == null) {
 +            MasterService ms = services.getService("MasterService");
 +            passphrase = ms.getMasterSecret();
 +          }
 +          Certificate cert = ks.getKeystoreForGateway().getCertificate("gateway-identity");
 +          String keyStoreDir = getGatewayConfig().getGatewaySecurityDir() + File.separator + "keystores" + File.separator;
 +          File ksd = new File(keyStoreDir);
 +          if (!ksd.exists()) {
 +            if( !ksd.mkdirs() ) {
 +              // certainly should not happen if the keystore is known to be available
 +              throw new ServiceLifecycleException("Unable to create keystores directory" + ksd.getAbsolutePath());
 +            }
 +          }
 +          if ("PEM".equals(type) || type == null) {
 +            X509CertificateUtil.writeCertificateToFile(cert, new File(keyStoreDir + "gateway-identity.pem"));
 +            out.println("Certificate gateway-identity has been successfully exported to: " + keyStoreDir + "gateway-identity.pem");
 +          }
 +          else if ("JKS".equals(type)) {
 +            X509CertificateUtil.writeCertificateToJKS(cert, new File(keyStoreDir + "gateway-client-trust.jks"));
 +            out.println("Certificate gateway-identity has been successfully exported to: " + keyStoreDir + "gateway-client-trust.jks");
 +          }
 +          else {
 +            out.println("Invalid type for export file provided. Export has not been done. Please use: [PEM|JKS] default value is PEM.");
 +          }
 +        } catch (KeystoreServiceException e) {
 +          throw new ServiceLifecycleException("Keystore was not loaded properly - the provided (or persisted) master secret may not match the password for the keystore.", e);
 +        }
 +      }
 +    }
 +
 +    /* (non-Javadoc)
-      * @see KnoxCLI.Command#getUsage()
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +     */
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +  }
 +
 + public class CertCreateCommand extends Command {
 +
 +  public static final String USAGE = "create-cert [--hostname h]";
 +  public static final String DESC = "The create-cert command creates and populates\n" +
 +                                    "a gateway.jks keystore with a self-signed certificate\n" +
 +                                    "to be used as the gateway identity. It also adds an alias\n" +
 +                                    "to the __gateway-credentials.jceks credential store for the\n" +
 +                                    "key passphrase.";
 +  private static final String GATEWAY_CREDENTIAL_STORE_NAME = "__gateway";
 +  private static final String GATEWAY_IDENTITY_PASSPHRASE = "gateway-identity-passphrase";
 +
 +   public CertCreateCommand() {
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#execute()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     KeystoreService ks = getKeystoreService();
 +
 +     AliasService as = getAliasService();
 +
 +     if (ks != null) {
 +       try {
 +         if (!ks.isCredentialStoreForClusterAvailable(GATEWAY_CREDENTIAL_STORE_NAME)) {
 +//           log.creatingCredentialStoreForGateway();
 +           ks.createCredentialStoreForCluster(GATEWAY_CREDENTIAL_STORE_NAME);
 +         }
 +         else {
 +//           log.credentialStoreForGatewayFoundNotCreating();
 +         }
 +         // LET'S NOT GENERATE A DIFFERENT KEY PASSPHRASE BY DEFAULT ANYMORE
 +         // IF A DEPLOYMENT WANTS TO CHANGE THE KEY PASSPHRASE TO MAKE IT MORE SECURE THEN
 +         // THEY CAN ADD THE ALIAS EXPLICITLY WITH THE CLI
 +         //as.generateAliasForCluster(GATEWAY_CREDENTIAL_STORE_NAME, GATEWAY_IDENTITY_PASSPHRASE);
 +       } catch (KeystoreServiceException e) {
 +         throw new ServiceLifecycleException("Keystore was not loaded properly - the provided (or persisted) master secret may not match the password for the keystore.", e);
 +       }
 +
 +       try {
 +         if (!ks.isKeystoreForGatewayAvailable()) {
 +//           log.creatingKeyStoreForGateway();
 +           ks.createKeystoreForGateway();
 +         }
 +         else {
 +//           log.keyStoreForGatewayFoundNotCreating();
 +         }
 +         char[] passphrase = as.getPasswordFromAliasForCluster(GATEWAY_CREDENTIAL_STORE_NAME, GATEWAY_IDENTITY_PASSPHRASE);
 +         if (passphrase == null) {
 +           MasterService ms = services.getService("MasterService");
 +           passphrase = ms.getMasterSecret();
 +         }
 +         ks.addSelfSignedCertForGateway("gateway-identity", passphrase, hostname);
 +//         logAndValidateCertificate();
 +         out.println("Certificate gateway-identity has been successfully created.");
 +       } catch (KeystoreServiceException e) {
 +         throw new ServiceLifecycleException("Keystore was not loaded properly - the provided (or persisted) master secret may not match the password for the keystore.", e);
 +       }
 +     }
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#getUsage()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 +
 + }
 +
 + public class AliasCreateCommand extends Command {
 +
 +  public static final String USAGE = "create-alias aliasname [--cluster clustername] " +
 +                                     "[ (--value v) | (--generate) ]";
 +  public static final String DESC = "The create-alias command will create an alias\n"
 +                                       + "and secret pair within the credential store for the\n"
 +                                       + "indicated --cluster otherwise within the gateway\n"
 +                                       + "credential store. The actual secret may be specified via\n"
 +                                       + "the --value option or --generate (will create a random secret\n"
 +                                       + "for you) or user will be prompt to provide password.";
 +
 +  private String name = null;
 +
 +  /**
 +    * @param alias
 +    */
 +   public AliasCreateCommand(String alias) {
 +     name = alias;
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#execute()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     AliasService as = getAliasService();
 +     if (cluster == null) {
 +       cluster = "__gateway";
 +     }
 +     if (value != null) {
 +       as.addAliasForCluster(cluster, name, value);
 +       out.println(name + " has been successfully created.");
 +     }
 +     else {
 +       if ("true".equals(generate)) {
 +         as.generateAliasForCluster(cluster, name);
 +         out.println(name + " has been successfully generated.");
 +       }
 +       else {
 +          value = new String(promptUserForPassword());
 +          as.addAliasForCluster(cluster, name, value);
 +          out.println(name + " has been successfully created.");
 +       }
 +     }
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#getUsage()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 +
 +    protected char[] promptUserForPassword() {
 +      char[] password = null;
 +      Console c = System.console();
 +      if (c == null) {
 +        System.err
 +            .println("No console to fetch password from user.Consider setting via --generate or --value.");
 +        System.exit(1);
 +      }
 +
 +      boolean noMatch;
 +      do {
 +        char[] newPassword1 = c.readPassword("Enter password: ");
 +        char[] newPassword2 = c.readPassword("Enter password again: ");
 +        noMatch = !Arrays.equals(newPassword1, newPassword2);
 +        if (noMatch) {
 +          c.format("Passwords don't match. Try again.%n");
 +        } else {
 +          password = Arrays.copyOf(newPassword1, newPassword1.length);
 +        }
 +        Arrays.fill(newPassword1, ' ');
 +        Arrays.fill(newPassword2, ' ');
 +      } while (noMatch);
 +      return password;
 +    }
 +
 + }
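
A hedged usage sketch of the alias commands (alias and cluster names are illustrative):

    % knoxcli create-alias sandbox.password --cluster sandbox --generate
    % knoxcli list-alias --cluster sandbox
    % knoxcli delete-alias sandbox.password --cluster sandbox
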
 +
 + /**
 +  *
 +  */
 + public class AliasDeleteCommand extends Command {
 +  public static final String USAGE = "delete-alias aliasname [--cluster clustername]";
 +  public static final String DESC = "The delete-alias command removes the\n" +
 +                                    "indicated alias from the --cluster specific\n" +
 +                                    "credential store or the gateway credential store.";
 +
 +  private String name = null;
 +
 +  /**
 +    * @param alias
 +    */
 +   public AliasDeleteCommand(String alias) {
 +     name = alias;
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#execute()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     AliasService as = getAliasService();
 +      KeystoreService keystoreService = getKeystoreService();
 +     if (as != null) {
 +       if (cluster == null) {
 +         cluster = "__gateway";
 +       }
 +        boolean credentialStoreForClusterAvailable =
 +            keystoreService.isCredentialStoreForClusterAvailable(cluster);
 +        if (credentialStoreForClusterAvailable) {
 +          List<String> aliasesForCluster = as.getAliasesForCluster(cluster);
 +          if (null == aliasesForCluster || !aliasesForCluster.contains(name)) {
 +            out.println("Deletion of Alias: " + name + " from cluster: " + cluster + " Failed. "
 +                + "\n" + "No such alias exists in the cluster.");
 +          } else {
 +            as.removeAliasForCluster(cluster, name);
 +            out.println(name + " has been successfully deleted.");
 +          }
 +        } else {
 +          out.println("Invalid cluster name provided: " + cluster);
 +        }
 +     }
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#getUsage()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 +
 + }
 +
 + /**
 +  *
 +  */
 + public class MasterCreateCommand extends Command {
 +  public static final String USAGE = "create-master [--force]";
 +  public static final String DESC = "The create-master command persists the\n" +
 +                                    "master secret in a file located at:\n" +
 +                                    "{GATEWAY_HOME}/data/security/master. It\n" +
 +                                    "will prompt the user for the secret to persist.\n" +
 +                                    "Use --force to overwrite the master secret.";
 +
 +   public MasterCreateCommand() {
 +   }
 +
 +   private GatewayConfig getGatewayConfig() {
 +     GatewayConfig result;
 +     Configuration conf = getConf();
 +     if( conf != null && conf instanceof GatewayConfig ) {
 +       result = (GatewayConfig)conf;
 +     } else {
 +       result = new GatewayConfigImpl();
 +     }
 +     return result;
 +   }
 +
 +   public boolean validate() {
 +     boolean valid = true;
 +     GatewayConfig config = getGatewayConfig();
 +     File dir = new File( config.getGatewaySecurityDir() );
 +     File file = new File( dir, "master" );
 +     if( file.exists() ) {
 +       if( force ) {
 +         if( !file.canWrite() ) {
 +           out.println(
 +               "This command requires write permissions on the master secret file: " +
 +                   file.getAbsolutePath() );
 +           valid = false;
 +         } else {
 +           valid = file.delete();
 +           if( !valid ) {
 +             out.println(
 +                 "Unable to delete the master secret file: " +
 +                     file.getAbsolutePath() );
 +           }
 +         }
 +       } else {
 +         out.println(
 +             "Master secret is already present on disk. " +
 +                 "Please be aware that overwriting it will require updating other security artifacts. " +
 +                 " Use --force to overwrite the existing master secret." );
 +         valid = false;
 +       }
 +     } else if( dir.exists() && !dir.canWrite() ) {
 +       out.println(
 +           "This command requires write permissions on the security directory: " +
 +               dir.getAbsolutePath() );
 +       valid = false;
 +     }
 +     return valid;
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#execute()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
 +    */
 +   @Override
 +   public void execute() throws Exception {
 +     out.println("Master secret has been persisted to disk.");
 +   }
 +
 +   /* (non-Javadoc)
-     * @see KnoxCLI.Command#getUsage()
++    * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
 +    */
 +   @Override
 +   public String getUsage() {
 +     return USAGE + ":\n\n" + DESC;
 +   }
 + }
 +
 +  private class VersionCommand extends Command {
 +
 +    public static final String USAGE = "version";
 +    public static final String DESC = "Displays Knox version information.";
 +
 +    @Override
 +    public void execute() throws Exception {
 +      Properties buildProperties = loadBuildProperties();
 +      System.out.println(
 +          String.format(
 +              "Apache Knox: %s (%s)",
 +              buildProperties.getProperty( "build.version", "unknown" ),
 +              buildProperties.getProperty( "build.hash", "unknown" ) ) );
 +    }
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +  }
 +
 +  private class RedeployCommand extends Command {
 +
 +    public static final String USAGE = "redeploy [--cluster clustername]";
 +    public static final String DESC =
 +        "Redeploys one or all of the gateway's clusters (a.k.a topologies).";
 +
 +    @Override
 +    public void execute() throws Exception {
 +      TopologyService ts = getTopologyService();
 +      ts.reloadTopologies();
 +      if (cluster != null) {
 +        if (validateClusterName(cluster, ts)) {
 +          ts.redeployTopologies(cluster);
 +        }
 +        else {
 +          out.println("Invalid cluster name provided. Nothing to redeploy.");
 +        }
 +      }
 +    }
 +
 +    /**
 +     * @param cluster
 +     * @param ts
 +     */
 +    private boolean validateClusterName(String cluster, TopologyService ts) {
 +      boolean valid = false;
 +      for (Topology t : ts.getTopologies() ) {
 +        if (t.getName().equals(cluster)) {
 +          valid = true;
 +          break;
 +        }
 +      }
 +      return valid;
 +    }
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +  }
 +
 +  private class ValidateTopologyCommand extends Command {
 +
 +    public static final String USAGE = "validate-topology [--cluster clustername] | [--path \"path/to/file\"]";
 +    public static final String DESC = "Ensures that a cluster's description (a.k.a topology) \n" +
 +        "follows the correct formatting rules.\n" +
 +        "use the list-topologies command to get a list of available cluster names";
 +    private String file = "";
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    public void execute() throws Exception {
 +      GatewayConfig gc = getGatewayConfig();
 +      String topDir = gc.getGatewayTopologyDir();
 +
 +      if(path != null) {
 +        file = path;
 +      } else if(cluster == null) {
 +        // The following block of code retrieves the list of files in the topologies directory
 +        File tops = new File(topDir + "/topologies");
 +        if(tops.isDirectory()) {
 +          out.println("List of files available in the topologies directory");
 +          for (File f : tops.listFiles()) {
 +            if(f.getName().endsWith(".xml")) {
 +              String fName = f.getName().replace(".xml", "");
 +              out.println(fName);
 +            }
 +          }
 +          return;
 +        } else {
 +          out.println("Could not locate topologies directory");
 +          return;
 +        }
 +
 +      } else {
 +        file = topDir + "/" + cluster + ".xml";
 +      }
 +
 +      // The following block checks a topology against the XSD
 +      out.println();
 +      out.println("File to be validated: ");
 +      out.println(file);
 +      out.println("==========================================");
 +
 +      if(new File(file).exists()) {
 +        TopologyValidator tv = new TopologyValidator(file);
 +
 +        if(tv.validateTopology()) {
 +          out.println("Topology file validated successfully");
 +        } else {
 +          out.println(tv.getErrorString()) ;
 +          out.println("Topology validation unsuccessful");
 +        }
 +      } else {
 +        out.println("The topology file specified does not exist.");
 +      }
 +    }
 +
 +  }
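
A hedged usage sketch (the cluster name and path are illustrative):

    % knoxcli validate-topology --cluster sandbox
    % knoxcli validate-topology --path conf/topologies/sandbox.xml
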
 +
 +  private class ListTopologiesCommand extends Command {
 +
 +    public static final String USAGE = "list-topologies";
 +    public static final String DESC = "Retrieves a list of the available topologies within the\n" +
 +        "default topologies directory. Will return topologies that may not be deployed due\n" +
 +        "errors in file formatting.";
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +
 +      String confDir = getGatewayConfig().getGatewayConfDir();
 +      File tops = new File(confDir + "/topologies");
 +      out.println("List of files available in the topologies directory");
 +      out.println(tops.toString());
 +      if(tops.isDirectory()) {
 +        for (File f : tops.listFiles()) {
 +          if(f.getName().endsWith(".xml")) {
 +            String fName = f.getName().replace(".xml", "");
 +            out.println(fName);
 +          }
 +        }
 +        return;
 +      } else {
 +        out.println("ERR: Topologies directory does not exist.");
 +        return;
 +      }
 +
 +    }
 +
 +  }
 +
 +  private class LDAPCommand extends Command {
 +
 +    public static final String USAGE = "ldap-command";
 +    public static final String DESC = "This is an internal command. It should not be used.";
 +    protected String username = null;
 +    protected char[] password = null;
 +    protected static final String debugMessage = "For more information use --d for debug output.";
 +    protected Topology topology;
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +      out.println("This command does not have any functionality.");
 +    }
 +
 +
 +//    First define a few Exceptions
 +    protected class NoSuchTopologyException extends Exception {
 +      public NoSuchTopologyException() {}
 +      public NoSuchTopologyException(String message) { super(message); }
 +    }
 +    protected class MissingPasswordException extends Exception {
 +      public MissingPasswordException() {}
 +      public MissingPasswordException(String message) { super(message); }
 +    }
 +
 +    protected class MissingUsernameException extends Exception {
 +      public MissingUsernameException() {}
 +      public MissingUsernameException(String message) { super(message); }
 +    }
 +
 +    protected class BadSubjectException extends Exception {
 +      public BadSubjectException() {}
 +      public BadSubjectException(String message) { super(message); }
 +    }
 +
 +    protected class NoSuchProviderException extends Exception {
 +      public NoSuchProviderException() {}
 +      public NoSuchProviderException(String name, String role, String topology) {
 +        super("Could not find provider with role: " + role + ", name: " + name + " inside of topology: " + topology);
 +      }
 +    }
 +
 +    //    returns false if any errors are printed
 +    protected boolean hasShiroProviderErrors(Topology topology, boolean groupLookup) {
 +//      First let's define the variables that represent the ShiroProvider params
 +      String mainLdapRealm = "main.ldapRealm";
 +      String contextFactory = mainLdapRealm + ".contextFactory";
 +      String groupContextFactory = "main.ldapGroupContextFactory";
 +      String authorizationEnabled = mainLdapRealm + ".authorizationEnabled";
 +      String userSearchAttributeName = mainLdapRealm + ".userSearchAttributeName";
 +      String userObjectClass = mainLdapRealm + ".userObjectClass";
 +      String authenticationMechanism = mainLdapRealm + ".authenticationMechanism"; // Should not be used (up to v0.6.0)
 +      String searchBase = mainLdapRealm + ".searchBase";
 +      String groupSearchBase = mainLdapRealm + ".groupSearchBase";
 +      String userSearchBase = mainLdapRealm + ".userSearchBase";
 +      String groupObjectClass = mainLdapRealm + ".groupObjectClass";
 +      String memberAttribute = mainLdapRealm + ".memberAttribute";
 +      String memberAttributeValueTemplate = mainLdapRealm + ".memberAttributeValueTemplate";
 +      String systemUsername = contextFactory + ".systemUsername";
 +      String systemPassword = contextFactory + ".systemPassword";
 +      String url = contextFactory + ".url";
 +      String userDnTemplate = mainLdapRealm + ".userDnTemplate";
 +
 +
 +      Provider shiro = topology.getProvider("authentication", "ShiroProvider");
 +      if(shiro != null) {
 +        Map<String, String> params = shiro.getParams();
 +        int errs = 0;
 +        if(groupLookup) {
 +          int errors = 0;
 +          errors += hasParam(params, groupContextFactory, true) ? 0 : 1;
 +          errors += hasParam(params, groupObjectClass, true) ? 0 : 1;
 +          errors += hasParam(params, memberAttributeValueTemplate, true) ? 0 : 1;
 +          errors += hasParam(params, memberAttribute, true) ? 0 : 1;
 +          errors += hasParam(params, authorizationEnabled, true) ? 0 : 1;
 +          errors += hasParam(params, systemUsername, true) ? 0 : 1;
 +          errors += hasParam(params, systemPassword, true) ? 0 : 1;
 +          errors += hasParam(params, userSearchBase, true) ? 0 : 1;
 +          errors += hasParam(params, groupSearchBase, true) ? 0 : 1;
 +          errs += errors;
 +
 +        } else {
 +
 +//        Realm + Url is always required.
 +          errs += hasParam(params, mainLdapRealm, true) ? 0 : 1;
 +          errs += hasParam(params, url, true) ? 0 : 1;
 +
 +          if(hasParam(params, authorizationEnabled, false)) {
 +            int errors = 0;
 +            int searchBaseErrors = 0;
 +            errors += hasParam(params, systemUsername, true) ? 0 : 1;
 +            errors += hasParam(params, systemPassword, true) ? 0 : 1;
 +            searchBaseErrors += hasParam(params, searchBase, false) ? 0 : hasParam(params, userSearchBase, false) ? 0 : 1;
 +            if (searchBaseErrors > 0) {
 +              out.println("Warn: Both " + searchBase + " and " + userSearchBase + " are missing from the topology");
 +            }
 +            errors += searchBaseErrors;
 +            errs += errors;
 +          }
 +
 +//        If any one of these is present they must all be present
 +          if( hasParam(params, userSearchAttributeName, false) ||
 +              hasParam(params, userObjectClass, false) ||
 +              hasParam(params, searchBase, false) ||
 +              hasParam(params, userSearchBase, false)) {
 +
 +            int errors = 0;
 +            errors += hasParam(params, userSearchAttributeName, true) ? 0 : 1;
 +            errors += hasParam(params, userObjectClass, true) ? 0 : 1;
 +            errors += hasParam(params, searchBase, false) ? 0 : hasParam(params, userSearchBase, false) ? 0 : 1;
 +            errors += hasParam(params, systemUsername, true) ? 0 : 1;
 +            errors += hasParam(params, systemPassword, true) ? 0 : 1;
 +
 +            if(errors > 0) {
 +              out.println(userSearchAttributeName + " or " + userObjectClass + " or " + searchBase + " or " + userSearchBase + " was found in the topology");
 +              out.println("If any one of the above params is present then " + userSearchAttributeName + 
 +                  " and " + userObjectClass + " must both be present and either " + searchBase + " or " + userSearchBase + " must also be present.");
 +            }
 +            errs += errors;
 +          } else {
 +            errs += hasParam(params, userDnTemplate, true) ?  0 : 1;
 +
 +          }
 +        }
 +        return (errs > 0);
 +      } else {
 +        out.println("Could not obtain ShiroProvider");
 +        return true;
 +      }
 +    }
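
To make the rules above concrete, a minimal userDnTemplate-style ShiroProvider parameter set that passes this check without warnings might look like the following (values are illustrative, modeled on the Knox sandbox defaults):

    Map<String, String> params = new HashMap<>();
    params.put("main.ldapRealm", "org.apache.knox.gateway.shirorealm.KnoxLdapRealm");
    params.put("main.ldapRealm.contextFactory.url", "ldap://localhost:33389");
    params.put("main.ldapRealm.userDnTemplate", "uid={0},ou=people,dc=hadoop,dc=apache,dc=org");
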
 +
 +    // Checks to see if the param name is present. If not, notify the user
 +    protected boolean hasParam(Map<String, String> params, String key, boolean notifyUser){
 +      if(params.get(key) == null){
 +        if(notifyUser) { out.println("Warn: " + key + " is not present in topology"); }
 +        return false;
 +      } else { return true; }
 +    }
 +
 +    /**
 +     *
 +     * @param ini - the path to the shiro.ini file within a topology deployment.
 +     * @param token - token for username and password
 +     * @return - true if the user successfully authenticated; false otherwise.
 +     */
 +    protected boolean authenticateUser(Ini ini, UsernamePasswordToken token){
 +      boolean result = false;
 +      try {
 +        Subject subject = getSubject(ini);
 +        try{
 +          subject.login(token);
 +          if(subject.isAuthenticated()){
 +            result = true;
 +          }
 +        } catch (AuthenticationException e){
 +          out.println(e.toString());
 +          out.println(e.getCause().getMessage());
 +          if (debug) {
 +            e.printStackTrace(out);
 +          } else {
 +            out.println(debugMessage);
 +          }
 +        } finally {
 +          subject.logout();
 +        }
 +      } catch (BadSubjectException e) {
 +        out.println(e.toString());
 +        if (debug){
 +          e.printStackTrace();
 +        } else {
 +          out.println(debugMessage);
 +        }
 +      } catch (ConfigurationException e) {
 +        out.println(e.toString());
 +      } catch ( Exception e ) {
 +        out.println(e.getCause());
 +        out.println(e.toString());
 +      }
 +      return result;
 +    }
 +
 +    protected boolean authenticateUser(String config, UsernamePasswordToken token) throws ConfigurationException {
 +      Ini ini = new Ini();
 +      ini.loadFromPath(config);
 +      return authenticateUser(ini, token);
 +    }
 +
 +    /**
 +     *
 +     * @param userDn - fully qualified userDn used for LDAP authentication
 +     * @return - returns the principal found in the userDn after "uid="
 +     */
 +    protected String getPrincipal(String userDn){
 +      String result = "";
 +
 +//      Need to determine whether we are using AD or LDAP?
 +//      LDAP userDn usually starts with "uid="
 +//      AD userDn usually starts with cn/CN
 +//      Find the userDN template
 +
 +      try {
 +        Topology t = getTopology(cluster);
 +        Provider shiro = t.getProvider("authentication", "ShiroProvider");
 +
 +        String p1 = shiro.getParams().get("main.ldapRealm.userDnTemplate");
 +
 +//        We know everything between first "=" and "," will be part of the principal.
 +        int eq = userDn.indexOf("=");
 +        int com = userDn.indexOf(",");
 +        if(eq != -1 && com > eq && com != -1) {
 +          result = userDn.substring(eq + 1, com);
 +        } else {
 +          result = "";
 +        }
 +      } catch (NoSuchTopologyException e) {
 +        out.println(e.toString());
 +        result = userDn;
 +      }
 +      return result;
 +    }
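
For example, with the common sandbox DN the extraction above yields the bare principal:

    // getPrincipal("uid=guest,ou=people,dc=hadoop,dc=apache,dc=org") returns "guest"
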
 +
 +    /**
 +     *
 +     * @param t - topology configuration to use
 +     * @param config - the path to the shiro.ini file from the topology deployment.
 +     * @return - true if the LDAP bind with the system credentials succeeded; false otherwise.
 +     */
 +    protected boolean testSysBind(Topology t, String config) {
 +      boolean result = false;
 +      String username;
 +      char[] password;
 +
 +      try {
 +//        Pull out contextFactory.url param for light shiro config
 +        Provider shiro = t.getProvider("authentication", "ShiroProvider");
 +        Map<String, String> params = shiro.getParams();
 +        String url = params.get("main.ldapRealm.contextFactory.url");
 +
 +//        Build the Ini with minimum requirements
 +        Ini ini = new Ini();
 +        ini.addSection("main");
 +        ini.setSectionProperty("main", "ldapRealm", "org.apache.knox.gateway.shirorealm.KnoxLdapRealm");
 +        ini.setSectionProperty("main", "ldapContextFactory", "org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory");
 +        ini.setSectionProperty("main", "ldapRealm.contextFactory.url", url);
 +
 +        username = getSystemUsername(t);
 +        password = getSystemPassword(t);
 +        result = authenticateUser(ini, new UsernamePasswordToken(username, password));
 +      } catch (MissingUsernameException | NoSuchProviderException | MissingPasswordException e) {
 +        out.println(e.toString());
 +      } catch (NullPointerException e) {
 +        out.println(e.toString());
 +      }
 +      return result;
 +    }
 +
 +    /**
 +     *
 +     * @param t - topology configuration to use
 +     * @return - the principal of the systemUsername specified in topology. null if non-existent
 +     */
 +    private String getSystemUsername(Topology t) throws MissingUsernameException, NoSuchProviderException {
 +      final String SYSTEM_USERNAME = "main.ldapRealm.contextFactory.systemUsername";
 +      String user = null;
 +      Provider shiroProvider = t.getProvider("authentication", "ShiroProvider");
 +      if(shiroProvider != null){
 +        Map<String, String> params = shiroProvider.getParams();
 +        String userDn = params.get(SYSTEM_USERNAME);
 +        user = userDn;
 +      } else {
 +        throw new NoSuchProviderException("ShiroProvider", "authentication", t.getName());
 +      }
 +      return user;
 +    }
 +
 +    /**
 +     *
 +     * @param t - topology configuration to use
 +     * @return - the systemPassword specified in topology. null if non-existent
 +     */
 +    private char[] getSystemPassword(Topology t) throws NoSuchProviderException, MissingPasswordException{
 +      final String SYSTEM_PASSWORD = "main.ldapRealm.contextFactory.systemPassword";
 +      String pass = null;
 +      Provider shiro = t.getProvider("authentication", "ShiroProvider");
 +      if(shiro != null){
 +        Map<String, String> params = shiro.getParams();
 +        pass = params.get(SYSTEM_PASSWORD);
 +      } else {
 +        throw new NoSuchProviderException("ShiroProvider", "authentication", t.getName());
 +      }
 +
 +      if(pass != null) {
 +        return pass.toCharArray();
 +      } else {
 +        throw new MissingPasswordException("ShiroProvider did not contain param: " + SYSTEM_PASSWORD);
 +      }
 +    }
 +
 +    /**
 +     *
 +     * @param config - the shiro.ini config file created in topology deployment.
 +     * @return returns the Subject given by the shiro config's settings.
 +     */
 +    protected Subject getSubject(Ini config) throws BadSubjectException {
 +      try {
 +        ThreadContext.unbindSubject();
 +        Factory factory = new IniSecurityManagerFactory(config);
 +        org.apache.shiro.mgt.SecurityManager securityManager = (org.apache.shiro.mgt.SecurityManager) factory.getInstance();
 +        SecurityUtils.setSecurityManager(securityManager);
 +        Subject subject = SecurityUtils.getSubject();
 +        if( subject != null) {
 +          return subject;
 +        } else {
 +          out.println("Error Creating Subject from config at: " + config);
 +        }
 +      } catch (Exception e){
 +        out.println(e.toString());
 +      }
 +      throw new BadSubjectException("Subject could not be created with Shiro Config at " + config);
 +    }
 +
 +    protected Subject getSubject(String config) throws ConfigurationException {
 +      Ini ini = new Ini();
 +      ini.loadFromPath(config);
 +      try {
 +        return getSubject(ini);
 +      } catch (BadSubjectException e) {
 +        throw new ConfigurationException("Could not get Subject with Ini at " + config);
 +      }
 +    }
 +
 +    /**
 +     * Prompts the user for credentials on the command line if necessary
 +     * and populates the username and password members.
 +     */
 +    protected void promptCredentials() {
 +      if(this.username == null){
 +        Console c = System.console();
 +        if( c != null) {
 +          this.username = c.readLine("Username: ");
 +        }else{
 +          try {
 +            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
 +            out.println("Username: ");
 +            this.username = reader.readLine();
 +            reader.close();
 +          } catch (IOException e){
 +            out.println(e.toString());
 +            this.username = "";
 +          }
 +        }
 +      }
 +
 +      if(this.password == null){
 +        Console c = System.console();
 +        if( c != null) {
 +          this.password = c.readPassword("Password: ");
 +        }else{
 +          try {
 +            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
 +            out.println("Password: ");
 +            String pw = reader.readLine();
 +            if(pw != null){
 +              this.password = pw.toCharArray();
 +            } else {
 +              this.password = new char[0];
 +            }
 +            reader.close();
 +          } catch (IOException e){
 +            out.println(e.toString());
 +            this.password = new char[0];
 +          }
 +        }
 +      }
 +    }
 +
 +    /**
 +     * @param topologyName - the name of the topology to retrieve
 +     * @return the Topology object with the specified name
 +     * @throws NoSuchTopologyException if the topology does not exist in the TopologyService
 +     */
 +    protected Topology getTopology(String topologyName) throws NoSuchTopologyException {
 +      TopologyService ts = getTopologyService();
 +      ts.reloadTopologies();
 +      for (Topology t : ts.getTopologies()) {
 +        if(t.getName().equals(topologyName)) {
 +          return t;
 +        }
 +      }
 +      throw new NoSuchTopologyException("Topology " + topologyName + " does not" +
 +          " exist in the topologies directory.");
 +    }
 +
 +    /**
 +     * @param t - Topology to use for config
 +     * @return the path of the shiro.ini config file generated for the topology.
 +     */
 +    protected String getConfig(Topology t){
 +      File tmpDir = new File(System.getProperty("java.io.tmpdir"));
 +      DeploymentFactory.setGatewayServices(services);
 +      EnterpriseArchive archive = DeploymentFactory.createDeployment(getGatewayConfig(), t);
 +      File war = archive.as(ExplodedExporter.class).exportExploded(tmpDir, t.getName() + "_deploy.tmp");
 +      war.deleteOnExit();
 +      String config = war.getAbsolutePath() + "/%2F/WEB-INF/shiro.ini";
 +      try{
 +        // Register the exploded war for recursive deletion on exit;
 +        // the plain deleteOnExit() above cannot remove a non-empty directory.
 +        FileUtils.forceDeleteOnExit(war);
 +      } catch (IOException e) {
 +        out.println(e.toString());
 +      }
 +      return config;
 +    }
 +
 +    /**
 +     * Populates username and password if they were passed as arguments;
 +     * otherwise prompts the user for them.
 +     */
 +    void acquireCredentials(){
 +      if(user != null){
 +        this.username = user;
 +      }
 +      if(pass != null){
 +        this.password = pass.toCharArray();
 +      }
 +      promptCredentials();
 +    }
 +
 +    /**
 +     * @return true if the topology was acquired from the topology service and
 +     * populated in the topology field; false otherwise.
 +     */
 +    protected boolean acquireTopology(){
 +      try {
 +        topology = getTopology(cluster);
 +      } catch (NoSuchTopologyException e) {
 +        out.println(e.toString());
 +        return false;
 +      }
 +      return true;
 +    }
 +  }
 +
 +  private class LDAPAuthCommand extends LDAPCommand {
 +
 +    public static final String USAGE = "user-auth-test [--cluster clustername] [--u username] [--p password] [--g]";
 +    public static final String DESC = "This command tests a cluster's configuration ability to\n " +
 +        "authenticate a user with a cluster's ShiroProvider settings.\n Use \"--g\" if you want to list the groups a" +
 +        " user is a member of. \nOptional: [--u username]: Provide a username argument to the command\n" +
 +        "Optional: [--p password]: Provide a password argument to the command.\n" +
 +        "If a username and password argument are not supplied, the terminal will prompt you for one.";
 +
 +    private static final String  SUBJECT_USER_GROUPS = "subject.userGroups";
 +    private HashSet<String> groupSet = new HashSet<>();
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +      if(!acquireTopology()){
 +        return;
 +      }
 +      acquireCredentials();
 +
 +      if(topology.getProvider("authentication", "ShiroProvider") == null) {
 +        out.println("ERR: This tool currently only works with Shiro as the authentication provider.");
 +        out.println("Please update the topology to use \"ShiroProvider\" as the authentication provider.");
 +        return;
 +      }
 +
 +      String config = getConfig(topology);
 +
 +      if(new File(config).exists()) {
 +          if(authenticateUser(config, new UsernamePasswordToken(username, password))) {
 +            out.println("LDAP authentication successful!");
 +            if(groups) {
 +              if(testSysBind(topology, config)) {
 +                groupSet = getGroups(topology, new UsernamePasswordToken(username, password));
 +                if(groupSet == null || groupSet.isEmpty()) {
 +                  out.println(username + " does not belong to any groups");
 +                  // Group lookup was requested (--g) but returned nothing, so
 +                  // surface any ShiroProvider configuration warnings.
 +                  hasShiroProviderErrors(topology, true);
 +                  out.println("You were looking for this user's groups but this user does not belong to any.");
 +                  out.println("Your topology file may be incorrectly configured for group lookup.");
 +                }
 +                } else {
 +                  for (String group : groupSet) {
 +                    out.println(username + " is a member of: " + group);
 +                  }
 +                }
 +              }
 +            }
 +          } else {
 +            out.println("ERR: Unable to authenticate user: " + username);
 +          }
 +      } else {
 +        out.println("ERR: No shiro config file found.");
 +      }
 +    }
 +
 +    private HashSet<String> getGroups(Topology t, UsernamePasswordToken token){
 +      HashSet<String> groups = null;
 +      try {
 +        Subject subject = getSubject(getConfig(t));
 +        if(!subject.isAuthenticated()) {
 +          subject.login(token);
 +        }
 +        subject.hasRole(""); //Populate subject groups
 +        groups = (HashSet<String>) subject.getSession().getAttribute(SUBJECT_USER_GROUPS);
 +        subject.logout();
 +      } catch (AuthenticationException e) {
 +        out.println("Error retrieving groups");
 +        out.println(e.toString());
 +        if(debug) {
 +          e.printStackTrace();
 +        } else {
 +          out.println(debugMessage);
 +        }
 +      } catch (ConfigurationException e) {
 +        out.println(e.toString());
 +        if(debug){
 +          e.printStackTrace();
 +        }
 +      }
 +      return groups;
 +    }
 +
 +  }
 +
 +  public class LDAPSysBindCommand extends LDAPCommand {
 +
 +    public static final String USAGE = "system-user-auth-test [--cluster clustername] [--d]";
 +    public static final String DESC = "This command tests a cluster configuration's ability to\n " +
 +        "authenticate a user with a cluster's ShiroProvider settings.";
 +
 +    @Override
 +    public String getUsage() {
 +      return USAGE + ":\n\n" + DESC;
 +    }
 +
 +    @Override
 +    public void execute() {
 +
 +      if(!acquireTopology()) {
 +        return;
 +      }
 +
 +      if(hasShiroProviderErrors(topology, false)) {
 +        out.println("Topology warnings present. SystemUser may not bind.");
 +      }
 +
 +      if(testSysBind(topology, getConfig(topology))) {
 +        out.println("System LDAP Bind successful.");
 +      } else {
 +        out.println("Unable to successfully bind to LDAP server with topology credentials. Are your parameters correct?");
 +      }
 +    }
 +  }
 +
 +  private GatewayConfig getGatewayConfig() {
 +    GatewayConfig result;
 +    Configuration conf = getConf();
 +    if(conf != null && conf instanceof GatewayConfig) {
 +      result = (GatewayConfig) conf;
 +    } else {
 +      result = new GatewayConfigImpl();
 +    }
 +    return result;
 +  }
 +
 +  public class ServiceTestCommand extends Command {
 +    public static final String USAGE = "service-test [--u username] [--p password] [--cluster clustername] [--hostname name] " +
 +        "[--port port]";
-     public static final String DESC = "This command requires a running instance of Knox to be present on the same " +
-         "machine. It will execute a test to make sure all services are accessible through the gateway URLs. Errors are " +
-         "reported and suggestions to resolve any problems are returned. JSON formatted.";
++    public static final String DESC =
++                        "This command requires a running instance of Knox to be present on the same machine.\n" +
++                        "It will execute a test to make sure all services are accessible through the gateway URLs.\n" +
++                        "Errors are reported and suggestions to resolve any problems are returned. JSON formatted.\n";
 +
 +    private boolean ssl = true;
 +    private int attempts = 0;
 +
 +    @Override
 +    public String getUsage() { return USAGE + ":\n\n" + DESC; }
 +
 +    @Override
 +    public void execute() {
 +      attempts++;
 +      SSLContext ctx = null;
 +      CloseableHttpClient client;
 +      String http = "http://";
 +      String https = "https://";
 +      GatewayConfig conf = getGatewayConfig();
 +      String gatewayPort;
 +      String host;
 +
 +
 +      if(cluster == null) {
 +        printKnoxShellUsage();
 +        out.println("A --cluster argument is required.");
 +        return;
 +      }
 +
 +      if(hostname != null) {
 +        host = hostname;
 +      } else {
 +        try {
 +          host = InetAddress.getLocalHost().getHostAddress();
 +        } catch (UnknownHostException e) {
 +          out.println(e.toString());
 +          out.println("Defaulting address to localhost. Use --hostname option to specify a different hostname");
 +          host = "localhost";
 +        }
 +      }
 +
 +      if (port != null) {
 +        gatewayPort = port;
 +      } else if (conf.getGatewayPort() > -1) {
 +        gatewayPort = Integer.toString(conf.getGatewayPort());
 +      } else {
 +        out.println("Could not get port. Please supply it using the --port option");
 +        return;
 +      }
 +
 +
 +      String path = "/" + conf.getGatewayPath();
 +      String topology = "/" + cluster;
 +      String httpServiceTestURL = http + host + ":" + gatewayPort + path + topology + "/service-test";
 +      String httpsServiceTestURL = https + host + ":" + gatewayPort + path + topology + "/service-test";
 +
 +      String authString = "";
 +//    Create Authorization String
 +      if( user != null && pass != null) {
 +        authString = "Basic " + Base64.encodeBase64String((user + ":" + pass).getBytes());
 +      } else {
 +        out.println("Username and/or password not supplied. Expect HTTP 401 Unauthorized responses.");
 +      }
 +
 +//    Attempt to build SSL context for HTTP client.
 +      try {
 +        ctx = SSLContexts.custom().loadTrustMaterial(null, new TrustSelfSignedStrategy()).build();
 +      } catch (Exception e) {
 +        out.println(e.toString());
 +      }
 +
 +//    Initialize the HTTP client
 +      if(ctx == null) {
 +        client = HttpClients.createDefault();
 +      } else {
 +        client = HttpClients.custom().setSslcontext(ctx).build();
 +      }
 +
 +      HttpGet request;
 +      if(ssl) {
 +        request = new HttpGet(httpsServiceTestURL);
 +      } else {
 +        request = new HttpGet(httpServiceTestURL);
 +      }
 +
 +
 +      request.setHeader("Authorization", authString);
 +      request.setHeader("Accept", MediaType.APPLICATION_JSON.getMediaType());
 +      try {
 +        out.println(request.toString());
 +        CloseableHttpResponse response = client.execute(request);
 +
 +        switch (response.getStatusLine().getStatusCode()) {
 +
 +          case 200:
 +            response.getEntity().writeTo(out);
 +            break;
 +          case 404:
 +            out.println("Could not find service-test resource");
 +            out.println("Make sure you have configured the SERVICE-TEST service in your topology.");
 +            break;
 +          case 500:
 +            out.println("HTTP 500 Server error");
 +            break;
 +
 +          default:
 +            out.println("Unexpected HTTP response code.");
 +            out.println(response.getStatusLine().toString());
 +            response.getEntity().writeTo(out);
 +            break;
 +        }
 +
 +        response.close();
 +        request.releaseConnection();
 +
 +      } catch (ClientProtocolException e) {
 +        out.println(e.toString());
 +        if (debug) {
 +          e.printStackTrace(out);
 +        }
 +      } catch (SSLException e) {
 +        out.println(e.toString());
 +        retryRequest();
 +      } catch (IOException e) {
 +        out.println(e.toString());
 +        retryRequest();
 +        if(debug) {
 +          e.printStackTrace(out);
 +        }
 +      } finally {
 +        try {
 +          client.close();
 +        } catch (IOException e) {
 +          out.println(e.toString());
 +        }
 +      }
 +
 +    }
 +
 +    public void retryRequest(){
 +      if(attempts < 2) {
 +        if(ssl) {
 +          ssl = false;
 +          out.println("Attempting request without SSL.");
 +        } else {
 +          ssl = true;
 +          out.println("Attempting request with SSL ");
 +        }
 +        execute();
 +      } else {
 +        out.println("Unable to successfully make request. Try using the API with cURL.");
 +      }
 +    }
 +
 +  }
 +
++  public class RemoteRegistryClientsListCommand extends Command {
++
++    static final String USAGE = "list-registry-clients";
++    static final String DESC = "Lists all of the remote configuration registry clients defined in gateway-site.xml.\n";
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
++     */
++    @Override
++    public void execute() throws Exception {
++      GatewayConfig config = getGatewayConfig();
++      List<String> remoteConfigRegistryClientNames = config.getRemoteRegistryConfigurationNames();
++      if (!remoteConfigRegistryClientNames.isEmpty()) {
++        out.println("Listing remote configuration registry clients:");
++        for (String name : remoteConfigRegistryClientNames) {
++          out.println(name);
++        }
++      } else {
++        out.println("No remote configuration registry clients are defined in gateway-site.xml.");
++      }
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
++     */
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++ }
++
++
++  /**
++   * Base class for remote config registry upload commands
++   */
++  public abstract class RemoteRegistryUploadCommand extends Command {
++    protected static final String ROOT_ENTRY = "/knox";
++    protected static final String CONFIG_ENTRY = ROOT_ENTRY + "/config";
++    protected static final String PROVIDER_CONFIG_ENTRY = CONFIG_ENTRY + "/shared-providers";
++    protected static final String DESCRIPTORS_ENTRY = CONFIG_ENTRY + "/descriptors";
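++
++    // The constants above map out the following registry layout (illustrative):
++    //   /knox
++    //     /config
++    //       /shared-providers/<entryName>
++    //       /descriptors/<entryName>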
++
++    private File sourceFile = null;
++    protected String filename = null;
++
++    protected RemoteRegistryUploadCommand(String sourceFileName) {
++      this.filename = sourceFileName;
++    }
++
++    private void upload(RemoteConfigurationRegistryClient client, String entryPath, File source) throws Exception {
++      String content = FileUtils.readFileToString(source);
++      if (client.entryExists(entryPath)) {
++        // If it exists, then we're going to set the data
++        client.setEntryData(entryPath, content);
++      } else {
++        // If it does not exist, then create it and set the data
++        client.createEntry(entryPath, content);
++      }
++    }
++
++    File getSourceFile() {
++      if (sourceFile == null) {
++        sourceFile = new File(filename);
++      }
++      return sourceFile;
++    }
++
++    String getEntryName(String prefixPath) {
++      String entryName = remoteRegistryEntryName;
++      if (entryName == null) {
++        File sourceFile = getSourceFile();
++        if (sourceFile.exists()) {
++          String path = sourceFile.getAbsolutePath();
++          entryName = path.substring(path.lastIndexOf(File.separator) + 1);
++        } else {
++          out.println("Could not locate source file: " + filename);
++          // No source file means no entry name can be derived; returning null
++          // lets execute() skip the upload rather than writing "<prefix>/null".
++          return null;
++        }
++      }
++      return prefixPath + "/" + entryName;
++    }
++
++    protected void execute(String entryName, File sourceFile) throws Exception {
++      if (remoteRegistryClient != null) {
++        RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
++        RemoteConfigurationRegistryClient client = cs.get(remoteRegistryClient);
++        if (client != null) {
++          if (entryName != null) {
++            upload(client, entryName, sourceFile);
++          }
++        } else {
++          out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
++        }
++      } else {
++        out.println("Missing required argument : --registry-client\n");
++      }
++    }
++
++  }
++
++
++  public class RemoteRegistryUploadProviderConfigCommand extends RemoteRegistryUploadCommand {
++
++    static final String USAGE = "upload-provider-config providerConfigFile --registry-client name [--entry-name entryName]";
++    static final String DESC = "Uploads a provider configuration to the specified remote registry client, optionally " +
++                               "renaming the entry.\nIf the entry name is not specified, the name of the uploaded " +
++                               "file is used.\n";
++
++    RemoteRegistryUploadProviderConfigCommand(String fileName) {
++      super(fileName);
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
++     */
++    @Override
++    public void execute() throws Exception {
++      super.execute(getEntryName(PROVIDER_CONFIG_ENTRY), getSourceFile());
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
++     */
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
++
++
++  public class RemoteRegistryUploadDescriptorCommand extends RemoteRegistryUploadCommand {
++
++    static final String USAGE = "upload-descriptor descriptorFile --registry-client name [--entry-name entryName]";
++    static final String DESC = "Uploads a simple descriptor using the specified remote registry client, optionally " +
++                               "renaming the entry.\nIf the entry name is not specified, the name of the uploaded " +
++                               "file is used.\n";
++
++    RemoteRegistryUploadDescriptorCommand(String fileName) {
++      super(fileName);
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
++     */
++    @Override
++    public void execute() throws Exception {
++      super.execute(getEntryName(DESCRIPTORS_ENTRY), getSourceFile());
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
++     */
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
++
++
++  public class RemoteRegistryGetACLCommand extends Command {
++
++    static final String USAGE = "get-registry-acl entry --registry-client name";
++    static final String DESC = "Presents the ACL settings for the specified remote registry entry.\n";
++
++    private String entry = null;
++
++    RemoteRegistryGetACLCommand(String entry) {
++      this.entry = entry;
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#execute()
++     */
++    @Override
++    public void execute() throws Exception {
++      if (remoteRegistryClient != null) {
++        RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
++        RemoteConfigurationRegistryClient client = cs.get(remoteRegistryClient);
++        if (client != null) {
++          if (entry != null) {
++            List<RemoteConfigurationRegistryClient.EntryACL> acls = client.getACL(entry);
++            for (RemoteConfigurationRegistryClient.EntryACL acl : acls) {
++              out.println(acl.getType() + ":" + acl.getId() + ":" + acl.getPermissions());
++            }
++          }
++        } else {
++          out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
++        }
++      } else {
++        out.println("Missing required argument : --registry-client\n");
++      }
++    }
++
++    /* (non-Javadoc)
++     * @see org.apache.knox.gateway.util.KnoxCLI.Command#getUsage()
++     */
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
++
++
++  /**
++   * Base class for remote config registry delete commands
++   */
++  public abstract class RemoteRegistryDeleteCommand extends Command {
++    protected static final String ROOT_ENTRY = "/knox";
++    protected static final String CONFIG_ENTRY = ROOT_ENTRY + "/config";
++    protected static final String PROVIDER_CONFIG_ENTRY = CONFIG_ENTRY + "/shared-providers";
++    protected static final String DESCRIPTORS_ENTRY = CONFIG_ENTRY + "/descriptors";
++
++    protected String entryName = null;
++
++    protected RemoteRegistryDeleteCommand(String entryName) {
++      this.entryName = entryName;
++    }
++
++    private void delete(RemoteConfigurationRegistryClient client, String entryPath) throws Exception {
++      if (client.entryExists(entryPath)) {
++        // If it exists, then delete it
++        client.deleteEntry(entryPath);
++      }
++    }
++
++    protected void execute(String entryName) throws Exception {
++      if (remoteRegistryClient != null) {
++        RemoteConfigurationRegistryClientService cs = getRemoteConfigRegistryClientService();
++        RemoteConfigurationRegistryClient client = cs.get(remoteRegistryClient);
++        if (client != null) {
++          if (entryName != null) {
++            delete(client, entryName);
++          }
++        } else {
++          out.println("No remote configuration registry identified by '" + remoteRegistryClient + "' could be found.");
++        }
++      } else {
++        out.println("Missing required argument : --registry-client\n");
++      }
++    }
++  }
++
++
++  public class RemoteRegistryDeleteProviderConfigCommand extends RemoteRegistryDeleteCommand {
++    static final String USAGE = "delete-provider-config providerConfig --registry-client name";
++    static final String DESC = "Deletes a shared provider configuration from the specified remote registry.\n";
++
++    public RemoteRegistryDeleteProviderConfigCommand(String entryName) {
++      super(entryName);
++    }
++
++    @Override
++    public void execute() throws Exception {
++      execute(PROVIDER_CONFIG_ENTRY + "/" + entryName);
++    }
++
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
++
++
++  public class RemoteRegistryDeleteDescriptorCommand extends RemoteRegistryDeleteCommand {
++    static final String USAGE = "delete-descriptor descriptor --registry-client name";
++    static final String DESC = "Deletes a simple descriptor from the specified remote registry.\n";
++
++    public RemoteRegistryDeleteDescriptorCommand(String entryName) {
++      super(entryName);
++    }
++
++    @Override
++    public void execute() throws Exception {
++      execute(DESCRIPTORS_ENTRY + "/" + entryName);
++    }
++
++    @Override
++    public String getUsage() {
++      return USAGE + ":\n\n" + DESC;
++    }
++  }
++
++
 +  private static Properties loadBuildProperties() {
 +    Properties properties = new Properties();
 +    // try-with-resources ensures the stream is closed even if load() fails.
 +    try( InputStream inputStream = KnoxCLI.class.getClassLoader().getResourceAsStream( "build.properties" ) ) {
 +      if( inputStream != null ) {
 +        properties.load( inputStream );
 +      }
 +    } catch( IOException e ) {
 +      // Ignore: build information is optional.
 +    }
 +    return properties;
 +  }
 +
 +  /**
 +   * @param args command line arguments
 +   * @throws Exception if the CLI fails to run
 +   */
 +  public static void main(String[] args) throws Exception {
 +    PropertyConfigurator.configure( System.getProperty( "log4j.configuration" ) );
 +    int res = ToolRunner.run(new GatewayConfigImpl(), new KnoxCLI(), args);
 +    System.exit(res);
 +  }
 +}


[47/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
KNOX-998 - Merge from trunk 0.14.0 code


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/e766b3b7
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/e766b3b7
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/e766b3b7

Branch: refs/heads/master
Commit: e766b3b77bf2d3a0a00e4f8bf8ef261a5f8122fb
Parents: 22a7304
Author: Sandeep More <mo...@apache.org>
Authored: Thu Dec 14 16:11:49 2017 -0500
Committer: Sandeep More <mo...@apache.org>
Committed: Thu Dec 14 16:11:49 2017 -0500

----------------------------------------------------------------------
 .../discovery/ambari/AmbariClientCommon.java    | 102 ----
 ...bariClusterConfigurationMonitorProvider.java |  35 --
 .../ambari/AmbariConfigurationMonitor.java      | 525 ----------------
 .../topology/discovery/ambari/RESTInvoker.java  | 136 -----
 .../discovery/ambari/AmbariClientCommon.java    | 102 ++++
 ...bariClusterConfigurationMonitorProvider.java |  36 ++
 .../ambari/AmbariConfigurationMonitor.java      | 525 ++++++++++++++++
 .../topology/discovery/ambari/RESTInvoker.java  | 136 +++++
 ...iscovery.ClusterConfigurationMonitorProvider |  19 -
 ...iscovery.ClusterConfigurationMonitorProvider |  19 +
 .../ambari/AmbariConfigurationMonitorTest.java  | 319 ----------
 .../ambari/AmbariConfigurationMonitorTest.java  | 319 ++++++++++
 ...faultClusterConfigurationMonitorService.java |  81 ---
 .../DefaultConfigurationMonitorProvider.java    |  31 -
 .../DefaultRemoteConfigurationMonitor.java      | 228 -------
 .../RemoteConfigurationMonitorFactory.java      |  74 ---
 .../gateway/services/CLIGatewayServices.java    |   2 +-
 ...faultClusterConfigurationMonitorService.java |  81 +++
 .../DefaultConfigurationMonitorProvider.java    |  31 +
 .../DefaultRemoteConfigurationMonitor.java      | 228 +++++++
 .../RemoteConfigurationMonitorFactory.java      |  74 +++
 .../org/apache/knox/gateway/util/KnoxCLI.java   |  16 +-
 ...y.monitor.RemoteConfigurationMonitorProvider |  19 -
 ...y.monitor.RemoteConfigurationMonitorProvider |  19 +
 ...emoteConfigurationRegistryClientService.java | 263 --------
 ...figurationRegistryClientServiceProvider.java |  32 -
 .../ZooKeeperConfigurationMonitorTest.java      | 355 -----------
 ...emoteConfigurationRegistryClientService.java | 263 ++++++++
 ...figurationRegistryClientServiceProvider.java |  32 +
 .../ZooKeeperConfigurationMonitorTest.java      | 355 +++++++++++
 .../apache/knox/gateway/util/KnoxCLITest.java   |   2 +-
 ...teConfigurationRegistryClientServiceProvider |  19 -
 ...teConfigurationRegistryClientServiceProvider |  19 +
 .../services/ambariui/2.2.1/service.xml         |   0
 .../remote/RemoteConfigurationMessages.java     |  49 --
 ...nfigurationRegistryClientServiceFactory.java |  41 --
 ...figurationRegistryClientServiceProvider.java |  27 -
 .../RemoteConfigurationRegistryConfig.java      |  43 --
 .../DefaultRemoteConfigurationRegistries.java   | 104 ----
 .../config/RemoteConfigurationRegistries.java   |  33 -
 .../RemoteConfigurationRegistriesAccessor.java  |  60 --
 .../RemoteConfigurationRegistriesParser.java    |  48 --
 .../config/RemoteConfigurationRegistry.java     | 139 -----
 .../config/remote/zk/CuratorClientService.java  | 464 --------------
 .../RemoteConfigurationRegistryJAASConfig.java  | 179 ------
 .../remote/zk/ZooKeeperClientService.java       |  25 -
 .../zk/ZooKeeperClientServiceProvider.java      |  34 --
 .../remote/RemoteConfigurationMessages.java     |  49 ++
 ...nfigurationRegistryClientServiceFactory.java |  41 ++
 ...figurationRegistryClientServiceProvider.java |  27 +
 .../RemoteConfigurationRegistryConfig.java      |  43 ++
 .../DefaultRemoteConfigurationRegistries.java   | 104 ++++
 .../config/RemoteConfigurationRegistries.java   |  33 +
 .../RemoteConfigurationRegistriesAccessor.java  |  60 ++
 .../RemoteConfigurationRegistriesParser.java    |  48 ++
 .../config/RemoteConfigurationRegistry.java     | 139 +++++
 .../config/remote/zk/CuratorClientService.java  | 464 ++++++++++++++
 .../RemoteConfigurationRegistryJAASConfig.java  | 179 ++++++
 .../remote/zk/ZooKeeperClientService.java       |  25 +
 .../zk/ZooKeeperClientServiceProvider.java      |  34 ++
 ...teConfigurationRegistryClientServiceProvider |  19 -
 ...teConfigurationRegistryClientServiceProvider |  19 +
 ...efaultRemoteConfigurationRegistriesTest.java | 184 ------
 ...teConfigurationRegistryConfigParserTest.java | 108 ----
 .../util/RemoteRegistryConfigTestUtils.java     | 117 ----
 ...eConfigurationRegistryClientServiceTest.java | 424 -------------
 ...moteConfigurationRegistryJAASConfigTest.java | 255 --------
 ...efaultRemoteConfigurationRegistriesTest.java | 184 ++++++
 ...teConfigurationRegistryConfigParserTest.java | 115 ++++
 .../util/RemoteRegistryConfigTestUtils.java     | 117 ++++
 ...eConfigurationRegistryClientServiceTest.java | 424 +++++++++++++
 ...moteConfigurationRegistryJAASConfigTest.java | 255 ++++++++
 .../RemoteConfigurationRegistryClient.java      |  80 ---
 ...emoteConfigurationRegistryClientService.java |  28 -
 .../ClusterConfigurationMonitorService.java     |  43 --
 .../discovery/ClusterConfigurationMonitor.java  |  48 --
 .../ClusterConfigurationMonitorProvider.java    |  27 -
 .../monitor/RemoteConfigurationMonitor.java     |  24 -
 .../RemoteConfigurationMonitorProvider.java     |  34 --
 .../RemoteConfigurationRegistryClient.java      |  80 +++
 ...emoteConfigurationRegistryClientService.java |  28 +
 .../ClusterConfigurationMonitorService.java     |  43 ++
 .../discovery/ClusterConfigurationMonitor.java  |  48 ++
 .../ClusterConfigurationMonitorProvider.java    |  27 +
 .../monitor/RemoteConfigurationMonitor.java     |  24 +
 .../RemoteConfigurationMonitorProvider.java     |  34 ++
 .../SimpleDescriptorHandlerFuncTest.java        | 275 ---------
 .../monitor/RemoteConfigurationMonitorTest.java | 603 -------------------
 .../SimpleDescriptorHandlerFuncTest.java        | 275 +++++++++
 .../monitor/RemoteConfigurationMonitorTest.java | 603 +++++++++++++++++++
 ...eway.topology.discovery.ServiceDiscoveryType |  19 -
 ...eway.topology.discovery.ServiceDiscoveryType |  19 +
 92 files changed, 5790 insertions(+), 5782 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClientCommon.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClientCommon.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClientCommon.java
deleted file mode 100644
index a2bf4ea..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClientCommon.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import net.minidev.json.JSONArray;
-import net.minidev.json.JSONObject;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryConfig;
-
-import java.util.HashMap;
-import java.util.Map;
-
-class AmbariClientCommon {
-
-    static final String AMBARI_CLUSTERS_URI = "/api/v1/clusters";
-
-    static final String AMBARI_HOSTROLES_URI =
-                                    AMBARI_CLUSTERS_URI + "/%s/services?fields=components/host_components/HostRoles";
-
-    static final String AMBARI_SERVICECONFIGS_URI =
-                                    AMBARI_CLUSTERS_URI + "/%s/configurations/service_config_versions?is_current=true";
-
-    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
-
-    private RESTInvoker restClient;
-
-
-    AmbariClientCommon(AliasService aliasService) {
-        this(new RESTInvoker(aliasService));
-    }
-
-
-    AmbariClientCommon(RESTInvoker restInvoker) {
-        this.restClient = restInvoker;
-    }
-
-
-
-    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> getActiveServiceConfigurations(String clusterName,
-                                                                                                ServiceDiscoveryConfig config) {
-        return getActiveServiceConfigurations(config.getAddress(),
-                                              clusterName,
-                                              config.getUser(),
-                                              config.getPasswordAlias());
-    }
-
-
-    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> getActiveServiceConfigurations(String discoveryAddress,
-                                                                                                String clusterName,
-                                                                                                String discoveryUser,
-                                                                                                String discoveryPwdAlias) {
-        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigurations = new HashMap<>();
-
-        String serviceConfigsURL = String.format("%s" + AMBARI_SERVICECONFIGS_URI, discoveryAddress, clusterName);
-
-        JSONObject serviceConfigsJSON = restClient.invoke(serviceConfigsURL, discoveryUser, discoveryPwdAlias);
-        if (serviceConfigsJSON != null) {
-            // Process the service configurations
-            JSONArray serviceConfigs = (JSONArray) serviceConfigsJSON.get("items");
-            for (Object serviceConfig : serviceConfigs) {
-                String serviceName = (String) ((JSONObject) serviceConfig).get("service_name");
-                JSONArray configurations = (JSONArray) ((JSONObject) serviceConfig).get("configurations");
-                for (Object configuration : configurations) {
-                    String configType = (String) ((JSONObject) configuration).get("type");
-                    String configVersion = String.valueOf(((JSONObject) configuration).get("version"));
-
-                    Map<String, String> configProps = new HashMap<>();
-                    JSONObject configProperties = (JSONObject) ((JSONObject) configuration).get("properties");
-                    for (String propertyName : configProperties.keySet()) {
-                        configProps.put(propertyName, String.valueOf(((JSONObject) configProperties).get(propertyName)));
-                    }
-                    if (!serviceConfigurations.containsKey(serviceName)) {
-                        serviceConfigurations.put(serviceName, new HashMap<>());
-                    }
-                    serviceConfigurations.get(serviceName).put(configType,
-                                                               new AmbariCluster.ServiceConfiguration(configType,
-                                                                                                      configVersion,
-                                                                                                      configProps));
-                }
-            }
-        }
-
-        return serviceConfigurations;
-    }
-
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java
deleted file mode 100644
index 3b31124..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitor;
-import org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitorProvider;
-
-public class AmbariClusterConfigurationMonitorProvider implements ClusterConfigurationMonitorProvider {
-
-    @Override
-    public String getType() {
-        return AmbariConfigurationMonitor.getType();
-    }
-
-    @Override
-    public ClusterConfigurationMonitor newInstance(GatewayConfig config, AliasService aliasService) {
-        return new AmbariConfigurationMonitor(config, aliasService);
-    }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
deleted file mode 100644
index e4b5e43..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
+++ /dev/null
@@ -1,525 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitor;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryConfig;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-
-class AmbariConfigurationMonitor implements ClusterConfigurationMonitor {
-
-    private static final String TYPE = "Ambari";
-
-    private static final String CLUSTERS_DATA_DIR_NAME = "clusters";
-
-    private static final String PERSISTED_FILE_COMMENT = "Generated File. Do Not Edit!";
-
-    private static final String PROP_CLUSTER_PREFIX = "cluster.";
-    private static final String PROP_CLUSTER_SOURCE = PROP_CLUSTER_PREFIX + "source";
-    private static final String PROP_CLUSTER_NAME   = PROP_CLUSTER_PREFIX + "name";
-    private static final String PROP_CLUSTER_USER   = PROP_CLUSTER_PREFIX + "user";
-    private static final String PROP_CLUSTER_ALIAS  = PROP_CLUSTER_PREFIX + "pwd.alias";
-
-    static final String INTERVAL_PROPERTY_NAME = "org.apache.hadoop.gateway.topology.discovery.ambari.monitor.interval";
-
-
-    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
-
-    // Ambari address
-    //    clusterName -> ServiceDiscoveryConfig
-    //
-    Map<String, Map<String, ServiceDiscoveryConfig>> clusterMonitorConfigurations = new HashMap<>();
-
-    // Ambari address
-    //    clusterName
-    //        configType -> version
-    //
-    Map<String, Map<String, Map<String, String>>> ambariClusterConfigVersions = new HashMap<>();
-
-    ReadWriteLock configVersionsLock = new ReentrantReadWriteLock();
-
-    private List<ConfigurationChangeListener> changeListeners = new ArrayList<>();
-
-    private AmbariClientCommon ambariClient;
-
-    PollingConfigAnalyzer internalMonitor;
-
-    GatewayConfig gatewayConfig = null;
-
-    static String getType() {
-        return TYPE;
-    }
-
-    AmbariConfigurationMonitor(GatewayConfig config, AliasService aliasService) {
-        this.gatewayConfig   = config;
-        this.ambariClient    = new AmbariClientCommon(aliasService);
-        this.internalMonitor = new PollingConfigAnalyzer(this);
-
-        // Override the default polling interval if it has been configured
-        int interval = config.getClusterMonitorPollingInterval(getType());
-        if (interval > 0) {
-            setPollingInterval(interval);
-        }
-
-        init();
-    }
-
-    @Override
-    public void setPollingInterval(int interval) {
-        internalMonitor.setInterval(interval);
-    }
-
-    private void init() {
-        loadDiscoveryConfiguration();
-        loadClusterVersionData();
-    }
-
-    /**
-     * Load any previously-persisted service discovery configurations.
-     * This is necessary for checking previously-deployed topologies.
-     */
-    private void loadDiscoveryConfiguration() {
-        File persistenceDir = getPersistenceDir();
-        if (persistenceDir != null) {
-            Collection<File> persistedConfigs = FileUtils.listFiles(persistenceDir, new String[]{"conf"}, false);
-            for (File persisted : persistedConfigs) {
-                Properties props = new Properties();
-                try {
-                    props.load(new FileInputStream(persisted));
-
-                    addDiscoveryConfig(props.getProperty(PROP_CLUSTER_NAME), new ServiceDiscoveryConfig() {
-                                                            public String getAddress() {
-                                                                return props.getProperty(PROP_CLUSTER_SOURCE);
-                                                            }
-
-                                                            public String getUser() {
-                                                                return props.getProperty(PROP_CLUSTER_USER);
-                                                            }
-
-                                                            public String getPasswordAlias() {
-                                                                return props.getProperty(PROP_CLUSTER_ALIAS);
-                                                            }
-                                                        });
-                } catch (IOException e) {
-                    log.failedToLoadClusterMonitorServiceDiscoveryConfig(getType(), e);
-                }
-            }
-        }
-    }
-
-    /**
-     * Load any previously-persisted cluster configuration version records, so the monitor will check
-     * previously-deployed topologies against the current cluster configuration.
-     */
-    private void loadClusterVersionData() {
-        File persistenceDir = getPersistenceDir();
-        if (persistenceDir != null) {
-            Collection<File> persistedConfigs = FileUtils.listFiles(getPersistenceDir(), new String[]{"ver"}, false);
-            for (File persisted : persistedConfigs) {
-                Properties props = new Properties();
-                try {
-                    props.load(new FileInputStream(persisted));
-
-                    String source = props.getProperty(PROP_CLUSTER_SOURCE);
-                    String clusterName = props.getProperty(PROP_CLUSTER_NAME);
-
-                    Map<String, String> configVersions = new HashMap<>();
-                    for (String name : props.stringPropertyNames()) {
-                        if (!name.startsWith(PROP_CLUSTER_PREFIX)) { // Ignore implementation-specific properties
-                            configVersions.put(name, props.getProperty(name));
-                        }
-                    }
-
-                    // Map the config versions to the cluster name
-                    addClusterConfigVersions(source, clusterName, configVersions);
-
-                } catch (IOException e) {
-                    log.failedToLoadClusterMonitorConfigVersions(getType(), e);
-                }
-            }
-        }
-    }
-
-    private void persistDiscoveryConfiguration(String clusterName, ServiceDiscoveryConfig sdc) {
-        File persistenceDir = getPersistenceDir();
-        if (persistenceDir != null) {
-
-            Properties props = new Properties();
-            props.setProperty(PROP_CLUSTER_NAME, clusterName);
-            props.setProperty(PROP_CLUSTER_SOURCE, sdc.getAddress());
-
-            String username = sdc.getUser();
-            if (username != null) {
-                props.setProperty(PROP_CLUSTER_USER, username);
-            }
-            String pwdAlias = sdc.getPasswordAlias();
-            if (pwdAlias != null) {
-                props.setProperty(PROP_CLUSTER_ALIAS, pwdAlias);
-            }
-
-            persist(props, getDiscoveryConfigPersistenceFile(sdc.getAddress(), clusterName));
-        }
-    }
-
-    private void persistClusterVersionData(String address, String clusterName, Map<String, String> configVersions) {
-        File persistenceDir = getPersistenceDir();
-        if (persistenceDir != null) {
-            Properties props = new Properties();
-            props.setProperty(PROP_CLUSTER_NAME, clusterName);
-            props.setProperty(PROP_CLUSTER_SOURCE, address);
-            for (String name : configVersions.keySet()) {
-                props.setProperty(name, configVersions.get(name));
-            }
-
-            persist(props, getConfigVersionsPersistenceFile(address, clusterName));
-        }
-    }
-
-    private void persist(Properties props, File dest) {
-        try {
-            props.store(new FileOutputStream(dest), PERSISTED_FILE_COMMENT);
-        } catch (Exception e) {
-            log.failedToPersistClusterMonitorData(getType(), dest.getAbsolutePath(), e);
-        }
-    }
-
-    private File getPersistenceDir() {
-        File persistenceDir = null;
-
-        File dataDir = new File(gatewayConfig.getGatewayDataDir());
-        if (dataDir.exists()) {
-            File clustersDir = new File(dataDir, CLUSTERS_DATA_DIR_NAME);
-            if (!clustersDir.exists()) {
-                clustersDir.mkdirs();
-            }
-            persistenceDir = clustersDir;
-        }
-
-        return persistenceDir;
-    }
-
-    private File getDiscoveryConfigPersistenceFile(String address, String clusterName) {
-        return getPersistenceFile(address, clusterName, "conf");
-    }
-
-    private File getConfigVersionsPersistenceFile(String address, String clusterName) {
-        return getPersistenceFile(address, clusterName, "ver");
-    }
-
-    private File getPersistenceFile(String address, String clusterName, String ext) {
-        String fileName = address.replace(":", "_").replace("/", "_") + "-" + clusterName + "." + ext;
-        return new File(getPersistenceDir(), fileName);
-    }
-
-    /**
-     * Add cluster configuration details to the monitor's in-memory record.
-     *
-     * @param address        An Ambari instance address.
-     * @param clusterName    The name of a cluster associated with the Ambari instance.
-     * @param configVersions A Map of configuration types and their corresponding versions.
-     */
-    private void addClusterConfigVersions(String address, String clusterName, Map<String, String> configVersions) {
-        configVersionsLock.writeLock().lock();
-        try {
-            ambariClusterConfigVersions.computeIfAbsent(address, k -> new HashMap<>())
-                                       .put(clusterName, configVersions);
-        } finally {
-            configVersionsLock.writeLock().unlock();
-        }
-    }
-
-    public void start() {
-        (new Thread(internalMonitor, "AmbariConfigurationMonitor")).start();
-    }
-
-    public void stop() {
-        internalMonitor.stop();
-    }
-
-    @Override
-    public void addListener(ConfigurationChangeListener listener) {
-        changeListeners.add(listener);
-    }
-
-    /**
-     * Add discovery configuration details for the specified cluster, so the monitor knows how to connect to check for
-     * changes.
-     *
-     * @param clusterName The name of the cluster.
-     * @param config      The associated service discovery configuration.
-     */
-    void addDiscoveryConfig(String clusterName, ServiceDiscoveryConfig config) {
-        clusterMonitorConfigurations.computeIfAbsent(config.getAddress(), k -> new HashMap<>()).put(clusterName, config);
-    }
-
-
-    /**
-     * Get the service discovery configuration associated with the specified Ambari instance and cluster.
-     *
-     * @param address     An Ambari instance address.
-     * @param clusterName The name of a cluster associated with the Ambari instance.
-     *
-     * @return The associated ServiceDiscoveryConfig object.
-     */
-    ServiceDiscoveryConfig getDiscoveryConfig(String address, String clusterName) {
-        ServiceDiscoveryConfig config = null;
-        if (clusterMonitorConfigurations.containsKey(address)) {
-            config = clusterMonitorConfigurations.get(address).get(clusterName);
-        }
-        return config;
-    }
-
-
-    /**
-     * Add cluster configuration data to the monitor, which it will use when determining if configuration has changed.
-     *
-     * @param cluster         An AmbariCluster object.
-     * @param discoveryConfig The discovery configuration associated with the cluster.
-     */
-    void addClusterConfigVersions(AmbariCluster cluster, ServiceDiscoveryConfig discoveryConfig) {
-
-        String clusterName = cluster.getName();
-
-        // Register the cluster discovery configuration for the monitor connections
-        persistDiscoveryConfiguration(clusterName, discoveryConfig);
-        addDiscoveryConfig(clusterName, discoveryConfig);
-
-        // Build the set of configuration versions
-        Map<String, String> configVersions = new HashMap<>();
-        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs = cluster.getServiceConfigurations();
-        for (String serviceName : serviceConfigs.keySet()) {
-            Map<String, AmbariCluster.ServiceConfiguration> configTypeVersionMap = serviceConfigs.get(serviceName);
-            for (AmbariCluster.ServiceConfiguration config : configTypeVersionMap.values()) {
-                String configType = config.getType();
-                String version = config.getVersion();
-                configVersions.put(configType, version);
-            }
-        }
-
-        persistClusterVersionData(discoveryConfig.getAddress(), clusterName, configVersions);
-        addClusterConfigVersions(discoveryConfig.getAddress(), clusterName, configVersions);
-    }
-
-
-    /**
-     * Remove the configuration record for the specified Ambari instance and cluster name.
-     *
-     * @param address     An Ambari instance address.
-     * @param clusterName The name of a cluster associated with the Ambari instance.
-     *
-     * @return The removed data; A Map of configuration types and their corresponding versions.
-     */
-    Map<String, String> removeClusterConfigVersions(String address, String clusterName) {
-        Map<String, String> result = new HashMap<>();
-
-        configVersionsLock.writeLock().lock();
-        try {
-            if (ambariClusterConfigVersions.containsKey(address)) {
-                result.putAll(ambariClusterConfigVersions.get(address).remove(clusterName));
-            }
-        } finally {
-            configVersionsLock.writeLock().unlock();
-        }
-
-        // Delete the associated persisted record
-        File persisted = getConfigVersionsPersistenceFile(address, clusterName);
-        if (persisted.exists()) {
-            persisted.delete();
-        }
-
-        return result;
-    }
-
-    /**
-     * Get the cluster configuration details for the specified cluster and Ambari instance.
-     *
-     * @param address     An Ambari instance address.
-     * @param clusterName The name of a cluster associated with the Ambari instance.
-     *
-     * @return A Map of configuration types and their corresponding versions.
-     */
-    Map<String, String> getClusterConfigVersions(String address, String clusterName) {
-        Map<String, String> result = new HashMap<>();
-
-        configVersionsLock.readLock().lock();
-        try {
-            if (ambariClusterConfigVersions.containsKey(address)) {
-                result.putAll(ambariClusterConfigVersions.get(address).get(clusterName));
-            }
-        } finally {
-            configVersionsLock.readLock().unlock();
-        }
-
-        return result;
-    }
-
-
-    /**
-     * Get all the clusters the monitor knows about.
-     *
-     * @return A Map of Ambari instance addresses to associated cluster names.
-     */
-    Map<String, List<String>> getClusterNames() {
-        Map<String, List<String>> result = new HashMap<>();
-
-        configVersionsLock.readLock().lock();
-        try {
-            for (String address : ambariClusterConfigVersions.keySet()) {
-                List<String> clusterNames = new ArrayList<>();
-                clusterNames.addAll(ambariClusterConfigVersions.get(address).keySet());
-                result.put(address, clusterNames);
-            }
-        } finally {
-            configVersionsLock.readLock().unlock();
-        }
-
-        return result;
-
-    }
-
-
-    /**
-     * Notify registered change listeners.
-     *
-     * @param source      The address of the Ambari instance from which the cluster details were determined.
-     * @param clusterName The name of the cluster whose configuration details have changed.
-     */
-    void notifyChangeListeners(String source, String clusterName) {
-        for (ConfigurationChangeListener listener : changeListeners) {
-            listener.onConfigurationChange(source, clusterName);
-        }
-    }
-
-
-    /**
-     * Request the current active configuration version info from Ambari.
-     *
-     * @param address     The Ambari instance address.
-     * @param clusterName The name of the cluster for which the details are desired.
-     *
-     * @return A Map of service configuration types and their corresponding versions.
-     */
-    Map<String, String> getUpdatedConfigVersions(String address, String clusterName) {
-        Map<String, String> configVersions = new HashMap<>();
-
-        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs =
-                    ambariClient.getActiveServiceConfigurations(clusterName, getDiscoveryConfig(address, clusterName));
-
-        for (Map<String, AmbariCluster.ServiceConfiguration> serviceConfig : serviceConfigs.values()) {
-            for (AmbariCluster.ServiceConfiguration config : serviceConfig.values()) {
-                configVersions.put(config.getType(), config.getVersion());
-            }
-        }
-
-        return configVersions;
-    }
-
-
-    /**
-     * The thread that polls Ambari for configuration details for clusters associated with discovered topologies,
-     * compares them with the current recorded values, and notifies any listeners when differences are discovered.
-     */
-    static final class PollingConfigAnalyzer implements Runnable {
-
-        private static final int DEFAULT_POLLING_INTERVAL = 60;
-
-        // Polling interval in seconds
-        private int interval = DEFAULT_POLLING_INTERVAL;
-
-        private AmbariConfigurationMonitor delegate;
-
-        private boolean isActive = false;
-
-        PollingConfigAnalyzer(AmbariConfigurationMonitor delegate) {
-            this.delegate = delegate;
-            this.interval = Integer.getInteger(INTERVAL_PROPERTY_NAME, PollingConfigAnalyzer.DEFAULT_POLLING_INTERVAL);
-        }
-
-        void setInterval(int interval) {
-            this.interval = interval;
-        }
-
-
-        void stop() {
-            isActive = false;
-        }
-
-        @Override
-        public void run() {
-            isActive = true;
-
-            log.startedAmbariConfigMonitor(interval);
-
-            while (isActive) {
-                for (Map.Entry<String, List<String>> entry : delegate.getClusterNames().entrySet()) {
-                    String address = entry.getKey();
-                    for (String clusterName : entry.getValue()) {
-                        Map<String, String> configVersions = delegate.getClusterConfigVersions(address, clusterName);
-                        if (configVersions != null && !configVersions.isEmpty()) {
-                            Map<String, String> updatedVersions = delegate.getUpdatedConfigVersions(address, clusterName);
-                            if (updatedVersions != null && !updatedVersions.isEmpty()) {
-                                boolean configHasChanged = false;
-
-                                // If the config sets don't match in size, then something has changed
-                                if (updatedVersions.size() != configVersions.size()) {
-                                    configHasChanged = true;
-                                } else {
-                                    // Perform the comparison of all the config versions
-                                    for (Map.Entry<String, String> configVersion : configVersions.entrySet()) {
-                                        if (!updatedVersions.get(configVersion.getKey()).equals(configVersion.getValue())) {
-                                            configHasChanged = true;
-                                            break;
-                                        }
-                                    }
-                                }
-
-                                // If a change has occurred, notify the listeners
-                                if (configHasChanged) {
-                                    delegate.notifyChangeListeners(address, clusterName);
-                                }
-                            }
-                        }
-                    }
-                }
-
-                try {
-                    Thread.sleep(interval * 1000);
-                } catch (InterruptedException e) {
-                    // Ignore
-                }
-            }
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/RESTInvoker.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/RESTInvoker.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/RESTInvoker.java
deleted file mode 100644
index 6a6fad8..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/RESTInvoker.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import net.minidev.json.JSONObject;
-import net.minidev.json.JSONValue;
-import org.apache.hadoop.gateway.config.ConfigurationException;
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.services.security.AliasServiceException;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpStatus;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.message.BasicHeader;
-import org.apache.http.util.EntityUtils;
-
-import java.io.IOException;
-
-class RESTInvoker {
-
-    private static final String DEFAULT_USER_ALIAS = "ambari.discovery.user";
-    private static final String DEFAULT_PWD_ALIAS  = "ambari.discovery.password";
-
-    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
-
-    private AliasService aliasService = null;
-
-    private CloseableHttpClient httpClient = org.apache.http.impl.client.HttpClients.createDefault();
-
-
-    RESTInvoker(AliasService aliasService) {
-        this.aliasService = aliasService;
-    }
-
-
-    JSONObject invoke(String url, String username, String passwordAlias) {
-        JSONObject result = null;
-
-        CloseableHttpResponse response = null;
-        try {
-            HttpGet request = new HttpGet(url);
-
-            // If no configured username, then use default username alias
-            String password = null;
-            if (username == null) {
-                if (aliasService != null) {
-                    try {
-                        char[] defaultUser = aliasService.getPasswordFromAliasForGateway(DEFAULT_USER_ALIAS);
-                        if (defaultUser != null) {
-                            username = new String(defaultUser);
-                        }
-                    } catch (AliasServiceException e) {
-                        log.aliasServiceUserError(DEFAULT_USER_ALIAS, e.getLocalizedMessage());
-                    }
-                }
-
-                // If username is still null
-                if (username == null) {
-                    log.aliasServiceUserNotFound();
-                    throw new ConfigurationException("No username is configured for Ambari service discovery.");
-                }
-            }
-
-            if (aliasService != null) {
-                // If no password alias is configured, then try the default alias
-                if (passwordAlias == null) {
-                    passwordAlias = DEFAULT_PWD_ALIAS;
-                }
-
-                try {
-                    char[] pwd = aliasService.getPasswordFromAliasForGateway(passwordAlias);
-                    if (pwd != null) {
-                        password = new String(pwd);
-                    }
-
-                } catch (AliasServiceException e) {
-                    log.aliasServicePasswordError(passwordAlias, e.getLocalizedMessage());
-                }
-            }
-
-            // If the password could not be determined
-            if (password == null) {
-                log.aliasServicePasswordNotFound();
-                throw new ConfigurationException("No password is configured for Ambari service discovery.");
-            }
-
-            // Add an auth header if credentials are available
-            String encodedCreds =
-                    org.apache.commons.codec.binary.Base64.encodeBase64String((username + ":" + password).getBytes());
-            request.addHeader(new BasicHeader("Authorization", "Basic " + encodedCreds));
-
-            response = httpClient.execute(request);
-
-            if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
-                HttpEntity entity = response.getEntity();
-                if (entity != null) {
-                    result = (JSONObject) JSONValue.parse((EntityUtils.toString(entity)));
-                    log.debugJSON(result.toJSONString());
-                } else {
-                    log.noJSON(url);
-                }
-            } else {
-                log.unexpectedRestResponseStatusCode(url, response.getStatusLine().getStatusCode());
-            }
-
-        } catch (IOException e) {
-            log.restInvocationError(url, e);
-        } finally {
-            if(response != null) {
-                try {
-                    response.close();
-                } catch (IOException e) {
-                    // Ignore
-                }
-            }
-        }
-        return result;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
new file mode 100644
index 0000000..9e5dcb3
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import net.minidev.json.JSONArray;
+import net.minidev.json.JSONObject;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
+
+import java.util.HashMap;
+import java.util.Map;
+
+class AmbariClientCommon {
+
+    static final String AMBARI_CLUSTERS_URI = "/api/v1/clusters";
+
+    static final String AMBARI_HOSTROLES_URI =
+                                    AMBARI_CLUSTERS_URI + "/%s/services?fields=components/host_components/HostRoles";
+
+    static final String AMBARI_SERVICECONFIGS_URI =
+                                    AMBARI_CLUSTERS_URI + "/%s/configurations/service_config_versions?is_current=true";
+
+    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+    private RESTInvoker restClient;
+
+
+    AmbariClientCommon(AliasService aliasService) {
+        this(new RESTInvoker(aliasService));
+    }
+
+
+    AmbariClientCommon(RESTInvoker restInvoker) {
+        this.restClient = restInvoker;
+    }
+
+
+
+    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> getActiveServiceConfigurations(String clusterName,
+                                                                                                ServiceDiscoveryConfig config) {
+        return getActiveServiceConfigurations(config.getAddress(),
+                                              clusterName,
+                                              config.getUser(),
+                                              config.getPasswordAlias());
+    }
+
+
+    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> getActiveServiceConfigurations(String discoveryAddress,
+                                                                                                String clusterName,
+                                                                                                String discoveryUser,
+                                                                                                String discoveryPwdAlias) {
+        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigurations = new HashMap<>();
+
+        String serviceConfigsURL = String.format("%s" + AMBARI_SERVICECONFIGS_URI, discoveryAddress, clusterName);
+
+        JSONObject serviceConfigsJSON = restClient.invoke(serviceConfigsURL, discoveryUser, discoveryPwdAlias);
+        if (serviceConfigsJSON != null) {
+            // Process the service configurations
+            JSONArray serviceConfigs = (JSONArray) serviceConfigsJSON.get("items");
+            for (Object serviceConfig : serviceConfigs) {
+                String serviceName = (String) ((JSONObject) serviceConfig).get("service_name");
+                JSONArray configurations = (JSONArray) ((JSONObject) serviceConfig).get("configurations");
+                for (Object configuration : configurations) {
+                    String configType = (String) ((JSONObject) configuration).get("type");
+                    String configVersion = String.valueOf(((JSONObject) configuration).get("version"));
+
+                    Map<String, String> configProps = new HashMap<>();
+                    JSONObject configProperties = (JSONObject) ((JSONObject) configuration).get("properties");
+                    for (String propertyName : configProperties.keySet()) {
+                        configProps.put(propertyName, String.valueOf(((JSONObject) configProperties).get(propertyName)));
+                    }
+                    if (!serviceConfigurations.containsKey(serviceName)) {
+                        serviceConfigurations.put(serviceName, new HashMap<>());
+                    }
+                    serviceConfigurations.get(serviceName).put(configType,
+                                                               new AmbariCluster.ServiceConfiguration(configType,
+                                                                                                      configVersion,
+                                                                                                      configProps));
+                }
+            }
+        }
+
+        return serviceConfigurations;
+    }
+
+
+}

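A minimal usage sketch for the class above (the Ambari address and cluster
name are hypothetical; an AliasService instance is assumed to be available):

    AmbariClientCommon client = new AmbariClientCommon(aliasService);
    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> current =
        client.getActiveServiceConfigurations("http://ambari.example.com:8080",
                                              "Sandbox",
                                              null,   // fall back to the default user alias
                                              null);  // fall back to the default password alias
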
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java
new file mode 100644
index 0000000..95b0280
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterConfigurationMonitorProvider.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
+import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider;
+
+public class AmbariClusterConfigurationMonitorProvider implements
+    ClusterConfigurationMonitorProvider {
+
+    @Override
+    public String getType() {
+        return AmbariConfigurationMonitor.getType();
+    }
+
+    @Override
+    public ClusterConfigurationMonitor newInstance(GatewayConfig config, AliasService aliasService) {
+        return new AmbariConfigurationMonitor(config, aliasService);
+    }
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
new file mode 100644
index 0000000..c3aa27a
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
@@ -0,0 +1,525 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+
+class AmbariConfigurationMonitor implements ClusterConfigurationMonitor {
+
+    private static final String TYPE = "Ambari";
+
+    private static final String CLUSTERS_DATA_DIR_NAME = "clusters";
+
+    private static final String PERSISTED_FILE_COMMENT = "Generated File. Do Not Edit!";
+
+    private static final String PROP_CLUSTER_PREFIX = "cluster.";
+    private static final String PROP_CLUSTER_SOURCE = PROP_CLUSTER_PREFIX + "source";
+    private static final String PROP_CLUSTER_NAME   = PROP_CLUSTER_PREFIX + "name";
+    private static final String PROP_CLUSTER_USER   = PROP_CLUSTER_PREFIX + "user";
+    private static final String PROP_CLUSTER_ALIAS  = PROP_CLUSTER_PREFIX + "pwd.alias";
+
+    static final String INTERVAL_PROPERTY_NAME = "org.apache.knox.gateway.topology.discovery.ambari.monitor.interval";
+
+
+    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+    // Ambari address
+    //    clusterName -> ServiceDiscoveryConfig
+    //
+    Map<String, Map<String, ServiceDiscoveryConfig>> clusterMonitorConfigurations = new HashMap<>();
+
+    // Ambari address
+    //    clusterName
+    //        configType -> version
+    //
+    Map<String, Map<String, Map<String, String>>> ambariClusterConfigVersions = new HashMap<>();
+
+    ReadWriteLock configVersionsLock = new ReentrantReadWriteLock();
+
+    private List<ConfigurationChangeListener> changeListeners = new ArrayList<>();
+
+    private AmbariClientCommon ambariClient;
+
+    PollingConfigAnalyzer internalMonitor;
+
+    GatewayConfig gatewayConfig = null;
+
+    static String getType() {
+        return TYPE;
+    }
+
+    AmbariConfigurationMonitor(GatewayConfig config, AliasService aliasService) {
+        this.gatewayConfig   = config;
+        this.ambariClient    = new AmbariClientCommon(aliasService);
+        this.internalMonitor = new PollingConfigAnalyzer(this);
+
+        // Override the default polling interval if it has been configured
+        int interval = config.getClusterMonitorPollingInterval(getType());
+        if (interval > 0) {
+            setPollingInterval(interval);
+        }
+
+        init();
+    }
+
+    @Override
+    public void setPollingInterval(int interval) {
+        internalMonitor.setInterval(interval);
+    }
+
+    private void init() {
+        loadDiscoveryConfiguration();
+        loadClusterVersionData();
+    }
+
+    /**
+     * Load any previously-persisted service discovery configurations.
+     * This is necessary for checking previously-deployed topologies.
+     */
+    private void loadDiscoveryConfiguration() {
+        File persistenceDir = getPersistenceDir();
+        if (persistenceDir != null) {
+            Collection<File> persistedConfigs = FileUtils.listFiles(persistenceDir, new String[]{"conf"}, false);
+            for (File persisted : persistedConfigs) {
+                Properties props = new Properties();
+                try (FileInputStream in = new FileInputStream(persisted)) {
+                    props.load(in);
+
+                    addDiscoveryConfig(props.getProperty(PROP_CLUSTER_NAME), new ServiceDiscoveryConfig() {
+                                                            public String getAddress() {
+                                                                return props.getProperty(PROP_CLUSTER_SOURCE);
+                                                            }
+
+                                                            public String getUser() {
+                                                                return props.getProperty(PROP_CLUSTER_USER);
+                                                            }
+
+                                                            public String getPasswordAlias() {
+                                                                return props.getProperty(PROP_CLUSTER_ALIAS);
+                                                            }
+                                                        });
+                } catch (IOException e) {
+                    log.failedToLoadClusterMonitorServiceDiscoveryConfig(getType(), e);
+                }
+            }
+        }
+    }
+
+    /**
+     * Load any previously-persisted cluster configuration version records, so the monitor will check
+     * previously-deployed topologies against the current cluster configuration.
+     */
+    private void loadClusterVersionData() {
+        File persistenceDir = getPersistenceDir();
+        if (persistenceDir != null) {
+            Collection<File> persistedConfigs = FileUtils.listFiles(persistenceDir, new String[]{"ver"}, false);
+            for (File persisted : persistedConfigs) {
+                Properties props = new Properties();
+                try (FileInputStream in = new FileInputStream(persisted)) {
+                    props.load(in);
+
+                    String source = props.getProperty(PROP_CLUSTER_SOURCE);
+                    String clusterName = props.getProperty(PROP_CLUSTER_NAME);
+
+                    Map<String, String> configVersions = new HashMap<>();
+                    for (String name : props.stringPropertyNames()) {
+                        if (!name.startsWith(PROP_CLUSTER_PREFIX)) { // Ignore implementation-specific properties
+                            configVersions.put(name, props.getProperty(name));
+                        }
+                    }
+
+                    // Map the config versions to the cluster name
+                    addClusterConfigVersions(source, clusterName, configVersions);
+
+                } catch (IOException e) {
+                    log.failedToLoadClusterMonitorConfigVersions(getType(), e);
+                }
+            }
+        }
+    }
+
+    private void persistDiscoveryConfiguration(String clusterName, ServiceDiscoveryConfig sdc) {
+        File persistenceDir = getPersistenceDir();
+        if (persistenceDir != null) {
+
+            Properties props = new Properties();
+            props.setProperty(PROP_CLUSTER_NAME, clusterName);
+            props.setProperty(PROP_CLUSTER_SOURCE, sdc.getAddress());
+
+            String username = sdc.getUser();
+            if (username != null) {
+                props.setProperty(PROP_CLUSTER_USER, username);
+            }
+            String pwdAlias = sdc.getPasswordAlias();
+            if (pwdAlias != null) {
+                props.setProperty(PROP_CLUSTER_ALIAS, pwdAlias);
+            }
+
+            persist(props, getDiscoveryConfigPersistenceFile(sdc.getAddress(), clusterName));
+        }
+    }
+
+    private void persistClusterVersionData(String address, String clusterName, Map<String, String> configVersions) {
+        File persistenceDir = getPersistenceDir();
+        if (persistenceDir != null) {
+            Properties props = new Properties();
+            props.setProperty(PROP_CLUSTER_NAME, clusterName);
+            props.setProperty(PROP_CLUSTER_SOURCE, address);
+            for (String name : configVersions.keySet()) {
+                props.setProperty(name, configVersions.get(name));
+            }
+
+            persist(props, getConfigVersionsPersistenceFile(address, clusterName));
+        }
+    }
+
+    private void persist(Properties props, File dest) {
+        try (FileOutputStream out = new FileOutputStream(dest)) {
+            props.store(out, PERSISTED_FILE_COMMENT);
+        } catch (Exception e) {
+            log.failedToPersistClusterMonitorData(getType(), dest.getAbsolutePath(), e);
+        }
+    }
+
+    private File getPersistenceDir() {
+        File persistenceDir = null;
+
+        File dataDir = new File(gatewayConfig.getGatewayDataDir());
+        if (dataDir.exists()) {
+            File clustersDir = new File(dataDir, CLUSTERS_DATA_DIR_NAME);
+            if (!clustersDir.exists()) {
+                clustersDir.mkdirs();
+            }
+            persistenceDir = clustersDir;
+        }
+
+        return persistenceDir;
+    }
+
+    private File getDiscoveryConfigPersistenceFile(String address, String clusterName) {
+        return getPersistenceFile(address, clusterName, "conf");
+    }
+
+    private File getConfigVersionsPersistenceFile(String address, String clusterName) {
+        return getPersistenceFile(address, clusterName, "ver");
+    }
+
+    private File getPersistenceFile(String address, String clusterName, String ext) {
+        String fileName = address.replace(":", "_").replace("/", "_") + "-" + clusterName + "." + ext;
+        return new File(getPersistenceDir(), fileName);
+    }
+
+    /**
+     * Add cluster configuration details to the monitor's in-memory record.
+     *
+     * @param address        An Ambari instance address.
+     * @param clusterName    The name of a cluster associated with the Ambari instance.
+     * @param configVersions A Map of configuration types and their corresponding versions.
+     */
+    private void addClusterConfigVersions(String address, String clusterName, Map<String, String> configVersions) {
+        configVersionsLock.writeLock().lock();
+        try {
+            ambariClusterConfigVersions.computeIfAbsent(address, k -> new HashMap<>())
+                                       .put(clusterName, configVersions);
+        } finally {
+            configVersionsLock.writeLock().unlock();
+        }
+    }
+
+    public void start() {
+        (new Thread(internalMonitor, "AmbariConfigurationMonitor")).start();
+    }
+
+    public void stop() {
+        internalMonitor.stop();
+    }
+
+    @Override
+    public void addListener(ConfigurationChangeListener listener) {
+        changeListeners.add(listener);
+    }
+
+    /**
+     * Add discovery configuration details for the specified cluster, so the monitor knows how to connect to check for
+     * changes.
+     *
+     * @param clusterName The name of the cluster.
+     * @param config      The associated service discovery configuration.
+     */
+    void addDiscoveryConfig(String clusterName, ServiceDiscoveryConfig config) {
+        clusterMonitorConfigurations.computeIfAbsent(config.getAddress(), k -> new HashMap<>()).put(clusterName, config);
+    }
+
+
+    /**
+     * Get the service discovery configuration associated with the specified Ambari instance and cluster.
+     *
+     * @param address     An Ambari instance address.
+     * @param clusterName The name of a cluster associated with the Ambari instance.
+     *
+     * @return The associated ServiceDiscoveryConfig object.
+     */
+    ServiceDiscoveryConfig getDiscoveryConfig(String address, String clusterName) {
+        ServiceDiscoveryConfig config = null;
+        if (clusterMonitorConfigurations.containsKey(address)) {
+            config = clusterMonitorConfigurations.get(address).get(clusterName);
+        }
+        return config;
+    }
+
+
+    /**
+     * Add cluster configuration data to the monitor, which it will use when determining if configuration has changed.
+     *
+     * @param cluster         An AmbariCluster object.
+     * @param discoveryConfig The discovery configuration associated with the cluster.
+     */
+    void addClusterConfigVersions(AmbariCluster cluster, ServiceDiscoveryConfig discoveryConfig) {
+
+        String clusterName = cluster.getName();
+
+        // Register the cluster discovery configuration for the monitor connections
+        persistDiscoveryConfiguration(clusterName, discoveryConfig);
+        addDiscoveryConfig(clusterName, discoveryConfig);
+
+        // Build the set of configuration versions
+        Map<String, String> configVersions = new HashMap<>();
+        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs = cluster.getServiceConfigurations();
+        for (String serviceName : serviceConfigs.keySet()) {
+            Map<String, AmbariCluster.ServiceConfiguration> configTypeVersionMap = serviceConfigs.get(serviceName);
+            for (AmbariCluster.ServiceConfiguration config : configTypeVersionMap.values()) {
+                String configType = config.getType();
+                String version = config.getVersion();
+                configVersions.put(configType, version);
+            }
+        }
+
+        persistClusterVersionData(discoveryConfig.getAddress(), clusterName, configVersions);
+        addClusterConfigVersions(discoveryConfig.getAddress(), clusterName, configVersions);
+    }
+
+
+    /**
+     * Remove the configuration record for the specified Ambari instance and cluster name.
+     *
+     * @param address     An Ambari instance address.
+     * @param clusterName The name of a cluster associated with the Ambari instance.
+     *
+     * @return The removed data: a Map of configuration types and their corresponding versions.
+     */
+    Map<String, String> removeClusterConfigVersions(String address, String clusterName) {
+        Map<String, String> result = new HashMap<>();
+
+        configVersionsLock.writeLock().lock();
+        try {
+            if (ambariClusterConfigVersions.containsKey(address)) {
+                Map<String, String> removed = ambariClusterConfigVersions.get(address).remove(clusterName);
+                if (removed != null) { // Guard against clusters with no recorded versions
+                    result.putAll(removed);
+                }
+            }
+        } finally {
+            configVersionsLock.writeLock().unlock();
+        }
+
+        // Delete the associated persisted record
+        File persisted = getConfigVersionsPersistenceFile(address, clusterName);
+        if (persisted.exists()) {
+            persisted.delete();
+        }
+
+        return result;
+    }
+
+    /**
+     * Get the cluster configuration details for the specified cluster and Ambari instance.
+     *
+     * @param address     An Ambari instance address.
+     * @param clusterName The name of a cluster associated with the Ambari instance.
+     *
+     * @return A Map of configuration types and their corresponding versions.
+     */
+    Map<String, String> getClusterConfigVersions(String address, String clusterName) {
+        Map<String, String> result = new HashMap<>();
+
+        configVersionsLock.readLock().lock();
+        try {
+            if (ambariClusterConfigVersions.containsKey(address)) {
+                Map<String, String> versions = ambariClusterConfigVersions.get(address).get(clusterName);
+                if (versions != null) { // The cluster may not have any recorded versions yet
+                    result.putAll(versions);
+                }
+            }
+        } finally {
+            configVersionsLock.readLock().unlock();
+        }
+
+        return result;
+    }
+
+
+    /**
+     * Get all the clusters the monitor knows about.
+     *
+     * @return A Map of Ambari instance addresses to associated cluster names.
+     */
+    Map<String, List<String>> getClusterNames() {
+        Map<String, List<String>> result = new HashMap<>();
+
+        configVersionsLock.readLock().lock();
+        try {
+            for (String address : ambariClusterConfigVersions.keySet()) {
+                List<String> clusterNames = new ArrayList<>();
+                clusterNames.addAll(ambariClusterConfigVersions.get(address).keySet());
+                result.put(address, clusterNames);
+            }
+        } finally {
+            configVersionsLock.readLock().unlock();
+        }
+
+        return result;
+    }
+
+
+    /**
+     * Notify registered change listeners.
+     *
+     * @param source      The address of the Ambari instance from which the cluster details were determined.
+     * @param clusterName The name of the cluster whose configuration details have changed.
+     */
+    void notifyChangeListeners(String source, String clusterName) {
+        for (ConfigurationChangeListener listener : changeListeners) {
+            listener.onConfigurationChange(source, clusterName);
+        }
+    }
+
+
+    /**
+     * Request the current active configuration version info from Ambari.
+     *
+     * @param address     The Ambari instance address.
+     * @param clusterName The name of the cluster for which the details are desired.
+     *
+     * @return A Map of service configuration types and their corresponding versions.
+     */
+    Map<String, String> getUpdatedConfigVersions(String address, String clusterName) {
+        Map<String, String> configVersions = new HashMap<>();
+
+        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs =
+                    ambariClient.getActiveServiceConfigurations(clusterName, getDiscoveryConfig(address, clusterName));
+
+        for (Map<String, AmbariCluster.ServiceConfiguration> serviceConfig : serviceConfigs.values()) {
+            for (AmbariCluster.ServiceConfiguration config : serviceConfig.values()) {
+                configVersions.put(config.getType(), config.getVersion());
+            }
+        }
+
+        return configVersions;
+    }
+
+
+    /**
+     * The thread that polls Ambari for configuration details for clusters associated with discovered topologies,
+     * compares them with the current recorded values, and notifies any listeners when differences are discovered.
+     */
+    static final class PollingConfigAnalyzer implements Runnable {
+
+        private static final int DEFAULT_POLLING_INTERVAL = 60;
+
+        // Polling interval in seconds
+        private int interval = DEFAULT_POLLING_INTERVAL;
+
+        private AmbariConfigurationMonitor delegate;
+
+        private volatile boolean isActive = false; // Written by stop() from another thread
+
+        PollingConfigAnalyzer(AmbariConfigurationMonitor delegate) {
+            this.delegate = delegate;
+            this.interval = Integer.getInteger(INTERVAL_PROPERTY_NAME, PollingConfigAnalyzer.DEFAULT_POLLING_INTERVAL);
+        }
+
+        void setInterval(int interval) {
+            this.interval = interval;
+        }
+
+
+        void stop() {
+            isActive = false;
+        }
+
+        @Override
+        public void run() {
+            isActive = true;
+
+            log.startedAmbariConfigMonitor(interval);
+
+            while (isActive) {
+                for (Map.Entry<String, List<String>> entry : delegate.getClusterNames().entrySet()) {
+                    String address = entry.getKey();
+                    for (String clusterName : entry.getValue()) {
+                        Map<String, String> configVersions = delegate.getClusterConfigVersions(address, clusterName);
+                        if (configVersions != null && !configVersions.isEmpty()) {
+                            Map<String, String> updatedVersions = delegate.getUpdatedConfigVersions(address, clusterName);
+                            if (updatedVersions != null && !updatedVersions.isEmpty()) {
+                                boolean configHasChanged = false;
+
+                                // If the config sets don't match in size, then something has changed
+                                if (updatedVersions.size() != configVersions.size()) {
+                                    configHasChanged = true;
+                                } else {
+                                    // Perform the comparison of all the config versions
+                                    for (Map.Entry<String, String> configVersion : configVersions.entrySet()) {
+                                        if (!updatedVersions.get(configVersion.getKey()).equals(configVersion.getValue())) {
+                                            configHasChanged = true;
+                                            break;
+                                        }
+                                    }
+                                }
+
+                                // If a change has occurred, notify the listeners
+                                if (configHasChanged) {
+                                    delegate.notifyChangeListeners(address, clusterName);
+                                }
+                            }
+                        }
+                    }
+                }
+
+                try {
+                    Thread.sleep(interval * 1000L);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt(); // Restore the interrupt status
+                    isActive = false;                   // Treat interruption as a stop request
+                }
+            }
+        }
+    }
+
+}

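For context, a minimal wiring sketch for the monitor above (gatewayConfig and
aliasService are assumed to be supplied by the gateway's service framework):

    AmbariConfigurationMonitor monitor = new AmbariConfigurationMonitor(gatewayConfig, aliasService);
    monitor.addListener(new ConfigurationChangeListener() {
        @Override
        public void onConfigurationChange(String source, String clusterName) {
            System.out.println("Cluster config changed: " + clusterName + " @ " + source);
        }
    });
    monitor.start();  // Spawns the PollingConfigAnalyzer thread
    // ... later, on shutdown:
    monitor.stop();   // Lets the polling loop exit after the current cycle

The default 60-second polling interval can also be overridden with the system
property named by INTERVAL_PROPERTY_NAME, e.g.
-Dorg.apache.knox.gateway.topology.discovery.ambari.monitor.interval=30.
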
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/RESTInvoker.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/RESTInvoker.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/RESTInvoker.java
new file mode 100644
index 0000000..8830115
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/RESTInvoker.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import net.minidev.json.JSONObject;
+import net.minidev.json.JSONValue;
+import org.apache.knox.gateway.config.ConfigurationException;
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.knox.gateway.services.security.AliasServiceException;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.util.EntityUtils;
+
+import java.io.IOException;
+
+class RESTInvoker {
+
+    private static final String DEFAULT_USER_ALIAS = "ambari.discovery.user";
+    private static final String DEFAULT_PWD_ALIAS  = "ambari.discovery.password";
+
+    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+    private AliasService aliasService = null;
+
+    private CloseableHttpClient httpClient = org.apache.http.impl.client.HttpClients.createDefault();
+
+
+    RESTInvoker(AliasService aliasService) {
+        this.aliasService = aliasService;
+    }
+
+
+    JSONObject invoke(String url, String username, String passwordAlias) {
+        JSONObject result = null;
+
+        CloseableHttpResponse response = null;
+        try {
+            HttpGet request = new HttpGet(url);
+
+            // If no configured username, then use default username alias
+            String password = null;
+            if (username == null) {
+                if (aliasService != null) {
+                    try {
+                        char[] defaultUser = aliasService.getPasswordFromAliasForGateway(DEFAULT_USER_ALIAS);
+                        if (defaultUser != null) {
+                            username = new String(defaultUser);
+                        }
+                    } catch (AliasServiceException e) {
+                        log.aliasServiceUserError(DEFAULT_USER_ALIAS, e.getLocalizedMessage());
+                    }
+                }
+
+                // If username is still null
+                if (username == null) {
+                    log.aliasServiceUserNotFound();
+                    throw new ConfigurationException("No username is configured for Ambari service discovery.");
+                }
+            }
+
+            if (aliasService != null) {
+                // If no password alias is configured, then try the default alias
+                if (passwordAlias == null) {
+                    passwordAlias = DEFAULT_PWD_ALIAS;
+                }
+
+                try {
+                    char[] pwd = aliasService.getPasswordFromAliasForGateway(passwordAlias);
+                    if (pwd != null) {
+                        password = new String(pwd);
+                    }
+
+                } catch (AliasServiceException e) {
+                    log.aliasServicePasswordError(passwordAlias, e.getLocalizedMessage());
+                }
+            }
+
+            // If the password could not be determined
+            if (password == null) {
+                log.aliasServicePasswordNotFound();
+                throw new ConfigurationException("No password is configured for Ambari service discovery.");
+            }
+
+            // Add an auth header if credentials are available
+            String encodedCreds =
+                    org.apache.commons.codec.binary.Base64.encodeBase64String((username + ":" + password).getBytes());
+            request.addHeader(new BasicHeader("Authorization", "Basic " + encodedCreds));
+
+            response = httpClient.execute(request);
+
+            if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
+                HttpEntity entity = response.getEntity();
+                if (entity != null) {
+                    result = (JSONObject) JSONValue.parse(EntityUtils.toString(entity));
+                    if (result != null) { // Guard against a response body that is not valid JSON
+                        log.debugJSON(result.toJSONString());
+                    }
+                } else {
+                    log.noJSON(url);
+                }
+            } else {
+                log.unexpectedRestResponseStatusCode(url, response.getStatusLine().getStatusCode());
+            }
+
+        } catch (IOException e) {
+            log.restInvocationError(url, e);
+        } finally {
+            if(response != null) {
+                try {
+                    response.close();
+                } catch (IOException e) {
+                    // Ignore
+                }
+            }
+        }
+        return result;
+    }
+
+}

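An illustrative call (the URL is hypothetical): with a null user and password
alias, the invoker falls back to the ambari.discovery.user and
ambari.discovery.password gateway aliases resolved through the AliasService.

    RESTInvoker rest = new RESTInvoker(aliasService);
    JSONObject clusters = rest.invoke("http://ambari.example.com:8080/api/v1/clusters", null, null);
    if (clusters != null) {
        System.out.println(clusters.toJSONString());
    }
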
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitorProvider
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitorProvider b/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitorProvider
deleted file mode 100644
index d9b2b05..0000000
--- a/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitorProvider
+++ /dev/null
@@ -1,19 +0,0 @@
-##########################################################################
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-
-org.apache.hadoop.gateway.topology.discovery.ambari.AmbariClusterConfigurationMonitorProvider
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider b/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider
new file mode 100644
index 0000000..280485f
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/resources/META-INF/services/org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider
@@ -0,0 +1,19 @@
+##########################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+org.apache.knox.gateway.topology.discovery.ambari.AmbariClusterConfigurationMonitorProvider
\ No newline at end of file

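This is a standard java.util.ServiceLoader registration: the file name is the
service interface and its content is the implementation class. A sketch of how
such a provider can be looked up (assuming the gateway resolves providers this
way):

    for (ClusterConfigurationMonitorProvider provider
             : ServiceLoader.load(ClusterConfigurationMonitorProvider.class)) {
        if ("Ambari".equals(provider.getType())) {
            ClusterConfigurationMonitor monitor = provider.newInstance(gatewayConfig, aliasService);
            monitor.start();
        }
    }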

[53/53] [abbrv] knox git commit: KNOX-998 - Merge from 0.14.0 master

Posted by mo...@apache.org.
KNOX-998 - Merge from 0.14.0 master


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/92e2ec59
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/92e2ec59
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/92e2ec59

Branch: refs/heads/master
Commit: 92e2ec59a5940a9e7c67ec5cd29044f811dee40a
Parents: e5fd062
Author: Sandeep More <mo...@apache.org>
Authored: Tue Jan 9 14:51:08 2018 -0500
Committer: Sandeep More <mo...@apache.org>
Committed: Tue Jan 9 14:51:08 2018 -0500

----------------------------------------------------------------------
 .../discovery/ambari/ServiceURLCreator.java     | 32 --------
 .../discovery/ambari/ServiceURLFactory.java     | 75 -----------------
 .../discovery/ambari/WebHdfsUrlCreator.java     | 84 --------------------
 .../discovery/ambari/ServiceURLCreator.java     | 32 ++++++++
 .../discovery/ambari/ServiceURLFactory.java     | 75 +++++++++++++++++
 .../discovery/ambari/WebHdfsUrlCreator.java     | 84 ++++++++++++++++++++
 6 files changed, 191 insertions(+), 191 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/92e2ec59/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLCreator.java
deleted file mode 100644
index 8295155..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLCreator.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import java.util.List;
-
-public interface ServiceURLCreator {
-
-  /**
-   * Creates one or more cluster-specific URLs for the specified service.
-   *
-   * @param service The service identifier.
-   *
-   * @return A List of created URL strings; the list may be empty.
-   */
-  List<String> create(String service);
-
-}

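A hypothetical implementation of the interface above for an imaginary FOO
service, showing the contract (return URLs only for the handled service,
otherwise an empty list):

    public class FooUrlCreator implements ServiceURLCreator {
      @Override
      public List<String> create(String service) {
        if (!"FOO".equals(service)) {
          return java.util.Collections.emptyList();
        }
        return java.util.Collections.singletonList("http://foo-host.example.com:1234/foo");
      }
    }
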
http://git-wip-us.apache.org/repos/asf/knox/blob/92e2ec59/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLFactory.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLFactory.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLFactory.java
deleted file mode 100644
index fa9f89a..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/ServiceURLFactory.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Factory for creating cluster-specific service URLs.
- */
-public class ServiceURLFactory {
-
-  private Map<String, ServiceURLCreator> urlCreators = new HashMap<>();
-
-  private ServiceURLCreator defaultURLCreator = null;
-
-
-  private ServiceURLFactory(AmbariCluster cluster) {
-    // Default URL creator
-    defaultURLCreator = new AmbariDynamicServiceURLCreator(cluster);
-
-    // Custom (internal) URL creators
-    urlCreators.put("WEBHDFS", new WebHdfsUrlCreator(cluster));
-  }
-
-
-  /**
-   * Create a new factory for the specified cluster.
-   *
-   * @param cluster The cluster.
-   *
-   * @return A ServiceURLFactory instance.
-   */
-  public static ServiceURLFactory newInstance(AmbariCluster cluster) {
-    return new ServiceURLFactory(cluster);
-  }
-
-
-  /**
-   * Create one or more cluster-specific URLs for the specified service.
-   *
-   * @param service The service.
-   *
-   * @return A List of service URL strings; the list may be empty.
-   */
-  public List<String> create(String service) {
-    List<String> urls = new ArrayList<>();
-
-    ServiceURLCreator creator = urlCreators.get(service);
-    if (creator == null) {
-      creator = defaultURLCreator;
-    }
-
-    urls.addAll(creator.create(service));
-
-    return urls;
-  }
-
-}

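A minimal usage sketch for the factory above, assuming an already-discovered
AmbariCluster instance (the RESOURCEMANAGER lookup is illustrative):

    ServiceURLFactory factory = ServiceURLFactory.newInstance(cluster);
    List<String> webHdfsUrls = factory.create("WEBHDFS");         // custom WebHdfsUrlCreator
    List<String> rmUrls      = factory.create("RESOURCEMANAGER"); // falls through to the
                                                                  // default dynamic creator
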
http://git-wip-us.apache.org/repos/asf/knox/blob/92e2ec59/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
deleted file mode 100644
index 1d11c66..0000000
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A ServiceURLCreator implementation for WEBHDFS.
- */
-public class WebHdfsUrlCreator implements ServiceURLCreator {
-
-  private static final String SERVICE = "WEBHDFS";
-
-  private AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
-
-  private AmbariCluster cluster = null;
-
-  WebHdfsUrlCreator(AmbariCluster cluster) {
-    this.cluster = cluster;
-  }
-
-  @Override
-  public List<String> create(String service) {
-    List<String> urls = new ArrayList<>();
-
-    if (SERVICE.equals(service)) {
-      AmbariCluster.ServiceConfiguration sc = cluster.getServiceConfiguration("HDFS", "hdfs-site");
-
-      // First, check if it's HA config
-      String nameServices = null;
-      AmbariComponent nameNodeComp = cluster.getComponent("NAMENODE");
-      if (nameNodeComp != null) {
-        nameServices = nameNodeComp.getConfigProperty("dfs.nameservices");
-      }
-
-      if (nameServices != null && !nameServices.isEmpty()) {
-        // If it is an HA configuration
-        Map<String, String> props = sc.getProperties();
-
-        // Name node HTTP addresses are defined as properties of the form:
-        //      dfs.namenode.http-address.<NAMESERVICES>.nn<INDEX>
-        // So, this iterates over the nn<INDEX> properties until no such property is found (since there is no
-        // other way to know how many are defined).
-        int i = 1;
-        String propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
-        while (propertyValue != null) {
-          urls.add(createURL(propertyValue));
-          propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
-        }
-      } else { // If it's not an HA configuration, get the single name node HTTP address
-        urls.add(createURL(sc.getProperties().get("dfs.namenode.http-address")));
-      }
-    }
-
-    return urls;
-  }
-
-  private static String getHANameNodeHttpAddress(Map<String, String> props, String nameServices, int index) {
-    return props.get("dfs.namenode.http-address." + nameServices + ".nn" + index);
-  }
-
-  private static String createURL(String address) {
-    return "http://" + address + "/webhdfs";
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/92e2ec59/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLCreator.java
new file mode 100644
index 0000000..c2a2d22
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLCreator.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import java.util.List;
+
+public interface ServiceURLCreator {
+
+  /**
+   * Creates one or more cluster-specific URLs for the specified service.
+   *
+   * @param service The service identifier.
+   *
+   * @return A List of created URL strings; the list may be empty.
+   */
+  List<String> create(String service);
+
+}
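
For readers following the restructuring, here is a minimal sketch of what an implementation of this interface looks like; the class name, service name, and URL below are hypothetical illustrations, not part of this commit:

    package org.apache.knox.gateway.topology.discovery.ambari;

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical creator returning a fixed URL for a made-up "EXAMPLE" service.
    public class ExampleUrlCreator implements ServiceURLCreator {
      @Override
      public List<String> create(String service) {
        List<String> urls = new ArrayList<>();
        if ("EXAMPLE".equals(service)) {
          urls.add("http://example-host:8080/example");
        }
        return urls; // may be empty, per the interface contract
      }
    }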

http://git-wip-us.apache.org/repos/asf/knox/blob/92e2ec59/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLFactory.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLFactory.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLFactory.java
new file mode 100644
index 0000000..e009585
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLFactory.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Factory for creating cluster-specific service URLs.
+ */
+public class ServiceURLFactory {
+
+  private Map<String, ServiceURLCreator> urlCreators = new HashMap<>();
+
+  private ServiceURLCreator defaultURLCreator = null;
+
+
+  private ServiceURLFactory(AmbariCluster cluster) {
+    // Default URL creator
+    defaultURLCreator = new AmbariDynamicServiceURLCreator(cluster);
+
+    // Custom (internal) URL creators
+    urlCreators.put("WEBHDFS", new WebHdfsUrlCreator(cluster));
+  }
+
+
+  /**
+   * Create a new factory for the specified cluster.
+   *
+   * @param cluster The cluster.
+   *
+   * @return A ServiceURLFactory instance.
+   */
+  public static ServiceURLFactory newInstance(AmbariCluster cluster) {
+    return new ServiceURLFactory(cluster);
+  }
+
+
+  /**
+   * Create one or more cluster-specific URLs for the specified service.
+   *
+   * @param service The service.
+   *
+   * @return A List of service URL strings; the list may be empty.
+   */
+  public List<String> create(String service) {
+    List<String> urls = new ArrayList<>();
+
+    ServiceURLCreator creator = urlCreators.get(service);
+    if (creator == null) {
+      creator = defaultURLCreator;
+    }
+
+    urls.addAll(creator.create(service));
+
+    return urls;
+  }
+
+}
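
A brief usage sketch for the factory, assuming an AmbariCluster instance is already available from discovery; "WEBHDFS" is routed to the custom creator registered in the constructor, while any other service name falls back to the default AmbariDynamicServiceURLCreator:

    import java.util.List;

    public class FactoryUsageDemo {
      // Assumes an AmbariCluster named cluster has been obtained from discovery.
      static List<String> resolveWebHdfs(AmbariCluster cluster) {
        ServiceURLFactory factory = ServiceURLFactory.newInstance(cluster);
        // "WEBHDFS" hits WebHdfsUrlCreator; unregistered names use the default creator.
        return factory.create("WEBHDFS");
      }
    }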

http://git-wip-us.apache.org/repos/asf/knox/blob/92e2ec59/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
new file mode 100644
index 0000000..1c65982
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A ServiceURLCreator implementation for WEBHDFS.
+ */
+public class WebHdfsUrlCreator implements ServiceURLCreator {
+
+  private static final String SERVICE = "WEBHDFS";
+
+  private AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+  private AmbariCluster cluster = null;
+
+  WebHdfsUrlCreator(AmbariCluster cluster) {
+    this.cluster = cluster;
+  }
+
+  @Override
+  public List<String> create(String service) {
+    List<String> urls = new ArrayList<>();
+
+    if (SERVICE.equals(service)) {
+      AmbariCluster.ServiceConfiguration sc = cluster.getServiceConfiguration("HDFS", "hdfs-site");
+
+      // First, check if it's HA config
+      String nameServices = null;
+      AmbariComponent nameNodeComp = cluster.getComponent("NAMENODE");
+      if (nameNodeComp != null) {
+        nameServices = nameNodeComp.getConfigProperty("dfs.nameservices");
+      }
+
+      if (nameServices != null && !nameServices.isEmpty()) {
+        // If it is an HA configuration
+        Map<String, String> props = sc.getProperties();
+
+        // Name node HTTP addresses are defined as properties of the form:
+        //      dfs.namenode.http-address.<NAMESERVICES>.nn<INDEX>
+        // So, this iterates over the nn<INDEX> properties until no such property is found (since there is no
+        // other way to know how many are defined).
+        int i = 1;
+        String propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
+        while (propertyValue != null) {
+          urls.add(createURL(propertyValue));
+          propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
+        }
+      } else { // If it's not an HA configuration, get the single name node HTTP address
+        urls.add(createURL(sc.getProperties().get("dfs.namenode.http-address")));
+      }
+    }
+
+    return urls;
+  }
+
+  private static String getHANameNodeHttpAddress(Map<String, String> props, String nameServices, int index) {
+    return props.get("dfs.namenode.http-address." + nameServices + ".nn" + index);
+  }
+
+  private static String createURL(String address) {
+    return "http://" + address + "/webhdfs";
+  }
+
+}
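
To illustrate the nn<INDEX> probing loop above in isolation, here is a self-contained sketch; the nameservice name and host addresses are made-up values, not taken from the commit:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class NnProbeDemo {
      public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("dfs.namenode.http-address.mycluster.nn1", "nn1.example.com:50070");
        props.put("dfs.namenode.http-address.mycluster.nn2", "nn2.example.com:50070");

        List<String> urls = new ArrayList<>();
        int i = 1;
        // Probe nn1, nn2, ... until a property is missing.
        String address = props.get("dfs.namenode.http-address.mycluster.nn" + i++);
        while (address != null) {
          urls.add("http://" + address + "/webhdfs");
          address = props.get("dfs.namenode.http-address.mycluster.nn" + i++);
        }
        // Prints both HA URLs, in nn1, nn2 order.
        System.out.println(urls);
      }
    }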


[30/53] [abbrv] knox git commit: KNOX-998 - Bring branch up to speed with 0.14.0 RC1

Posted by mo...@apache.org.
KNOX-998 - Bring branch up to speed with 0.14.0 RC1


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/e70904b3
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/e70904b3
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/e70904b3

Branch: refs/heads/master
Commit: e70904b3d32af5df4c55f652187eda6b3719ab37
Parents: 2c69152
Author: Sandeep More <mo...@apache.org>
Authored: Mon Nov 13 10:47:33 2017 -0500
Committer: Sandeep More <mo...@apache.org>
Committed: Mon Nov 13 10:47:33 2017 -0500

----------------------------------------------------------------------
 gateway-adapter/pom.xml                         |   2 +-
 .../service/admin/TopologiesResource.java       |   2 +-
 .../resources/services/nifi/1.4.0/service.xml   |   2 +-
 .../hadoop/gateway/dispatch/NiFiDispatch.java   | 106 ------------------
 .../hadoop/gateway/dispatch/NiFiHaDispatch.java | 111 -------------------
 .../hadoop/gateway/dispatch/NiFiHeaders.java    |  26 -----
 .../gateway/dispatch/NiFiRequestUtil.java       |  89 ---------------
 .../gateway/dispatch/NiFiResponseUtil.java      |  89 ---------------
 .../knox/gateway/dispatch/NiFiDispatch.java     | 106 ++++++++++++++++++
 .../knox/gateway/dispatch/NiFiHaDispatch.java   | 111 +++++++++++++++++++
 .../knox/gateway/dispatch/NiFiHeaders.java      |  26 +++++
 .../knox/gateway/dispatch/NiFiRequestUtil.java  |  89 +++++++++++++++
 .../knox/gateway/dispatch/NiFiResponseUtil.java |  88 +++++++++++++++
 .../src/test/resources/log4j.properties         |   2 +-
 14 files changed, 424 insertions(+), 425 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-adapter/pom.xml
----------------------------------------------------------------------
diff --git a/gateway-adapter/pom.xml b/gateway-adapter/pom.xml
index 4bb62e3..d6bd49e 100644
--- a/gateway-adapter/pom.xml
+++ b/gateway-adapter/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>gateway</artifactId>
         <groupId>org.apache.knox</groupId>
-        <version>0.14.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
     </parent>
     <artifactId>gateway-adapter</artifactId>
     <name>gateway-adapter</name>

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
----------------------------------------------------------------------
diff --git a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
index 9ecd7fc..f960734 100644
--- a/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
+++ b/gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
@@ -145,7 +145,7 @@ public class TopologiesResource {
 
     // Check for existing topology with the same name, to see if it had been generated
     boolean existingGenerated = false;
-    for (org.apache.hadoop.gateway.topology.Topology existingTopology : ts.getTopologies()) {
+    for (org.apache.knox.gateway.topology.Topology existingTopology : ts.getTopologies()) {
       if(existingTopology.getName().equals(id)) {
         existingGenerated = existingTopology.isGenerated();
         break;

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-definitions/src/main/resources/services/nifi/1.4.0/service.xml
----------------------------------------------------------------------
diff --git a/gateway-service-definitions/src/main/resources/services/nifi/1.4.0/service.xml b/gateway-service-definitions/src/main/resources/services/nifi/1.4.0/service.xml
index 2ccc10d..d600e2d 100644
--- a/gateway-service-definitions/src/main/resources/services/nifi/1.4.0/service.xml
+++ b/gateway-service-definitions/src/main/resources/services/nifi/1.4.0/service.xml
@@ -26,5 +26,5 @@
             <rewrite apply="NIFI/nifi/inbound/path/query-other" to="request.url"/>
         </route>
     </routes>
-    <dispatch classname="org.apache.hadoop.gateway.dispatch.NiFiDispatch" ha-classname="org.apache.hadoop.gateway.dispatch.NiFiHaDispatch" />
+    <dispatch classname="org.apache.knox.gateway.dispatch.NiFiDispatch" ha-classname="org.apache.knox.gateway.dispatch.NiFiHaDispatch" />
 </service>

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiDispatch.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiDispatch.java b/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiDispatch.java
deleted file mode 100644
index 013fd9c..0000000
--- a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiDispatch.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.dispatch;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Set;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.gateway.util.MimeTypes;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.entity.ContentType;
-
-public class NiFiDispatch extends DefaultDispatch {
-
-  @Override
-  protected void executeRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse) throws IOException {
-    outboundRequest = NiFiRequestUtil.modifyOutboundRequest(outboundRequest, inboundRequest);
-    HttpResponse inboundResponse = executeOutboundRequest(outboundRequest);
-    writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
-  }
-
-  /**
-   * Overridden to provide a spot to modify the outbound response before its stream is closed.
-   */
-  protected void writeOutboundResponse(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse, HttpResponse inboundResponse) throws IOException {
-    // Copy the client response headers to the server response.
-    outboundResponse.setStatus(inboundResponse.getStatusLine().getStatusCode());
-    Header[] headers = inboundResponse.getAllHeaders();
-    Set<String> excludeHeaders = getOutboundResponseExcludeHeaders();
-    boolean hasExcludeHeaders = false;
-    if ((excludeHeaders != null) && !(excludeHeaders.isEmpty())) {
-      hasExcludeHeaders = true;
-    }
-    for ( Header header : headers ) {
-      String name = header.getName();
-      if (hasExcludeHeaders && excludeHeaders.contains(name.toUpperCase())) {
-        continue;
-      }
-      String value = header.getValue();
-      outboundResponse.addHeader(name, value);
-    }
-
-    HttpEntity entity = inboundResponse.getEntity();
-    if( entity != null ) {
-      outboundResponse.setContentType( getInboundResponseContentType( entity ) );
-      InputStream stream = entity.getContent();
-      try {
-        NiFiResponseUtil.modifyOutboundResponse(inboundRequest, outboundResponse, inboundResponse);
-        writeResponse( inboundRequest, outboundResponse, stream );
-      } finally {
-        closeInboundResponse( inboundResponse, stream );
-      }
-    }
-  }
-
-  /**
-   * Overridden because {@link DefaultDispatch#getInboundResponseContentType(HttpEntity)} has private access, and the method is used by
-   * {@link #writeOutboundResponse(HttpUriRequest, HttpServletRequest, HttpServletResponse, HttpResponse)}.
-   */
-  private String getInboundResponseContentType( final HttpEntity entity ) {
-    String fullContentType = null;
-    if( entity != null ) {
-      ContentType entityContentType = ContentType.get( entity );
-      if( entityContentType != null ) {
-        if( entityContentType.getCharset() == null ) {
-          final String entityMimeType = entityContentType.getMimeType();
-          final String defaultCharset = MimeTypes.getDefaultCharsetForMimeType( entityMimeType );
-          if( defaultCharset != null ) {
-            LOG.usingDefaultCharsetForEntity( entityMimeType, defaultCharset );
-            entityContentType = entityContentType.withCharset( defaultCharset );
-          }
-        } else {
-          LOG.usingExplicitCharsetForEntity( entityContentType.getMimeType(), entityContentType.getCharset() );
-        }
-        fullContentType = entityContentType.toString();
-      }
-    }
-    if( fullContentType == null ) {
-      LOG.unknownResponseEntityContentType();
-    } else {
-      LOG.inboundResponseEntityContentType( fullContentType );
-    }
-    return fullContentType;
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHaDispatch.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHaDispatch.java b/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHaDispatch.java
deleted file mode 100644
index 4272086..0000000
--- a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHaDispatch.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.dispatch;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Set;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.gateway.ha.dispatch.DefaultHaDispatch;
-import org.apache.hadoop.gateway.util.MimeTypes;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.entity.ContentType;
-
-public class NiFiHaDispatch extends DefaultHaDispatch {
-
-  public NiFiHaDispatch() {
-    setServiceRole("NIFI");
-  }
-
-  @Override
-  protected void executeRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse) throws IOException {
-    outboundRequest = NiFiRequestUtil.modifyOutboundRequest(outboundRequest, inboundRequest);
-    HttpResponse inboundResponse = executeOutboundRequest(outboundRequest);
-    writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
-  }
-
-  /**
-   * Overridden to provide a spot to modify the outbound response before its stream is closed.
-   */
-  protected void writeOutboundResponse(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse, HttpResponse inboundResponse) throws IOException {
-    // Copy the client response headers to the server response.
-    outboundResponse.setStatus(inboundResponse.getStatusLine().getStatusCode());
-    Header[] headers = inboundResponse.getAllHeaders();
-    Set<String> excludeHeaders = getOutboundResponseExcludeHeaders();
-    boolean hasExcludeHeaders = false;
-    if ((excludeHeaders != null) && !(excludeHeaders.isEmpty())) {
-      hasExcludeHeaders = true;
-    }
-    for ( Header header : headers ) {
-      String name = header.getName();
-      if (hasExcludeHeaders && excludeHeaders.contains(name.toUpperCase())) {
-        continue;
-      }
-      String value = header.getValue();
-      outboundResponse.addHeader(name, value);
-    }
-
-    HttpEntity entity = inboundResponse.getEntity();
-    if( entity != null ) {
-      outboundResponse.setContentType( getInboundResponseContentType( entity ) );
-      InputStream stream = entity.getContent();
-      try {
-        NiFiResponseUtil.modifyOutboundResponse(inboundRequest, outboundResponse, inboundResponse);
-        writeResponse( inboundRequest, outboundResponse, stream );
-      } finally {
-        closeInboundResponse( inboundResponse, stream );
-      }
-    }
-  }
-
-  /**
-   * Overridden because {@link DefaultDispatch#getInboundResponseContentType(HttpEntity)} has private access, and the method is used by
-   * {@link #writeOutboundResponse(HttpUriRequest, HttpServletRequest, HttpServletResponse, HttpResponse)}.
-   */
-  private String getInboundResponseContentType( final HttpEntity entity ) {
-    String fullContentType = null;
-    if( entity != null ) {
-      ContentType entityContentType = ContentType.get( entity );
-      if( entityContentType != null ) {
-        if( entityContentType.getCharset() == null ) {
-          final String entityMimeType = entityContentType.getMimeType();
-          final String defaultCharset = MimeTypes.getDefaultCharsetForMimeType( entityMimeType );
-          if( defaultCharset != null ) {
-            DefaultDispatch.LOG.usingDefaultCharsetForEntity( entityMimeType, defaultCharset );
-            entityContentType = entityContentType.withCharset( defaultCharset );
-          }
-        } else {
-          DefaultDispatch.LOG.usingExplicitCharsetForEntity( entityContentType.getMimeType(), entityContentType.getCharset() );
-        }
-        fullContentType = entityContentType.toString();
-      }
-    }
-    if( fullContentType == null ) {
-      DefaultDispatch.LOG.unknownResponseEntityContentType();
-    } else {
-      DefaultDispatch.LOG.inboundResponseEntityContentType( fullContentType );
-    }
-    return fullContentType;
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHeaders.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHeaders.java b/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHeaders.java
deleted file mode 100644
index f3e8e68..0000000
--- a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiHeaders.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.dispatch;
-
-class NiFiHeaders {
-  static final String X_FORWARDED_PROTO = "X-Forwarded-Proto";
-  static final String X_FORWARDED_HOST = "X-Forwarded-Server";
-  static final String X_FORWARDED_PORT = "X-Forwarded-Port";
-  static final String X_FORWARDED_CONTEXT = "X-Forwarded-Context";
-  static final String X_PROXIED_ENTITIES_CHAIN = "X-ProxiedEntitiesChain";
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiRequestUtil.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiRequestUtil.java b/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiRequestUtil.java
deleted file mode 100644
index 9fdc425..0000000
--- a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiRequestUtil.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.dispatch;
-
-import java.io.IOException;
-
-import javax.security.auth.Subject;
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.gateway.security.SubjectUtils;
-import org.apache.http.Header;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.methods.RequestBuilder;
-import org.apache.log4j.Logger;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Strings;
-
-class NiFiRequestUtil {
-
-  static HttpUriRequest modifyOutboundRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest) throws IOException {
-    // preserve trailing slash from inbound request in the outbound request
-    if (inboundRequest.getPathInfo().endsWith("/")) {
-      String[] split = outboundRequest.getURI().toString().split("\\?");
-      if (!split[0].endsWith("/")) {
-        outboundRequest = RequestBuilder.copy(outboundRequest).setUri(split[0] + "/" + (split.length == 2 ? "?" + split[1] : "")).build();
-      }
-    }
-    // update the X-Forwarded-Context header to include the Knox-specific context path
-    final Header originalXForwardedContextHeader = outboundRequest.getFirstHeader(NiFiHeaders.X_FORWARDED_CONTEXT);
-    if (originalXForwardedContextHeader != null) {
-      String xForwardedContextHeaderValue = originalXForwardedContextHeader.getValue();
-      if (!Strings.isNullOrEmpty(xForwardedContextHeaderValue)) {
-        // Inspect the inbound request and outbound request to determine the additional context path from the rewrite
-        // rules that needs to be added to the X-Forwarded-Context header to allow proper proxying to NiFi.
-        //
-        // NiFi does its own URL rewriting, and will not work with the context path provided by Knox
-        // (ie, "/gateway/sandbox").
-        //
-        // For example, if Knox has a rewrite rule "*://*:*/**/nifi-app/{**}?{**}", "/nifi-app" needs to be added
-        // to the existing value of the X-Forwarded-Context header, which ends up being "/gateway/sandbox/nifi-app".
-        String inboundRequestPathInfo = inboundRequest.getPathInfo();
-        String outboundRequestUriPath = outboundRequest.getURI().getPath();
-        String outboundRequestUriPathNoTrailingSlash = StringUtils.removeEnd(outboundRequestUriPath, "/");
-        String knoxRouteContext = null;
-        int index = inboundRequestPathInfo.lastIndexOf(outboundRequestUriPathNoTrailingSlash);
-        if (index >= 0) {
-          knoxRouteContext = inboundRequestPathInfo.substring(0, index);
-        } else {
-          Logger.getLogger(NiFiHaDispatch.class.getName()).error(String.format("Unable to find index of %s in %s", outboundRequestUriPathNoTrailingSlash, inboundRequestPathInfo));
-        }
-        outboundRequest.setHeader(NiFiHeaders.X_FORWARDED_CONTEXT, xForwardedContextHeaderValue + knoxRouteContext);
-      }
-    }
-
-    // NiFi requires the header "X-ProxiedEntitiesChain" to be set with the identity or identities of the authenticated requester.
-    // The effective principal (identity) in the requester subject must be added to "X-ProxiedEntitiesChain".
-    // If the request already has a populated "X-ProxiedEntitiesChain" header, the identities must be appended to it.
-    // If the user proxied through Knox is anonymous, the "Anonymous" identity needs to be represented in X-ProxiedEntitiesChain
-    // as empty angle brackets "<>".
-    final Subject subject = SubjectUtils.getCurrentSubject();
-    String effectivePrincipalName = SubjectUtils.getEffectivePrincipalName(subject);
-    outboundRequest.setHeader(NiFiHeaders.X_PROXIED_ENTITIES_CHAIN, Objects.firstNonNull(inboundRequest.getHeader(NiFiHeaders.X_PROXIED_ENTITIES_CHAIN), "") +
-        String.format("<%s>", effectivePrincipalName.equalsIgnoreCase("anonymous") ? "" : effectivePrincipalName));
-
-    // Make sure headers named "Cookie" are removed from the request to NiFi, since NiFi does not use cookies.
-    Header[] cookieHeaders = outboundRequest.getHeaders("Cookie");
-    for (Header cookieHeader : cookieHeaders) {
-      outboundRequest.removeHeader(cookieHeader);
-    }
-    return outboundRequest;
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiResponseUtil.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiResponseUtil.java b/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiResponseUtil.java
deleted file mode 100644
index 38c98b3..0000000
--- a/gateway-service-nifi/src/main/java/org/apache/hadoop/gateway/dispatch/NiFiResponseUtil.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.gateway.dispatch;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Collections;
-import java.util.List;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.http.Header;
-import org.apache.http.HttpResponse;
-import org.apache.http.NameValuePair;
-import org.apache.http.client.utils.URIBuilder;
-
-class NiFiResponseUtil {
-
-  static void modifyOutboundResponse(HttpServletRequest inboundRequest, HttpServletResponse outboundResponse, HttpResponse inboundResponse) throws IOException {
-    // Only want to rewrite the Location header on an HTTP 302
-    if (inboundResponse.getStatusLine().getStatusCode() == HttpServletResponse.SC_FOUND) {
-      Header originalLocationHeader = inboundResponse.getFirstHeader("Location");
-      if (originalLocationHeader != null) {
-        String originalLocation = originalLocationHeader.getValue();
-        URIBuilder originalLocationUriBuilder;
-        try {
-          originalLocationUriBuilder = new URIBuilder(originalLocation);
-        } catch (URISyntaxException e) {
-          throw new RuntimeException("Unable to parse URI from Location header", e);
-        }
-        URIBuilder inboundRequestUriBuilder = null;
-        try {
-          inboundRequestUriBuilder = new URIBuilder(inboundRequest.getRequestURI());
-        } catch (URISyntaxException e) {
-          throw new RuntimeException("Unable to parse the inbound request URI", e);
-        }
-        /*
-         * if the path specified in the Location header from the inbound response contains the inbound request URI's path,
-         * then it's going to the same web context, and the Location header should be updated based on the X_FORWARDED_* headers.
-         */
-        String inboundRequestUriPath = inboundRequestUriBuilder.getPath();
-        String originalLocationUriPath = originalLocationUriBuilder.getPath();
-        if (originalLocationUriPath.contains(inboundRequestUriPath)) {
-          // check for trailing slash of Location header if it exists and preserve it
-          final String trailingSlash = originalLocationUriPath.endsWith("/") ? "/" : "";
-          // retain query params
-          final List<NameValuePair> queryParams = originalLocationUriBuilder.getQueryParams();
-
-          // check for proxy settings
-          final String scheme = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_PROTO);
-          final String host = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_HOST);
-          final String port = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_PORT);
-
-          final String baseContextPath = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_CONTEXT);
-          final String pathInfo = inboundRequest.getPathInfo();
-
-          try {
-            final URI newLocation = new URIBuilder().setScheme(scheme).setHost(host).setPort((StringUtils.isNumeric(port) ? Integer.parseInt(port) : -1)).setPath(
-                baseContextPath + pathInfo + trailingSlash).setParameters(queryParams).build();
-            outboundResponse.setHeader("Location", newLocation.toString());
-          } catch (URISyntaxException e) {
-            throw new RuntimeException("Unable to rewrite Location header in response", e);
-          }
-        }
-      } else {
-        throw new RuntimeException("Received HTTP 302, but response is missing Location header");
-      }
-    }
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiDispatch.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiDispatch.java b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiDispatch.java
new file mode 100644
index 0000000..d939180
--- /dev/null
+++ b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiDispatch.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.dispatch;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.knox.gateway.util.MimeTypes;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.entity.ContentType;
+
+public class NiFiDispatch extends DefaultDispatch {
+
+  @Override
+  protected void executeRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse) throws IOException {
+    outboundRequest = NiFiRequestUtil.modifyOutboundRequest(outboundRequest, inboundRequest);
+    HttpResponse inboundResponse = executeOutboundRequest(outboundRequest);
+    writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
+  }
+
+  /**
+   * Overridden to provide a spot to modify the outbound response before its stream is closed.
+   */
+  protected void writeOutboundResponse(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse, HttpResponse inboundResponse) throws IOException {
+    // Copy the client response headers to the server response.
+    outboundResponse.setStatus(inboundResponse.getStatusLine().getStatusCode());
+    Header[] headers = inboundResponse.getAllHeaders();
+    Set<String> excludeHeaders = getOutboundResponseExcludeHeaders();
+    boolean hasExcludeHeaders = false;
+    if ((excludeHeaders != null) && !(excludeHeaders.isEmpty())) {
+      hasExcludeHeaders = true;
+    }
+    for ( Header header : headers ) {
+      String name = header.getName();
+      if (hasExcludeHeaders && excludeHeaders.contains(name.toUpperCase())) {
+        continue;
+      }
+      String value = header.getValue();
+      outboundResponse.addHeader(name, value);
+    }
+
+    HttpEntity entity = inboundResponse.getEntity();
+    if( entity != null ) {
+      outboundResponse.setContentType( getInboundResponseContentType( entity ) );
+      InputStream stream = entity.getContent();
+      try {
+        NiFiResponseUtil.modifyOutboundResponse(inboundRequest, outboundResponse, inboundResponse);
+        writeResponse( inboundRequest, outboundResponse, stream );
+      } finally {
+        closeInboundResponse( inboundResponse, stream );
+      }
+    }
+  }
+
+  /**
+   * Overridden because {@link DefaultDispatch#getInboundResponseContentType(HttpEntity)} has private access, and the method is used by
+   * {@link #writeOutboundResponse(HttpUriRequest, HttpServletRequest, HttpServletResponse, HttpResponse)}.
+   */
+  private String getInboundResponseContentType( final HttpEntity entity ) {
+    String fullContentType = null;
+    if( entity != null ) {
+      ContentType entityContentType = ContentType.get( entity );
+      if( entityContentType != null ) {
+        if( entityContentType.getCharset() == null ) {
+          final String entityMimeType = entityContentType.getMimeType();
+          final String defaultCharset = MimeTypes.getDefaultCharsetForMimeType( entityMimeType );
+          if( defaultCharset != null ) {
+            LOG.usingDefaultCharsetForEntity( entityMimeType, defaultCharset );
+            entityContentType = entityContentType.withCharset( defaultCharset );
+          }
+        } else {
+          LOG.usingExplicitCharsetForEntity( entityContentType.getMimeType(), entityContentType.getCharset() );
+        }
+        fullContentType = entityContentType.toString();
+      }
+    }
+    if( fullContentType == null ) {
+      LOG.unknownResponseEntityContentType();
+    } else {
+      LOG.inboundResponseEntityContentType( fullContentType );
+    }
+    return fullContentType;
+  }
+}
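
The header-copy loop above skips excluded headers by comparing upper-cased names. A minimal standalone sketch of just that check follows; the exclude entry is an assumed example, stored upper-cased as the comparison implies:

    import java.util.HashSet;
    import java.util.Set;

    public class HeaderExcludeDemo {
      public static void main(String[] args) {
        Set<String> excludeHeaders = new HashSet<>();
        excludeHeaders.add("CONTENT-LENGTH"); // assumed example; entries upper-cased
        String name = "Content-Length";
        boolean skip = !excludeHeaders.isEmpty()
            && excludeHeaders.contains(name.toUpperCase());
        System.out.println(skip); // true: this header would not be copied
      }
    }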

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHaDispatch.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHaDispatch.java b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHaDispatch.java
new file mode 100644
index 0000000..5e1e3a0
--- /dev/null
+++ b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHaDispatch.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.dispatch;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.knox.gateway.ha.dispatch.DefaultHaDispatch;
+import org.apache.knox.gateway.util.MimeTypes;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.entity.ContentType;
+
+public class NiFiHaDispatch extends DefaultHaDispatch {
+
+  public NiFiHaDispatch() {
+    setServiceRole("NIFI");
+  }
+
+  @Override
+  protected void executeRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse) throws IOException {
+    outboundRequest = NiFiRequestUtil.modifyOutboundRequest(outboundRequest, inboundRequest);
+    HttpResponse inboundResponse = executeOutboundRequest(outboundRequest);
+    writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
+  }
+
+  /**
+   * Overridden to provide a spot to modify the outbound response before its stream is closed.
+   */
+  protected void writeOutboundResponse(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest, HttpServletResponse outboundResponse, HttpResponse inboundResponse) throws IOException {
+    // Copy the client response headers to the server response.
+    outboundResponse.setStatus(inboundResponse.getStatusLine().getStatusCode());
+    Header[] headers = inboundResponse.getAllHeaders();
+    Set<String> excludeHeaders = getOutboundResponseExcludeHeaders();
+    boolean hasExcludeHeaders = false;
+    if ((excludeHeaders != null) && !(excludeHeaders.isEmpty())) {
+      hasExcludeHeaders = true;
+    }
+    for ( Header header : headers ) {
+      String name = header.getName();
+      if (hasExcludeHeaders && excludeHeaders.contains(name.toUpperCase())) {
+        continue;
+      }
+      String value = header.getValue();
+      outboundResponse.addHeader(name, value);
+    }
+
+    HttpEntity entity = inboundResponse.getEntity();
+    if( entity != null ) {
+      outboundResponse.setContentType( getInboundResponseContentType( entity ) );
+      InputStream stream = entity.getContent();
+      try {
+        NiFiResponseUtil.modifyOutboundResponse(inboundRequest, outboundResponse, inboundResponse);
+        writeResponse( inboundRequest, outboundResponse, stream );
+      } finally {
+        closeInboundResponse( inboundResponse, stream );
+      }
+    }
+  }
+
+  /**
+   * Overridden because {@link DefaultDispatch#getInboundResponseContentType(HttpEntity)} has private access, and the method is used by
+   * {@link #writeOutboundResponse(HttpUriRequest, HttpServletRequest, HttpServletResponse, HttpResponse)}.
+   */
+  private String getInboundResponseContentType( final HttpEntity entity ) {
+    String fullContentType = null;
+    if( entity != null ) {
+      ContentType entityContentType = ContentType.get( entity );
+      if( entityContentType != null ) {
+        if( entityContentType.getCharset() == null ) {
+          final String entityMimeType = entityContentType.getMimeType();
+          final String defaultCharset = MimeTypes.getDefaultCharsetForMimeType( entityMimeType );
+          if( defaultCharset != null ) {
+            DefaultDispatch.LOG.usingDefaultCharsetForEntity( entityMimeType, defaultCharset );
+            entityContentType = entityContentType.withCharset( defaultCharset );
+          }
+        } else {
+          DefaultDispatch.LOG.usingExplicitCharsetForEntity( entityContentType.getMimeType(), entityContentType.getCharset() );
+        }
+        fullContentType = entityContentType.toString();
+      }
+    }
+    if( fullContentType == null ) {
+      DefaultDispatch.LOG.unknownResponseEntityContentType();
+    } else {
+      DefaultDispatch.LOG.inboundResponseEntityContentType( fullContentType );
+    }
+    return fullContentType;
+  }
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHeaders.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHeaders.java b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHeaders.java
new file mode 100644
index 0000000..2de967e
--- /dev/null
+++ b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiHeaders.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.dispatch;
+
+class NiFiHeaders {
+  static final String X_FORWARDED_PROTO = "X-Forwarded-Proto";
+  static final String X_FORWARDED_HOST = "X-Forwarded-Server";
+  static final String X_FORWARDED_PORT = "X-Forwarded-Port";
+  static final String X_FORWARDED_CONTEXT = "X-Forwarded-Context";
+  static final String X_PROXIED_ENTITIES_CHAIN = "X-ProxiedEntitiesChain";
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiRequestUtil.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiRequestUtil.java b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiRequestUtil.java
new file mode 100644
index 0000000..7df3a09
--- /dev/null
+++ b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiRequestUtil.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.dispatch;
+
+import java.io.IOException;
+
+import javax.security.auth.Subject;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.knox.gateway.security.SubjectUtils;
+import org.apache.http.Header;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.client.methods.RequestBuilder;
+import org.apache.log4j.Logger;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Strings;
+
+class NiFiRequestUtil {
+
+  static HttpUriRequest modifyOutboundRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest) throws IOException {
+    // preserve trailing slash from inbound request in the outbound request
+    if (inboundRequest.getPathInfo().endsWith("/")) {
+      String[] split = outboundRequest.getURI().toString().split("\\?");
+      if (!split[0].endsWith("/")) {
+        outboundRequest = RequestBuilder.copy(outboundRequest).setUri(split[0] + "/" + (split.length == 2 ? "?" + split[1] : "")).build();
+      }
+    }
+    // update the X-Forwarded-Context header to include the Knox-specific context path
+    final Header originalXForwardedContextHeader = outboundRequest.getFirstHeader(NiFiHeaders.X_FORWARDED_CONTEXT);
+    if (originalXForwardedContextHeader != null) {
+      String xForwardedContextHeaderValue = originalXForwardedContextHeader.getValue();
+      if (!Strings.isNullOrEmpty(xForwardedContextHeaderValue)) {
+        // Inspect the inbound request and outbound request to determine the additional context path from the rewrite
+        // rules that needs to be added to the X-Forwarded-Context header to allow proper proxying to NiFi.
+        //
+        // NiFi does its own URL rewriting, and will not work with the context path provided by Knox
+        // (ie, "/gateway/sandbox").
+        //
+        // For example, if Knox has a rewrite rule "*://*:*/**/nifi-app/{**}?{**}", "/nifi-app" needs to be added
+        // to the existing value of the X-Forwarded-Context header, which ends up being "/gateway/sandbox/nifi-app".
+        String inboundRequestPathInfo = inboundRequest.getPathInfo();
+        String outboundRequestUriPath = outboundRequest.getURI().getPath();
+        String outboundRequestUriPathNoTrailingSlash = StringUtils.removeEnd(outboundRequestUriPath, "/");
+        String knoxRouteContext = null;
+        int index = inboundRequestPathInfo.lastIndexOf(outboundRequestUriPathNoTrailingSlash);
+        if (index >= 0) {
+          knoxRouteContext = inboundRequestPathInfo.substring(0, index);
+        } else {
+          Logger.getLogger(NiFiHaDispatch.class.getName()).error(String.format("Unable to find index of %s in %s", outboundRequestUriPathNoTrailingSlash, inboundRequestPathInfo));
+        }
+        outboundRequest.setHeader(NiFiHeaders.X_FORWARDED_CONTEXT, xForwardedContextHeaderValue + knoxRouteContext);
+      }
+    }
+
+    // NiFi requires the header "X-ProxiedEntitiesChain" to be set with the identity or identities of the authenticated requester.
+    // The effective principal (identity) in the requester subject must be added to "X-ProxiedEntitiesChain".
+    // If the request already has a populated "X-ProxiedEntitiesChain" header, the identities must be appended to it.
+    // If the user proxied through Knox is anonymous, the "Anonymous" identity needs to be represented in X-ProxiedEntitiesChain
+    // as empty angle brackets "<>".
+    final Subject subject = SubjectUtils.getCurrentSubject();
+    String effectivePrincipalName = SubjectUtils.getEffectivePrincipalName(subject);
+    outboundRequest.setHeader(NiFiHeaders.X_PROXIED_ENTITIES_CHAIN, Objects.firstNonNull(inboundRequest.getHeader(NiFiHeaders.X_PROXIED_ENTITIES_CHAIN), "") +
+        String.format("<%s>", effectivePrincipalName.equalsIgnoreCase("anonymous") ? "" : effectivePrincipalName));
+
+    // Make sure headers named "Cookie" are removed from the request to NiFi, since NiFi does not use cookies.
+    Header[] cookieHeaders = outboundRequest.getHeaders("Cookie");
+    for (Header cookieHeader : cookieHeaders) {
+      outboundRequest.removeHeader(cookieHeader);
+    }
+    return outboundRequest;
+  }
+}
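
A minimal, self-contained sketch of the X-ProxiedEntitiesChain composition implemented above; the existing chain value and user name are hypothetical, and the format string mirrors the one in modifyOutboundRequest:

    class ProxiedEntitiesChainExample {
      public static void main(String[] args) {
        // Hypothetical values: an upstream proxy already added "<admin>", and Knox authenticated "alice".
        String existingChain = "<admin>";   // empty string when the inbound header is absent
        String principal = "alice";         // "anonymous" is represented as empty angle brackets
        String chain = existingChain
            + String.format("<%s>", principal.equalsIgnoreCase("anonymous") ? "" : principal);
        System.out.println(chain);          // prints <admin><alice>
      }
    }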

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiResponseUtil.java
----------------------------------------------------------------------
diff --git a/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiResponseUtil.java b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiResponseUtil.java
new file mode 100644
index 0000000..b2d9ebb
--- /dev/null
+++ b/gateway-service-nifi/src/main/java/org/apache/knox/gateway/dispatch/NiFiResponseUtil.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.gateway.dispatch;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URIBuilder;
+
+class NiFiResponseUtil {
+
+  static void modifyOutboundResponse(HttpServletRequest inboundRequest, HttpServletResponse outboundResponse, HttpResponse inboundResponse) throws IOException {
+    // Only rewrite the Location header on an HTTP 302 response.
+    if (inboundResponse.getStatusLine().getStatusCode() == HttpServletResponse.SC_FOUND) {
+      Header originalLocationHeader = inboundResponse.getFirstHeader("Location");
+      if (originalLocationHeader != null) {
+        String originalLocation = originalLocationHeader.getValue();
+        URIBuilder originalLocationUriBuilder;
+        try {
+          originalLocationUriBuilder = new URIBuilder(originalLocation);
+        } catch (URISyntaxException e) {
+          throw new RuntimeException("Unable to parse URI from Location header", e);
+        }
+        URIBuilder inboundRequestUriBuilder = null;
+        try {
+          inboundRequestUriBuilder = new URIBuilder(inboundRequest.getRequestURI());
+        } catch (URISyntaxException e) {
+          throw new RuntimeException("Unable to parse the inbound request URI", e);
+        }
+        /*
+         * If the path specified in the Location header from the inbound response contains the path of the inbound request's URI,
+         * then it points to the same web context, and the Location header should be updated based on the X-Forwarded-* headers.
+         */
+        String inboundRequestUriPath = inboundRequestUriBuilder.getPath();
+        String originalLocationUriPath = originalLocationUriBuilder.getPath();
+        if (originalLocationUriPath.contains(inboundRequestUriPath)) {
+          // check for trailing slash of Location header if it exists and preserve it
+          final String trailingSlash = originalLocationUriPath.endsWith("/") ? "/" : "";
+          // retain query params
+          final List<NameValuePair> queryParams = originalLocationUriBuilder.getQueryParams();
+
+          // check for proxy settings
+          final String scheme = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_PROTO);
+          final String host = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_HOST);
+          final String port = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_PORT);
+
+          final String baseContextPath = inboundRequest.getHeader(NiFiHeaders.X_FORWARDED_CONTEXT);
+          final String pathInfo = inboundRequest.getPathInfo();
+
+          try {
+            final URI newLocation = new URIBuilder().setScheme(scheme).setHost(host).setPort((StringUtils.isNumeric(port) ? Integer.parseInt(port) : -1)).setPath(
+                baseContextPath + pathInfo + trailingSlash).setParameters(queryParams).build();
+            outboundResponse.setHeader("Location", newLocation.toString());
+          } catch (URISyntaxException e) {
+            throw new RuntimeException("Unable to rewrite Location header in response", e);
+          }
+        }
+      } else {
+        throw new RuntimeException("Received HTTP 302, but response is missing Location header");
+      }
+    }
+  }
+}
+
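
A worked sketch of the Location rewrite above, using hypothetical X-Forwarded-* values (host, port, and paths are illustrative; the dispatch reads the real values from the inbound request headers):

    import java.net.URI;
    import org.apache.http.client.utils.URIBuilder;

    class LocationRewriteExample {
      public static void main(String[] args) throws Exception {
        URI newLocation = new URIBuilder()
            .setScheme("https")                                   // X-Forwarded-Proto
            .setHost("knox.example.com")                          // X-Forwarded-Host
            .setPort(8443)                                        // X-Forwarded-Port (-1 when non-numeric)
            .setPath("/gateway/sandbox" + "/nifi-app/nifi" + "/") // context + pathInfo + preserved trailing slash
            .build();
        System.out.println(newLocation);  // https://knox.example.com:8443/gateway/sandbox/nifi-app/nifi/
      }
    }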

http://git-wip-us.apache.org/repos/asf/knox/blob/e70904b3/gateway-test/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/resources/log4j.properties b/gateway-test/src/test/resources/log4j.properties
index e500707..f3ee344 100644
--- a/gateway-test/src/test/resources/log4j.properties
+++ b/gateway-test/src/test/resources/log4j.properties
@@ -24,7 +24,7 @@ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%5p [%c] %m%n
 
 #log4j.logger.org.apache.knox.gateway=DEBUG
-#log4j.logger.org.apache.hadoop.test=DEBUG
+#log4j.logger.org.apache.knox.test=DEBUG
 #log4j.logger.org.apache.knox.gateway.http=TRACE
 #log4j.logger.org.apache.knox.gateway.http.request.body=OFF
 #log4j.logger.org.apache.knox.gateway.http.response.body=OFF


[02/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
index 224eb1c,0000000..b73b1b7
mode 100644,000000..100644
--- a/gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
+++ b/gateway-service-knoxtoken/src/test/java/org/apache/knox/gateway/service/knoxtoken/TokenServiceResourceTest.java
@@@ -1,307 -1,0 +1,510 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxtoken;
 +
 +import org.apache.knox.gateway.service.knoxtoken.TokenResource;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
++import org.apache.knox.gateway.security.PrimaryPrincipal;
++
 +import org.easymock.EasyMock;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +import java.util.Map;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.ServletContext;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.ws.rs.core.Response;
 +
 +import static org.junit.Assert.*;
 +
 +import java.io.PrintWriter;
 +import java.io.StringWriter;
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.Principal;
++import java.security.cert.X509Certificate;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +
 +/**
 + * Some tests for the token service
 + */
 +public class TokenServiceResourceTest {
 +
 +  protected static RSAPublicKey publicKey;
 +  protected static RSAPrivateKey privateKey;
 +
 +  @BeforeClass
 +  public static void setup() throws Exception {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(1024);
 +    KeyPair keyPair = kpg.generateKeyPair();
 +
 +    publicKey = (RSAPublicKey) keyPair.getPublic();
 +    privateKey = (RSAPrivateKey) keyPair.getPrivate();
 +  }
 +
 +  @Test
 +  public void testTokenService() throws Exception {
 +    Assert.assertTrue(true);
 +  }
 +
 +  @Test
 +  public void testClientData() throws Exception {
 +    TokenResource tr = new TokenResource();
 +
 +    Map<String,Object> clientDataMap = new HashMap<>();
 +    tr.addClientDataToMap("cookie.name=hadoop-jwt,test=value".split(","), clientDataMap);
 +    Assert.assertEquals(2, clientDataMap.size());
 +
 +    clientDataMap = new HashMap<>();
 +    tr.addClientDataToMap("cookie.name=hadoop-jwt".split(","), clientDataMap);
 +    Assert.assertEquals(1, clientDataMap.size());
 +
 +    clientDataMap = new HashMap<>();
 +    tr.addClientDataToMap("".split(","), clientDataMap);
 +    Assert.assertEquals(0, clientDataMap.size());
 +  }
 +
 +  @Test
 +  public void testGetToken() throws Exception {
 +    TokenResource tr = new TokenResource();
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    //tr.context = context;
 +    // tr.init();
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response);
 +
 +    tr.request = request;
 +    tr.response = response;
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(200, retResponse.getStatus());
 +
 +    // Parse the response
 +    String retString = writer.toString();
 +    String accessToken = getTagValue(retString, "access_token");
 +    assertNotNull(accessToken);
 +    String expiry = getTagValue(retString, "expires_in");
 +    assertNotNull(expiry);
 +
 +    // Verify the token
 +    JWTToken parsedToken = new JWTToken(accessToken);
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +  }
 +
 +  @Test
 +  public void testAudiences() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn("recipient1,recipient2");
 +    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    StringWriter writer = new StringWriter();
 +    PrintWriter printWriter = new PrintWriter(writer);
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    EasyMock.expect(response.getWriter()).andReturn(printWriter);
 +
 +    EasyMock.replay(principal, services, context, request, response);
 +
 +    TokenResource tr = new TokenResource();
 +    tr.request = request;
 +    tr.response = response;
 +    tr.context = context;
 +    tr.init();
 +
 +    // Issue a token
 +    Response retResponse = tr.doGet();
 +
 +    assertEquals(200, retResponse.getStatus());
 +
 +    // Parse the response
 +    String retString = writer.toString();
 +    String accessToken = getTagValue(retString, "access_token");
 +    assertNotNull(accessToken);
 +    String expiry = getTagValue(retString, "expires_in");
 +    assertNotNull(expiry);
 +
 +    // Verify the token
 +    JWTToken parsedToken = new JWTToken(accessToken);
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +
 +    // Verify the audiences
 +    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
 +    assertEquals(2, audiences.size());
 +    assertTrue(audiences.contains("recipient1"));
 +    assertTrue(audiences.contains("recipient2"));
 +  }
 +
++  @Test
++  public void testAudiencesWhitespace() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.audiences")).andReturn(" recipient1, recipient2 ");
++    EasyMock.expect(context.getInitParameter("knox.token.ttl")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.target.url")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knox.token.client.data")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWTToken parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    // Verify the audiences
++    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
++    assertEquals(2, audiences.size());
++    assertTrue(audiences.contains("recipient1"));
++    assertTrue(audiences.contains("recipient2"));
++  }
++
++  @Test
++  public void testValidClientCert() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.client.cert.required")).andReturn("true");
++    EasyMock.expect(context.getInitParameter("knox.token.allowed.principals")).andReturn("CN=localhost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US");
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    X509Certificate trustedCertMock = EasyMock.createMock(X509Certificate.class);
++    EasyMock.expect(trustedCertMock.getSubjectDN()).andReturn(new PrimaryPrincipal("CN=localhost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US")).anyTimes();
++    ArrayList<X509Certificate> certArrayList = new ArrayList<X509Certificate>();
++    certArrayList.add(trustedCertMock);
++    X509Certificate[] certs = {};
++    EasyMock.expect(request.getAttribute("javax.servlet.request.X509Certificate")).andReturn(certArrayList.toArray(certs)).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response, trustedCertMock);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(200, retResponse.getStatus());
++
++    // Parse the response
++    String retString = writer.toString();
++    String accessToken = getTagValue(retString, "access_token");
++    assertNotNull(accessToken);
++    String expiry = getTagValue(retString, "expires_in");
++    assertNotNull(expiry);
++
++    // Verify the token
++    JWTToken parsedToken = new JWTToken(accessToken);
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++  }
++
++  @Test
++  public void testValidClientCertWrongUser() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.client.cert.required")).andReturn("true");
++    EasyMock.expect(context.getInitParameter("knox.token.allowed.principals")).andReturn("CN=remotehost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US");
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    X509Certificate trustedCertMock = EasyMock.createMock(X509Certificate.class);
++    EasyMock.expect(trustedCertMock.getSubjectDN()).andReturn(new PrimaryPrincipal("CN=localhost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US")).anyTimes();
++    ArrayList<X509Certificate> certArrayList = new ArrayList<X509Certificate>();
++    certArrayList.add(trustedCertMock);
++    X509Certificate[] certs = {};
++    EasyMock.expect(request.getAttribute("javax.servlet.request.X509Certificate")).andReturn(certArrayList.toArray(certs)).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response, trustedCertMock);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(403, retResponse.getStatus());
++  }
++
++  @Test
++  public void testMissingClientCert() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knox.token.client.cert.required")).andReturn("true");
++    EasyMock.expect(context.getInitParameter("knox.token.allowed.principals")).andReturn("CN=remotehost, OU=Test, O=Hadoop, L=Test, ST=Test, C=US");
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++    EasyMock.expect(request.getAttribute("javax.servlet.request.X509Certificate")).andReturn(null).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    StringWriter writer = new StringWriter();
++    PrintWriter printWriter = new PrintWriter(writer);
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    EasyMock.expect(response.getWriter()).andReturn(printWriter);
++
++    EasyMock.replay(principal, services, context, request, response);
++
++    TokenResource tr = new TokenResource();
++    tr.request = request;
++    tr.response = response;
++    tr.context = context;
++    tr.init();
++
++    // Issue a token
++    Response retResponse = tr.doGet();
++
++    assertEquals(403, retResponse.getStatus());
++  }
++
 +  private String getTagValue(String token, String tagName) {
 +    String searchString = tagName + "\":";
 +    String value = token.substring(token.indexOf(searchString) + searchString.length());
 +    if (value.startsWith("\"")) {
 +      value = value.substring(1);
 +    }
 +    if (value.contains("\"")) {
 +      return value.substring(0, value.indexOf("\""));
 +    } else if (value.contains(",")) {
 +      return value.substring(0, value.indexOf(","));
 +    } else {
 +      return value.substring(0, value.length() - 1);
 +    }
 +  }
 +
 +  private static class TestJWTokenAuthority implements JWTokenAuthority {
 +
 +    private RSAPublicKey publicKey;
 +    private RSAPrivateKey privateKey;
 +
 +    public TestJWTokenAuthority(RSAPublicKey publicKey, RSAPrivateKey privateKey) {
 +      this.publicKey = publicKey;
 +      this.privateKey = privateKey;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Subject subject, String algorithm)
 +      throws TokenServiceException {
 +      Principal p = (Principal) subject.getPrincipals().toArray()[0];
 +      return issueToken(p, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, null, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, audience, algorithm, -1);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      ArrayList<String> audiences = null;
 +      if (audience != null) {
 +        audiences = new ArrayList<String>();
 +        audiences.add(audience);
 +      }
 +      return issueToken(p, audiences, algorithm, expires);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, List<String> audiences, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      String[] claimArray = new String[4];
 +      claimArray[0] = "KNOXSSO";
 +      claimArray[1] = p.getName();
 +      claimArray[2] = null;
 +      if (expires == -1) {
 +        claimArray[3] = null;
 +      } else {
 +        claimArray[3] = String.valueOf(expires);
 +      }
 +
 +      JWTToken token = null;
 +      if ("RS256".equals(algorithm)) {
 +        token = new JWTToken("RS256", claimArray, audiences);
 +        JWSSigner signer = new RSASSASigner(privateKey);
 +        token.sign(signer);
 +      } else {
 +        throw new TokenServiceException("Cannot issue token - Unsupported algorithm");
 +      }
 +
 +      return token;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm, long expiry)
 +        throws TokenServiceException {
 +      return issueToken(p, Collections.<String>emptyList(), algorithm, expiry);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token, RSAPublicKey publicKey) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +  }
 +
 +
 +}
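
A minimal, self-contained sketch of the response shape that getTagValue() above can scan; the body and field values here are hypothetical (only access_token and expires_in are asserted by the tests), and the helper is copied from the test:

    class GetTagValueExample {
      public static void main(String[] args) {
        // Hypothetical token response body; real values come from TokenResource.doGet().
        String body = "{\"access_token\":\"eyJhbGciOi...\",\"token_type\":\"Bearer\",\"expires_in\":1489942188233}";
        System.out.println(getTagValue(body, "access_token")); // eyJhbGciOi...
        System.out.println(getTagValue(body, "expires_in"));   // 1489942188233
      }

      // Copied from TokenServiceResourceTest: a lightweight scan, not a full JSON parser.
      static String getTagValue(String token, String tagName) {
        String searchString = tagName + "\":";
        String value = token.substring(token.indexOf(searchString) + searchString.length());
        if (value.startsWith("\"")) {
          value = value.substring(1);
        }
        if (value.contains("\"")) {
          return value.substring(0, value.indexOf("\""));
        } else if (value.contains(",")) {
          return value.substring(0, value.indexOf(","));
        } else {
          return value.substring(0, value.length() - 1);
        }
      }
    }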

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-shell-release/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-util-urltemplate/src/main/java/org/apache/knox/gateway/util/urltemplate/Parser.java
----------------------------------------------------------------------
diff --cc gateway-util-urltemplate/src/main/java/org/apache/knox/gateway/util/urltemplate/Parser.java
index 47ed00c,0000000..1d58978
mode 100644,000000..100644
--- a/gateway-util-urltemplate/src/main/java/org/apache/knox/gateway/util/urltemplate/Parser.java
+++ b/gateway-util-urltemplate/src/main/java/org/apache/knox/gateway/util/urltemplate/Parser.java
@@@ -1,345 -1,0 +1,349 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.util.urltemplate;
 +
 +import org.apache.knox.gateway.i18n.resources.ResourcesFactory;
 +
 +import java.net.URISyntaxException;
 +import java.util.StringTokenizer;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
 +//NOTE: Instances are not thread-safe but are reusable.  The static parse methods are thread-safe.
 +//NOTE: Ignores matrix parameters at this point.
 +public class Parser {
 +
 +  /*
 +      ^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?
 +       12            3  4          5       6  7        8 9
 +
 +   The numbers in the second line above are only to assist readability;
 +   they indicate the reference points for each subexpression (i.e., each
 +   paired parenthesis).  We refer to the value matched for subexpression
 +   <n> as $<n>.  For example, matching the above expression to
 +
 +      http://www.ics.uci.edu/pub/ietf/uri/#Related
 +
 +   results in the following subexpression matches:
 +
 +      $1 = http:
 +      $2 = http
 +      $3 = //www.ics.uci.edu
 +      $4 = www.ics.uci.edu
 +      $5 = /pub/ietf/uri/
 +      $6 = <undefined>
 +      $7 = <undefined>
 +      $8 = #Related
 +      $9 = Related
 +
 +   where <undefined> indicates that the component is not present, as is
 +   the case for the query component in the above example.  Therefore, we
 +   can determine the value of the five components as
 +
 +      scheme    = $2
 +      authority = $4
 +      path      = $5
 +      query     = $7
 +      fragment  = $9
 +   */
 +
 +  private static final Resources RES = ResourcesFactory.get( Resources.class );
 +
 +  public static final char TEMPLATE_OPEN_MARKUP = '{';
 +  public static final char TEMPLATE_CLOSE_MARKUP = '}';
 +  public static final char NAME_PATTERN_SEPARATOR = '=';
 +
 +  private static final int MATCH_GROUP_SCHEME = 1;
 +  private static final int MATCH_GROUP_SCHEME_NAKED = 2;
 +  private static final int MATCH_GROUP_AUTHORITY = 3;
 +  private static final int MATCH_GROUP_AUTHORITY_NAKED = 4;
 +  private static final int MATCH_GROUP_PATH = 5;
 +  private static final int MATCH_GROUP_QUERY = 6;
 +  private static final int MATCH_GROUP_QUERY_NAKED = 7;
 +  private static final int MATCH_GROUP_FRAGMENT = 8;
 +  private static final int MATCH_GROUP_FRAGMENT_NAKED = 9;
 +
 +  private static final Pattern PATTERN = Pattern.compile( "^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?" );
 +
 +  @Deprecated
 +  public static final Template parse( String template ) throws URISyntaxException {
 +    return Parser.parseTemplate( template );
 +  }
 +
 +  public static final Template parseTemplate( final String template ) throws URISyntaxException {
 +    Builder builder = new Builder( template );
 +    return parseInternal( builder );
 +  }
 +
 +  public static final Template parseLiteral( final String literal ) throws URISyntaxException {
 +    Builder builder = new Builder( literal );
 +    builder.setLiteral( true );
 +    return parseInternal( builder );
 +  }
 +
 +  private static final Template parseInternal( final Builder builder ) throws URISyntaxException {
 +    String original = builder.getOriginal();
 +    builder.setHasScheme( false );
 +    builder.setHasAuthority( false ); // Assume no until found otherwise.  If true, will cause // in output URL.
 +    builder.setIsAuthorityOnly( false );
 +    builder.setIsAbsolute( false ); // Assume relative until found otherwise.  If true, will cause leading / in output URL.
 +    builder.setIsDirectory( false ); // Assume a file path until found otherwise.  If true, will cause trailing / in output URL.
 +    builder.setHasQuery( false ); // Assume no ? until found otherwise.  If true, will cause ? in output URL.
 +    builder.setHasFragment( false ); // Assume no # until found otherwise.  If true, will cause # in output URL.
 +    Matcher match = PATTERN.matcher( original );
 +    if( match.matches() ) {
 +      consumeSchemeMatch( builder, match );
 +      consumeAuthorityMatch( builder, match );
 +      consumePathMatch( builder, match );
 +      consumeQueryMatch( builder, match );
 +      consumeFragmentMatch( builder, match );
 +      fixNakedAuthority( builder );
 +    } else {
 +      throw new URISyntaxException( original, RES.parseTemplateFailureReason( original ) );
 +    }
 +    return builder.build();
 +  }
 +
 +  private static final void fixNakedAuthority( final Builder builder ) {
 +    if( builder.getHasScheme() &&
 +        !builder.getHasAuthority() &&
 +        !builder.getIsAbsolute() &&
 +        !builder.getIsDirectory() &&
 +        ( builder.getPath().size() == 1 ) &&
 +        !builder.getHasQuery() &&
 +        !builder.getHasFragment() ) {
 +      final Scheme scheme = builder.getScheme();
 +      builder.setHasScheme( false );
 +      builder.setHost( makeTokenSingular( scheme.getToken() ) );
 +      Path path = builder.getPath().remove( 0 );
 +      builder.setPort( makeTokenSingular( path.getToken() ) );
 +      builder.setIsAuthorityOnly( true );
 +    }
 +  }
 +
 +  private static final Token makeTokenSingular( Token token ) {
 +    final String effectivePattern = token.getEffectivePattern();
 +    if( Segment.GLOB_PATTERN.equals( effectivePattern ) ) {
 +      token = new Token( token.getParameterName(), token.getOriginalPattern(), Segment.STAR_PATTERN, token.isLiteral() );
 +    }
 +    return token;
 +  }
 +
 +  private static void consumeSchemeMatch( final Builder builder, final Matcher match ) {
 +    if( match.group( MATCH_GROUP_SCHEME ) != null ) {
 +      builder.setHasScheme( true );
 +      consumeSchemeToken( builder, match.group( MATCH_GROUP_SCHEME_NAKED ) );
 +    }
 +  }
 +
 +  private static void consumeSchemeToken( final Builder builder, final String token ) {
 +    if( token != null ) {
 +      Token t = parseTemplateToken( builder, token, Segment.STAR_PATTERN );
 +      builder.setScheme( t );
 +    }
 +  }
 +
 +  private static void consumeAuthorityMatch( final Builder builder, final Matcher match ) {
 +    if( match.group( MATCH_GROUP_AUTHORITY ) != null ) {
 +      builder.setHasAuthority( true );
 +      consumeAuthorityToken( builder, match.group( MATCH_GROUP_AUTHORITY_NAKED ) );
 +    }
 +  }
 +
 +  private static void consumeAuthorityToken( final Builder builder, final String token ) {
 +    if( token != null ) {
 +      Token paramPattern;
 +      String[] usernamePassword = null;
 +      String[] hostPort;
 +      String[] userAddr = split( token, '@' );
 +      if( userAddr.length == 1 ) {
 +        hostPort = split( userAddr[ 0 ], ':' );
 +      } else {
 +        usernamePassword = split( userAddr[ 0 ], ':' );
 +        hostPort = split( userAddr[ 1 ], ':' );
 +      }
 +      if( usernamePassword != null ) {
 +        if( usernamePassword[ 0 ].length() > 0 ) {
 +          paramPattern = makeTokenSingular( parseTemplateToken( builder, usernamePassword[ 0 ], Segment.STAR_PATTERN ) );
 +          builder.setUsername( paramPattern );
 +        }
 +        if( usernamePassword.length > 1 && usernamePassword[ 1 ].length() > 0 ) {
 +          paramPattern = makeTokenSingular( parseTemplateToken( builder, usernamePassword[ 1 ], Segment.STAR_PATTERN ) );
 +          builder.setPassword( paramPattern );
 +        }
 +      }
 +      if( hostPort[ 0 ].length() > 0 ) {
 +        paramPattern = makeTokenSingular( parseTemplateToken( builder, hostPort[ 0 ], Segment.STAR_PATTERN ) );
 +        builder.setHost( paramPattern );
 +      }
 +      if( hostPort.length > 1 && hostPort[ 1 ].length() > 0 ) {
 +        paramPattern = makeTokenSingular( parseTemplateToken( builder, hostPort[ 1 ], Segment.STAR_PATTERN ) );
 +        builder.setPort( paramPattern );
 +      }
 +    }
 +  }
 +
 +  private static void consumePathMatch( final Builder builder, final Matcher match ) {
 +    String path = match.group( MATCH_GROUP_PATH );
 +    if( path != null ) {
 +      builder.setIsAbsolute( path.startsWith( "/" ) );
 +      builder.setIsDirectory( path.endsWith( "/" ) );
 +      consumePathToken( builder, path );
 +    }
 +  }
 +
 +  private static final void consumePathToken( final Builder builder, final String token ) {
 +    if( token != null ) {
 +      final StringTokenizer tokenizer = new StringTokenizer( token, "/" );
 +      while( tokenizer.hasMoreTokens() ) {
 +        consumePathSegment( builder, tokenizer.nextToken() );
 +      }
 +    }
 +  }
 +
 +  private static final void consumePathSegment( final Builder builder, final String token ) {
 +    if( token != null ) {
 +      final Token t = parseTemplateToken( builder, token, Segment.GLOB_PATTERN );
 +      builder.addPath( t );
 +    }
 +  }
 +
 +  private static void consumeQueryMatch( final Builder builder, Matcher match ) {
 +    if( match.group( MATCH_GROUP_QUERY ) != null ) {
 +      builder.setHasQuery( true );
 +      consumeQueryToken( builder, match.group( MATCH_GROUP_QUERY_NAKED ) );
 +    }
 +  }
 +
 +  private static void consumeQueryToken( final Builder builder, String token ) {
 +    if( token != null ) {
-       StringTokenizer tokenizer = new StringTokenizer( token, "?&" );
-       while( tokenizer.hasMoreTokens() ) {
-         consumeQuerySegment( builder, tokenizer.nextToken() );
++      //add "&amp;" as a delimiter
++      String[] tokens = token.split("(&amp;|\\?|&)");
++      if (tokens != null){
++        for (String nextToken : tokens){
++          consumeQuerySegment(builder,nextToken);
++        }
 +      }
++
 +    }
 +  }
 +
 +  private static void consumeQuerySegment( final Builder builder, String token ) {
 +    if( token != null && token.length() > 0 ) {
 +      // Shorthand format {queryParam} == queryParam={queryParam=*}
 +      if( TEMPLATE_OPEN_MARKUP == token.charAt( 0 ) ) {
 +        Token paramPattern = parseTemplateToken( builder, token, Segment.GLOB_PATTERN );
 +        String paramName = paramPattern.parameterName;
 +        if( paramPattern.originalPattern == null ) {
 +          builder.addQuery( paramName, new Token( paramName, null, Segment.GLOB_PATTERN, builder.isLiteral() ) );
 +        } else {
 +          builder.addQuery( paramName, new Token( paramName, paramPattern.originalPattern, builder.isLiteral() ) );
 +        }
 +      } else {
 +        String[] nameValue = split( token, '=' );
 +        if( nameValue.length == 1 ) {
 +          String queryName = nameValue[ 0 ];
 +          builder.addQuery( queryName, new Token( Segment.ANONYMOUS_PARAM, null, builder.isLiteral() ) );
 +        } else {
 +          String queryName = nameValue[ 0 ];
 +          Token paramPattern = parseTemplateToken( builder, nameValue[ 1 ], Segment.GLOB_PATTERN );
 +          builder.addQuery( queryName, paramPattern );
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void consumeFragmentMatch( final Builder builder, Matcher match ) {
 +    if( match.group( MATCH_GROUP_FRAGMENT ) != null ) {
 +      builder.setHasFragment( true );
 +      consumeFragmentToken( builder, match.group( MATCH_GROUP_FRAGMENT_NAKED ) );
 +    }
 +  }
 +
 +  private static void consumeFragmentToken( final Builder builder, String token ) {
 +    if( token != null && token.length() > 0 ) {
 +      Token t = parseTemplateToken( builder, token, Segment.STAR_PATTERN );
 +      builder.setFragment( t );
 +    }
 +  }
 +
 +  static final Token parseTemplateToken( final Builder builder, final String s, final String defaultEffectivePattern ) {
 +    String paramName, actualPattern, effectivePattern;
 +    final int l = s.length();
 +    // If the token is non-empty and the template is not a literal, then
 +    if( l > 0 && !builder.isLiteral() ) {
 +      final int b = ( s.charAt( 0 ) == TEMPLATE_OPEN_MARKUP ? 1 : -1 );
 +      final int e = ( s.charAt( l-1 ) == TEMPLATE_CLOSE_MARKUP ? l-1 : -1 );
 +      // If this is a parameter template, ie {...}
 +      if( ( b > 0 ) && ( e > 0 ) && ( e > b ) ) {
 +        final int i = s.indexOf( NAME_PATTERN_SEPARATOR, b );
 +        // If this is an anonymous template
 +        if( i < 0 ) {
 +          paramName = s.substring( b, e );
 +          actualPattern = null;
 +          if( Segment.GLOB_PATTERN.equals( paramName ) ) {
 +            effectivePattern = Segment.GLOB_PATTERN;
 +          } else {
 +            effectivePattern = defaultEffectivePattern;
 +          }
 +        // Otherwise populate the NVP.
 +        } else {
 +          paramName = s.substring( b, i );
 +          actualPattern = s.substring( i+1, e );
 +          effectivePattern = actualPattern;
 +        }
 +      // Otherwise it is just a pattern.
 +      } else {
 +        paramName = Segment.ANONYMOUS_PARAM;
 +        actualPattern = s;
 +        effectivePattern = actualPattern;
 +      }
 +    // Otherwise the token is empty or the template is a literal, so treat it as an anonymous pattern.
 +    } else {
 +      paramName = Segment.ANONYMOUS_PARAM;
 +      actualPattern = s;
 +      effectivePattern = actualPattern;
 +    }
 +    final Token token = new Token( paramName, actualPattern, effectivePattern, builder.isLiteral() );
 +    return token;
 +  }
 +
 +  // Used instead of String.split to avoid regex overhead for a single-character delimiter.
 +  private static String[] split( String s, char d ) {
 +    String[] a;
 +    int i = s.indexOf( d );
 +    if( i < 0 ) {
 +      a = new String[]{ s };
 +    } else {
 +      a = new String[]{ s.substring( 0, i ), s.substring( i + 1 ) };
 +    }
 +    return a;
 +  }
 +
 +}
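
A small illustration of the new query tokenization in consumeQueryToken above; the query string is hypothetical and mixes the delimiters the regex accepts:

    class QuerySplitExample {
      public static void main(String[] args) {
        // Same regex as consumeQueryToken: "&amp;" is tried before the single-character delimiters.
        String query = "op=LISTSTATUS&amp;user.name={user}&doAs={**}";
        for (String segment : query.split("(&amp;|\\?|&)")) {
          System.out.println(segment); // op=LISTSTATUS, then user.name={user}, then doAs={**}
        }
      }
    }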


[38/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
----------------------------------------------------------------------
diff --cc gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
index e4e0462,0000000..e69c599
mode 100644,000000..100644
--- a/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
+++ b/gateway-provider-security-pac4j/src/test/java/org/apache/knox/gateway/pac4j/Pac4jProviderTest.java
@@@ -1,150 -1,0 +1,335 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.pac4j;
 +
 +import org.apache.knox.gateway.audit.api.AuditContext;
 +import org.apache.knox.gateway.audit.api.AuditService;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.pac4j.filter.Pac4jDispatcherFilter;
 +import org.apache.knox.gateway.pac4j.filter.Pac4jIdentityAdapter;
 +import org.apache.knox.gateway.pac4j.session.KnoxSessionStore;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.impl.DefaultCryptoService;
 +import org.junit.Test;
 +import org.pac4j.core.client.Clients;
 +import org.pac4j.core.context.Pac4jConstants;
 +import org.pac4j.http.client.indirect.IndirectBasicAuthClient;
 +
 +import javax.servlet.*;
 +import javax.servlet.http.*;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
- 
 +import static org.mockito.Mockito.*;
 +import static org.junit.Assert.*;
 +
 +/**
 + * This class simulates a full authentication process using pac4j.
 + */
 +public class Pac4jProviderTest {
 +
 +    private static final String LOCALHOST = "127.0.0.1";
 +    private static final String HADOOP_SERVICE_URL = "https://" + LOCALHOST + ":8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS";
 +    private static final String KNOXSSO_SERVICE_URL = "https://" + LOCALHOST + ":8443/gateway/idp/api/v1/websso";
 +    private static final String PAC4J_CALLBACK_URL = KNOXSSO_SERVICE_URL;
 +    private static final String ORIGINAL_URL = "originalUrl";
 +    private static final String CLUSTER_NAME = "knox";
 +    private static final String PAC4J_PASSWORD = "pwdfortest";
 +    private static final String CLIENT_CLASS = IndirectBasicAuthClient.class.getSimpleName();
 +    private static final String USERNAME = "jleleu";
 +
 +    @Test
 +    public void test() throws Exception {
 +        final AliasService aliasService = mock(AliasService.class);
 +        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD, true)).thenReturn(PAC4J_PASSWORD.toCharArray());
 +        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD)).thenReturn(PAC4J_PASSWORD.toCharArray());
 +
 +        final DefaultCryptoService cryptoService = new DefaultCryptoService();
 +        cryptoService.setAliasService(aliasService);
 +
 +        final GatewayServices services = mock(GatewayServices.class);
 +        when(services.getService(GatewayServices.CRYPTO_SERVICE)).thenReturn(cryptoService);
 +        when(services.getService(GatewayServices.ALIAS_SERVICE)).thenReturn(aliasService);
 +
 +        final ServletContext context = mock(ServletContext.class);
 +        when(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).thenReturn(services);
 +        when(context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE)).thenReturn(CLUSTER_NAME);
 +
 +        final FilterConfig config = mock(FilterConfig.class);
 +        when(config.getServletContext()).thenReturn(context);
 +        when(config.getInitParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_URL)).thenReturn(PAC4J_CALLBACK_URL);
 +        when(config.getInitParameter("clientName")).thenReturn(Pac4jDispatcherFilter.TEST_BASIC_AUTH);
 +
 +        final Pac4jDispatcherFilter dispatcher = new Pac4jDispatcherFilter();
 +        dispatcher.init(config);
 +        final Pac4jIdentityAdapter adapter = new Pac4jIdentityAdapter();
 +        adapter.init(config);
 +        Pac4jIdentityAdapter.setAuditor(mock(Auditor.class));
 +        final AuditService auditService = mock(AuditService.class);
 +        when(auditService.getContext()).thenReturn(mock(AuditContext.class));
 +        Pac4jIdentityAdapter.setAuditService(auditService);
 +
 +        // step 1: call the KnoxSSO service with an original url pointing to a Hadoop service (redirected by the SSOCookieProvider)
 +        MockHttpServletRequest request = new MockHttpServletRequest();
 +        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
 +        request.setCookies(new Cookie[0]);
 +        request.setServerName(LOCALHOST);
 +        MockHttpServletResponse response = new MockHttpServletResponse();
 +        FilterChain filterChain = mock(FilterChain.class);
 +        dispatcher.doFilter(request, response, filterChain);
 +        // it should be a redirection to the idp topology
 +        assertEquals(302, response.getStatus());
 +        assertEquals(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS, response.getHeaders().get("Location"));
 +        // we should have one cookie for the saved requested url
 +        List<Cookie> cookies = response.getCookies();
 +        assertEquals(1, cookies.size());
 +        final Cookie requestedUrlCookie = cookies.get(0);
 +        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL, requestedUrlCookie.getName());
 +
 +        // step 2: send credentials to the callback url (callback from the identity provider)
 +        request = new MockHttpServletRequest();
 +        request.setCookies(new Cookie[]{requestedUrlCookie});
 +        request.setRequestURL(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS);
 +        request.addParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER, "true");
 +        request.addParameter(Clients.DEFAULT_CLIENT_NAME_PARAMETER, CLIENT_CLASS);
 +        request.addHeader("Authorization", "Basic amxlbGV1OmpsZWxldQ==");
 +        request.setServerName(LOCALHOST);
 +        response = new MockHttpServletResponse();
 +        filterChain = mock(FilterChain.class);
 +        dispatcher.doFilter(request, response, filterChain);
 +        // it should be a redirection to the original url
 +        assertEquals(302, response.getStatus());
 +        assertEquals(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL, response.getHeaders().get("Location"));
 +        // we should have 3 cookies, including the user profile
 +        cookies = response.getCookies();
 +        Map<String, String> mapCookies = new HashMap<>();
 +        assertEquals(3, cookies.size());
 +        for (final Cookie cookie : cookies) {
 +            mapCookies.put(cookie.getName(), cookie.getValue());
 +        }
 +        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + CLIENT_CLASS + "$attemptedAuthentication"));
 +        assertNotNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES));
 +        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL));
 +
 +        // step 3: turn pac4j identity into KnoxSSO identity
 +        request = new MockHttpServletRequest();
 +        request.setCookies(cookies.toArray(new Cookie[cookies.size()]));
 +        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
 +        request.setServerName(LOCALHOST);
 +        response = new MockHttpServletResponse();
 +        filterChain = mock(FilterChain.class);
 +        dispatcher.doFilter(request, response, filterChain);
 +        assertEquals(0, response.getStatus());
 +        adapter.doFilter(request, response, filterChain);
 +        cookies = response.getCookies();
 +        assertEquals(1, cookies.size());
 +        final Cookie userProfileCookie = cookies.get(0);
 +        // the user profile has been cleaned
 +        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES, userProfileCookie.getName());
 +        assertNull(userProfileCookie.getValue());
 +        assertEquals(USERNAME, adapter.getTestIdentifier());
 +    }
++
++    @Test
++    public void testValidIdAttribute() throws Exception {
++        final AliasService aliasService = mock(AliasService.class);
++        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD, true)).thenReturn(PAC4J_PASSWORD.toCharArray());
++        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD)).thenReturn(PAC4J_PASSWORD.toCharArray());
++
++        final DefaultCryptoService cryptoService = new DefaultCryptoService();
++        cryptoService.setAliasService(aliasService);
++
++        final GatewayServices services = mock(GatewayServices.class);
++        when(services.getService(GatewayServices.CRYPTO_SERVICE)).thenReturn(cryptoService);
++        when(services.getService(GatewayServices.ALIAS_SERVICE)).thenReturn(aliasService);
++
++        final ServletContext context = mock(ServletContext.class);
++        when(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).thenReturn(services);
++        when(context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE)).thenReturn(CLUSTER_NAME);
++
++        final FilterConfig config = mock(FilterConfig.class);
++        when(config.getServletContext()).thenReturn(context);
++        when(config.getInitParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_URL)).thenReturn(PAC4J_CALLBACK_URL);
++        when(config.getInitParameter("clientName")).thenReturn(Pac4jDispatcherFilter.TEST_BASIC_AUTH);
++        when(config.getInitParameter(Pac4jIdentityAdapter.PAC4J_ID_ATTRIBUTE)).thenReturn("username");
++
++        final Pac4jDispatcherFilter dispatcher = new Pac4jDispatcherFilter();
++        dispatcher.init(config);
++        final Pac4jIdentityAdapter adapter = new Pac4jIdentityAdapter();
++        adapter.init(config);
++        Pac4jIdentityAdapter.setAuditor(mock(Auditor.class));
++        final AuditService auditService = mock(AuditService.class);
++        when(auditService.getContext()).thenReturn(mock(AuditContext.class));
++        Pac4jIdentityAdapter.setAuditService(auditService);
++
++        // step 1: call the KnoxSSO service with an original url pointing to a Hadoop service (redirected by the SSOCookieProvider)
++        MockHttpServletRequest request = new MockHttpServletRequest();
++        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
++        request.setCookies(new Cookie[0]);
++        request.setServerName(LOCALHOST);
++        MockHttpServletResponse response = new MockHttpServletResponse();
++        FilterChain filterChain = mock(FilterChain.class);
++        dispatcher.doFilter(request, response, filterChain);
++        // it should be a redirection to the idp topology
++        assertEquals(302, response.getStatus());
++        assertEquals(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS, response.getHeaders().get("Location"));
++        // we should have one cookie for the saved requested url
++        List<Cookie> cookies = response.getCookies();
++        assertEquals(1, cookies.size());
++        final Cookie requestedUrlCookie = cookies.get(0);
++        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL, requestedUrlCookie.getName());
++
++        // step 2: send credentials to the callback url (callback from the identity provider)
++        request = new MockHttpServletRequest();
++        request.setCookies(new Cookie[]{requestedUrlCookie});
++        request.setRequestURL(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS);
++        request.addParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER, "true");
++        request.addParameter(Clients.DEFAULT_CLIENT_NAME_PARAMETER, CLIENT_CLASS);
++        request.addHeader("Authorization", "Basic amxlbGV1OmpsZWxldQ==");
++        request.setServerName(LOCALHOST);
++        response = new MockHttpServletResponse();
++        filterChain = mock(FilterChain.class);
++        dispatcher.doFilter(request, response, filterChain);
++        // it should be a redirection to the original url
++        assertEquals(302, response.getStatus());
++        assertEquals(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL, response.getHeaders().get("Location"));
++        // we should have 3 cookies, among them the user profile
++        cookies = response.getCookies();
++        Map<String, String> mapCookies = new HashMap<>();
++        assertEquals(3, cookies.size());
++        for (final Cookie cookie : cookies) {
++            mapCookies.put(cookie.getName(), cookie.getValue());
++        }
++        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + CLIENT_CLASS + "$attemptedAuthentication"));
++        assertNotNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES));
++        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL));
++
++        // step 3: turn pac4j identity into KnoxSSO identity
++        request = new MockHttpServletRequest();
++        request.setCookies(cookies.toArray(new Cookie[cookies.size()]));
++        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
++        request.setServerName(LOCALHOST);
++        response = new MockHttpServletResponse();
++        filterChain = mock(FilterChain.class);
++        dispatcher.doFilter(request, response, filterChain);
++        assertEquals(0, response.getStatus());
++        adapter.doFilter(request, response, filterChain);
++        cookies = response.getCookies();
++        assertEquals(1, cookies.size());
++        final Cookie userProfileCookie = cookies.get(0);
++        // the user profile has been cleaned
++        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES, userProfileCookie.getName());
++        assertNull(userProfileCookie.getValue());
++        assertEquals(USERNAME, adapter.getTestIdentifier());
++    }
++
++    @Test
++    public void testInvalidIdAttribute() throws Exception {
++        final AliasService aliasService = mock(AliasService.class);
++        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD, true)).thenReturn(PAC4J_PASSWORD.toCharArray());
++        when(aliasService.getPasswordFromAliasForCluster(CLUSTER_NAME, KnoxSessionStore.PAC4J_PASSWORD)).thenReturn(PAC4J_PASSWORD.toCharArray());
++
++        final DefaultCryptoService cryptoService = new DefaultCryptoService();
++        cryptoService.setAliasService(aliasService);
++
++        final GatewayServices services = mock(GatewayServices.class);
++        when(services.getService(GatewayServices.CRYPTO_SERVICE)).thenReturn(cryptoService);
++        when(services.getService(GatewayServices.ALIAS_SERVICE)).thenReturn(aliasService);
++
++        final ServletContext context = mock(ServletContext.class);
++        when(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).thenReturn(services);
++        when(context.getAttribute(GatewayServices.GATEWAY_CLUSTER_ATTRIBUTE)).thenReturn(CLUSTER_NAME);
++
++        final FilterConfig config = mock(FilterConfig.class);
++        when(config.getServletContext()).thenReturn(context);
++        when(config.getInitParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_URL)).thenReturn(PAC4J_CALLBACK_URL);
++        when(config.getInitParameter("clientName")).thenReturn(Pac4jDispatcherFilter.TEST_BASIC_AUTH);
++        when(config.getInitParameter(Pac4jIdentityAdapter.PAC4J_ID_ATTRIBUTE)).thenReturn("larry");
++
++        final Pac4jDispatcherFilter dispatcher = new Pac4jDispatcherFilter();
++        dispatcher.init(config);
++        final Pac4jIdentityAdapter adapter = new Pac4jIdentityAdapter();
++        adapter.init(config);
++        Pac4jIdentityAdapter.setAuditor(mock(Auditor.class));
++        final AuditService auditService = mock(AuditService.class);
++        when(auditService.getContext()).thenReturn(mock(AuditContext.class));
++        Pac4jIdentityAdapter.setAuditService(auditService);
++
++        // step 1: call the KnoxSSO service with an original url pointing to a Hadoop service (redirected by the SSOCookieProvider)
++        MockHttpServletRequest request = new MockHttpServletRequest();
++        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
++        request.setCookies(new Cookie[0]);
++        request.setServerName(LOCALHOST);
++        MockHttpServletResponse response = new MockHttpServletResponse();
++        FilterChain filterChain = mock(FilterChain.class);
++        dispatcher.doFilter(request, response, filterChain);
++        // it should be a redirection to the idp topology
++        assertEquals(302, response.getStatus());
++        assertEquals(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS, response.getHeaders().get("Location"));
++        // we should have one cookie for the saved requested url
++        List<Cookie> cookies = response.getCookies();
++        assertEquals(1, cookies.size());
++        final Cookie requestedUrlCookie = cookies.get(0);
++        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL, requestedUrlCookie.getName());
++
++        // step 2: send credentials to the callback url (callback from the identity provider)
++        request = new MockHttpServletRequest();
++        request.setCookies(new Cookie[]{requestedUrlCookie});
++        request.setRequestURL(PAC4J_CALLBACK_URL + "?" + Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER + "=true&" + Clients.DEFAULT_CLIENT_NAME_PARAMETER + "=" + CLIENT_CLASS);
++        request.addParameter(Pac4jDispatcherFilter.PAC4J_CALLBACK_PARAMETER, "true");
++        request.addParameter(Clients.DEFAULT_CLIENT_NAME_PARAMETER, CLIENT_CLASS);
++        request.addHeader("Authorization", "Basic amxlbGV1OmpsZWxldQ==");
++        request.setServerName(LOCALHOST);
++        response = new MockHttpServletResponse();
++        filterChain = mock(FilterChain.class);
++        dispatcher.doFilter(request, response, filterChain);
++        // it should be a redirection to the original url
++        assertEquals(302, response.getStatus());
++        assertEquals(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL, response.getHeaders().get("Location"));
++        // we should have 3 cookies, among them the user profile
++        cookies = response.getCookies();
++        Map<String, String> mapCookies = new HashMap<>();
++        assertEquals(3, cookies.size());
++        for (final Cookie cookie : cookies) {
++            mapCookies.put(cookie.getName(), cookie.getValue());
++        }
++        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + CLIENT_CLASS + "$attemptedAuthentication"));
++        assertNotNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES));
++        assertNull(mapCookies.get(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.REQUESTED_URL));
++
++        // step 3: turn pac4j identity into KnoxSSO identity
++        request = new MockHttpServletRequest();
++        request.setCookies(cookies.toArray(new Cookie[cookies.size()]));
++        request.setRequestURL(KNOXSSO_SERVICE_URL + "?" + ORIGINAL_URL + "=" + HADOOP_SERVICE_URL);
++        request.setServerName(LOCALHOST);
++        response = new MockHttpServletResponse();
++        filterChain = mock(FilterChain.class);
++        dispatcher.doFilter(request, response, filterChain);
++        assertEquals(0, response.getStatus());
++        adapter.doFilter(request, response, filterChain);
++        cookies = response.getCookies();
++        assertEquals(1, cookies.size());
++        final Cookie userProfileCookie = cookies.get(0);
++        // the user profile has been cleaned
++        assertEquals(KnoxSessionStore.PAC4J_SESSION_PREFIX + Pac4jConstants.USER_PROFILES, userProfileCookie.getName());
++        assertNull(userProfileCookie.getValue());
++        assertEquals(USERNAME, adapter.getTestIdentifier());
++    }
++
 +}
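
A note on what the two new tests assert: with a valid PAC4J_ID_ATTRIBUTE
("username") the adapter promotes that profile attribute to the KnoxSSO
principal, while an unknown attribute ("larry") falls back to the pac4j
profile id, which is why both tests end with the same USERNAME assertion.
A minimal illustrative sketch of that resolution rule (the class and
method below are hypothetical, not Knox's implementation):

    import java.util.Map;

    class IdAttributeResolverSketch {
      // Returns the configured attribute's value when present,
      // otherwise falls back to the pac4j profile id.
      static String resolve( Map<String, Object> profileAttributes,
                             String profileId,
                             String configuredIdAttribute ) {
        if( configuredIdAttribute != null ) {
          Object value = profileAttributes.get( configuredIdAttribute );
          if( value != null ) {
            return value.toString(); // e.g. "username" -> the basic-auth user
          }
        }
        return profileId; // e.g. unknown "larry" -> profile id, still USERNAME
      }
    }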

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-release/home/conf/topologies/admin.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-release/home/conf/topologies/knoxsso.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-release/home/conf/topologies/manager.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-release/home/conf/topologies/sandbox.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
index 61c5303,0000000..f10f97b
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
@@@ -1,553 -1,0 +1,615 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import org.apache.commons.cli.ParseException;
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +
 +import java.io.File;
 +import java.net.URI;
 +import java.util.Date;
 +import java.util.Map;
 +import java.util.Set;
 +
 +/**
 + * Log messages for the gateway server.
 + */
 +@Messages(logger="org.apache.knox.gateway")
 +public interface GatewayMessages {
 +
 +  @Message( level = MessageLevel.FATAL, text = "Failed to parse command line: {0}" )
 +  void failedToParseCommandLine( @StackTrace( level = MessageLevel.DEBUG ) ParseException e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Starting gateway..." )
 +  void startingGateway();
 +
 +  @Message( level = MessageLevel.FATAL, text = "Failed to start gateway: {0}" )
 +  void failedToStartGateway( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Started gateway on port {0}." )
 +  void startedGateway( int port );
 +
 +  @Message( level = MessageLevel.INFO, text = "Stopping gateway..." )
 +  void stoppingGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Stopped gateway." )
 +  void stoppedGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading configuration resource {0}" )
 +  void loadingConfigurationResource( String res );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading configuration file {0}" )
 +  void loadingConfigurationFile( String file );
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to load configuration file {0}: {1}" )
 +  void failedToLoadConfig( String path, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Using {1} as GATEWAY_HOME via {0}." )
 +  void settingGatewayHomeDir( String location, String home );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading topologies from directory: {0}" )
 +  void loadingTopologiesFromDirectory( String topologiesDir );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Loading topology file: {0}" )
 +  void loadingTopologyFile( String fileName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Monitoring topologies in directory: {0}" )
 +  void monitoringTopologyChangesInDirectory( String topologiesDir );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deploying topology {0} to {1}" )
 +  void deployingTopology( String clusterName, String warDirName );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Deployed topology {0}." )
 +  void deployedTopology( String clusterName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading topology {0} from {1}" )
 +  void redeployingTopology( String clusterName, String warDirName );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Redeployed topology {0}." )
 +  void redeployedTopology( String clusterName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Activating topology {0}" )
 +  void activatingTopology( String name );
 +
 +  @Message( level = MessageLevel.INFO, text = "Activating topology {0} archive {1}" )
 +  void activatingTopologyArchive( String topology, String archive );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deactivating topology {0}" )
 +  void deactivatingTopology( String name );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to deploy topology {0}: {1}" )
 +  void failedToDeployTopology( String name, @StackTrace(level=MessageLevel.DEBUG) Throwable e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to redeploy topology {0}" )
 +  void failedToRedeployTopology( String name );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to redeploy topology {0}: {1}" )
 +  void failedToRedeployTopology( String name, @StackTrace(level=MessageLevel.DEBUG) Throwable e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to load topology {0}: Topology configuration is invalid!")
 +  void failedToLoadTopology(String fileName);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to redeploy topologies: {0}" )
 +  void failedToRedeployTopologies( @StackTrace(level=MessageLevel.DEBUG) Throwable e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to undeploy topology {0}: {1}" )
 +  void failedToUndeployTopology( String name, @StackTrace(level=MessageLevel.DEBUG) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting topology {0}" )
 +  void deletingTopology( String topologyName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting deployed topology {0}" )
 +  void deletingDeployment( String warDirName );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Purge backups of deployed topology {0}" )
 +  void cleanupDeployments( String topologyName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting backup deployed topology {0}" )
 +  void cleanupDeployment( String absolutePath );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating gateway home directory: {0}" )
 +  void creatingGatewayHomeDir( File homeDir );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating gateway deployment directory: {0}" )
 +  void creatingGatewayDeploymentDir( File topologiesDir );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating default gateway configuration file: {0}" )
 +  void creatingDefaultConfigFile( File defaultConfigFile );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating sample topology file: {0}" )
 +  void creatingDefaultTopologyFile( File defaultConfigFile );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring service deployment contributor with invalid null name: {0}" )
 +  void ignoringServiceContributorWithMissingName( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring service deployment contributor with invalid null role: {0}" )
 +  void ignoringServiceContributorWithMissingRole( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring service deployment contributor with invalid null version: {0}" )
 +  void ignoringServiceContributorWithMissingVersion( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring provider deployment contributor with invalid null name: {0}" )
 +  void ignoringProviderContributorWithMissingName( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring provider deployment contributor with invalid null role: {0}" )
 +  void ignoringProviderContributorWithMissingRole( String className );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loaded logging configuration: {0}" )
 +  void loadedLoggingConfig( String fileName );
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to load logging configuration: {0}" )
 +  void failedToLoadLoggingConfig( String fileName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating credential store for the gateway instance." )
 +  void creatingCredentialStoreForGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Credential store for the gateway instance found - no need to create one." )
 +  void credentialStoreForGatewayFoundNotCreating();
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating keystore for the gateway instance." )
 +  void creatingKeyStoreForGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Keystore for the gateway instance found - no need to create one." )
 +  void keyStoreForGatewayFoundNotCreating();
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating credential store for the cluster: {0}" )
 +  void creatingCredentialStoreForCluster(String clusterName);
 +
 +  @Message( level = MessageLevel.INFO, text = "Credential store found for the cluster: {0} - no need to create one." )
 +  void credentialStoreForClusterFoundNotCreating(String clusterName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Received request: {0} {1}" )
 +  void receivedRequest( String method, String uri );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Dispatch request: {0} {1}" )
 +  void dispatchRequest( String method, URI uri );
 +  
 +  @Message( level = MessageLevel.WARN, text = "Connection exception dispatching request: {0} {1}" )
 +  void dispatchServiceConnectionException( URI uri, @StackTrace(level=MessageLevel.WARN) Exception e );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Signature verified: {0}" )
 +  void signatureVerified( boolean verified );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Apache Knox Gateway {0} ({1})" )
 +  void gatewayVersionMessage( String version, String hash );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to inject service {0}: {1}" )
 +  void failedToInjectService( String serviceName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to finalize contribution: {0}" )
 +  void failedToFinalizeContribution( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to contribute service [role={1}, name={0}]: {2}" )
 +  void failedToContributeService( String name, String role, @StackTrace( level = MessageLevel.ERROR ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to contribute provider [role={1}, name={0}]: {2}" )
 +  void failedToContributeProvider( String name, String role, @StackTrace( level = MessageLevel.ERROR ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize contribution: {0}" )
 +  void failedToInitializeContribution( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize servlet instance: {0}" )
 +  void failedToInitializeServletInstace( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Gateway processing failed: {0}" )
 +  void failedToExecuteFilter( @StackTrace( level = MessageLevel.INFO ) Throwable t );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load topology {0}: {1}")
 +  void failedToLoadTopology( String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load topology {0}, retrying after {1}ms: {2}")
 +  void failedToLoadTopologyRetrying( String friendlyURI, String delay, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to handle topology events: {0}" )
 +  void failedToHandleTopologyEvents( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to reload topologies: {0}" )
 +  void failedToReloadTopologies( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.FATAL, text = "Unsupported encoding: {0}" )
 +  void unsupportedEncoding( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to persist master secret: {0}" )
 +  void failedToPersistMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt master secret: {0}" )
 +  void failedToEncryptMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize master service from persistent master {0}: {1}" )
 +  void failedToInitializeFromPersistentMaster( String masterFileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encode passphrase: {0}" )
 +  void failedToEncodePassphrase( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to verify signature: {0}")
 +  void failedToVerifySignature( @StackTrace(level=MessageLevel.DEBUG) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to sign the data: {0}")
 +  void failedToSignData( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to decrypt password for cluster {0}: {1}" )
 +  void failedToDecryptPasswordForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt password for cluster {0}: {1}")
 +  void failedToEncryptPasswordForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to create keystore [filename={0}, type={1}]: {2}" )
 +  void failedToCreateKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load keystore [filename={0}, type={1}]: {2}" )
 +  void failedToLoadKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add key for cluster {0}: {1}" )
 +  void failedToAddKeyForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add credential for cluster {0}: {1}" )
 +  void failedToAddCredentialForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key for Gateway {0}: {1}" )
 +  void failedToGetKeyForGateway( String alias, @StackTrace( level=MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get credential for cluster {0}: {1}" )
 +  void failedToGetCredentialForCluster( String clusterName, @StackTrace(level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key for cluster {0}: {1}" )
 +  void failedToGetKeyForCluster( String clusterName, @StackTrace(level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add self signed certificate for Gateway {0}: {1}" )
 +  void failedToAddSeflSignedCertForGateway( String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate secret key from password: {0}" )
 +  void failedToGenerateKeyFromPassword( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to establish connection to {0}: {1}" )
 +  void failedToEstablishConnectionToUrl( String url, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to interpret property \"{0}\": {1}")
 +  void failedToInterpretProperty( String property, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to instantiate the internal gateway services." )
 +  void failedToInstantiateGatewayServices();
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to serialize map to Json string {0}: {1}" )
 +  void failedToSerializeMapToJSON( Map<String, Object> map, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get map from Json string {0}: {1}" )
 +  void failedToGetMapFromJsonString( String json, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.DEBUG, text = "Successful Knox->Hadoop SPNegotiation authentication for URL: {0}" )
 +  void successfulSPNegoAuthn(String uri);
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed Knox->Hadoop SPNegotiation authentication for URL: {0}" )
 +  void failedSPNegoAuthn(String uri);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Dispatch response status: {0}" )
 +  void dispatchResponseStatusCode(int statusCode);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Dispatch response status: {0}, Location: {1}" )
 +  void dispatchResponseCreatedStatusCode( int statusCode, String location );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to decrypt cipher text for cluster {0}: due to inability to retrieve the password." )
 +  void failedToDecryptCipherForClusterNullPassword(String clusterName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Gateway services have not been initialized." )
 +  void gatewayServicesNotInitialized();
 +
 +  @Message( level = MessageLevel.INFO, text = "The Gateway SSL certificate is issued to hostname: {0}." )
 +  void certificateHostNameForGateway(String cn);
 +
 +  @Message( level = MessageLevel.INFO, text = "The Gateway SSL certificate is valid between: {0} and {1}." )
 +  void certificateValidityPeriod(Date notBefore, Date notAfter);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to retrieve certificate for Gateway: {0}." )
 +  void unableToRetrieveCertificateForGateway(Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate alias for cluster: {0} {1}." )
 +  void failedToGenerateAliasForCluster(String clusterName, KeystoreServiceException e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Key passphrase not found in credential store - using master secret." )
 +  void assumingKeyPassphraseIsMaster();
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to remove alias for cluster: {0} {1}." )
 +  void failedToRemoveCredentialForCluster(String clusterName, Exception e);
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to match path {0}" )
 +  void failedToMatchPath( String path );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get system ldap connection: {0}" )
 +  void failedToGetSystemLdapConnection( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.WARN, text = "Value not found for cluster:{0}, alias: {1}" )
 +  void aliasValueNotFound( String cluster, String alias );
 +
 +  @Message( level = MessageLevel.INFO, text = "Computed userDn: {0} using dnTemplate for principal: {1}" )
 +  void computedUserDn(String userDn, String principal);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Searching from {0} where {1} scope {2}" )
 +  void searchBaseFilterScope( String searchBase, String searchFilter, String searchScope );
 +
 +  @Message( level = MessageLevel.INFO, text = "Computed userDn: {0} using ldapSearch for principal: {1}" )
 +  void searchedAndFoundUserDn(String userDn, String principal);
 +
 +  @Message( level = MessageLevel.INFO, text = "Computed roles/groups: {0} for principal: {1}" )
 +  void lookedUpUserRoles(Set<String> roleNames, String userName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Initialize provider: {1}/{0}" )
 +  void initializeProvider( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Initialize service: {1}/{0}" )
 +  void initializeService( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Contribute provider: {1}/{0}" )
 +  void contributeProvider( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Contribute service: {1}/{0}" )
 +  void contributeService( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Finalize provider: {1}/{0}" )
 +  void finalizeProvider( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Finalize service: {1}/{0}" )
 +  void finalizeService( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Configured services directory is {0}" )
 +  void usingServicesDirectory(String path);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to unmarshall service definition file {0} file : {1}" )
 +  void failedToLoadServiceDefinition(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to find service definition file {0} file : {1}" )
 +  void failedToFindServiceDefinitionFile(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to find rewrite file {0} file : {1}" )
 +  void failedToFindRewriteFile(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to unmarshall rewrite file {0} file : {1}" )
 +  void failedToLoadRewriteFile(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "No rewrite file found in service directory {0}" )
 +  void noRewriteFileFound(String path);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Added Service definition name: {0}, role : {1}, version : {2}" )
 +  void addedServiceDefinition(String serviceName, String serviceRole, String version);
 +
 +  @Message( level = MessageLevel.INFO, text = "System Property: {0}={1}" )
 +  void logSysProp( String name, String property );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to get password: {0}" )
 +  void unableToGetPassword(@StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Initialize application: {0}" )
 +  void initializeApplication( String name );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Contribute application: {0}" )
 +  void contributeApplication( String name );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Finalize application: {0}" )
 +  void finalizeApplication( String name );
 +
 +  @Message( level = MessageLevel.INFO, text = "Default topology {0} at {1}" )
 +  void defaultTopologySetup( String defaultTopologyName, String redirectContext );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Default topology forward from {0} to {1}" )
 +  void defaultTopologyForward( String oldTarget, String newTarget );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to setup PagedResults" )
 +  void unableToSetupPagedResults();
 +
 +  @Message( level = MessageLevel.INFO, text = "Ignoring PartialResultException" )
 +  void ignoringPartialResultException();
 +
 +  @Message( level = MessageLevel.WARN, text = "Only retrieved first {0} groups due to SizeLimitExceededException." )
 +  void sizeLimitExceededOnlyRetrieved(int numResults);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to parse path into Template: {0} : {1}" )
 +  void failedToParsePath( String path, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to initialize metrics reporter {0}  : {1}" )
 +  void failedToInitializeReporter( String name,  @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to start metrics reporter {0}  : {1}" )
 +  void failedToStartReporter( String name,  @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to stop metrics reporter {0}  : {1}" )
 +  void failedToStopReporter( String name,  @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.INFO, text = "Cookie scoping feature enabled: {0}" )
 +  void cookieScopingFeatureEnabled( boolean enabled );
 +
 +  /**
 +   * Log whether the topology port mapping feature is enabled or disabled.
 +   *
 +   * @param enabled whether the feature is enabled
 +   */
 +  @Message(level = MessageLevel.INFO,
 +           text = "Topology port mapping feature enabled: {0}")
 +  void gatewayTopologyPortMappingEnabled(final boolean enabled);
 +
 +  /**
 +   * @param topology the topology the connector serves
 +   * @param port the port the connector listens on
 +   */
 +  @Message(level = MessageLevel.DEBUG,
 +           text = "Creating a connector for topology {0} listening on port {1}.")
 +  void createJettyConnector(final String topology, final int port);
 +
 +  /**
 +   * @param topology the topology the handler serves
 +   */
 +  @Message(level = MessageLevel.DEBUG,
 +           text = "Creating a handler for topology {0}.")
 +  void createJettyHandler(final String topology);
 +
 +  /**
 +   * @param oldTarget the original request context
 +   * @param newTarget the updated request context
 +   */
 +  @Message(level = MessageLevel.INFO,
 +           text = "Updating request context from {0} to {1}")
 +  void topologyPortMappingAddContext(final String oldTarget,
 +      final String newTarget);
 +
 +  /**
 +   * @param oldTarget the original request target
 +   * @param newTarget the updated request target
 +   */
 +  @Message(level = MessageLevel.DEBUG,
 +           text = "Updating request target from {0} to {1}")
 +  void topologyPortMappingUpdateRequest(final String oldTarget,
 +      final String newTarget);
 +
 +  /**
 +   * Messages for Topology Port Mapping
 +   *
 +   * @param port the port that is already in use
 +   * @param topology the topology configured to use the port
 +   */
 +  @Message(level = MessageLevel.ERROR,
 +           text = "Port {0} configured for Topology - {1} is already in use.")
 +  void portAlreadyInUse(final int port, final String topology);
 +
 +  /**
 +   * Messages for Topology Port Mapping
 +   *
 +   * @param port the port that is already in use
 +   */
 +  @Message(level = MessageLevel.ERROR,
 +           text = "Port {0} is already in use.")
 +  void portAlreadyInUse(final int port);
 +
 +  /**
 +   * Log the topology and the port it listens on.
 +   *
 +   * @param topology the topology name
 +   * @param port the port the topology listens on
 +   */
 +  @Message(level = MessageLevel.INFO,
 +           text = "Started gateway, topology \"{0}\" listening on port \"{1}\".")
 +  void startedGateway(final String topology, final int port);
 +
 +  @Message(level = MessageLevel.ERROR,
 +           text =
 +               " Could not find topology \"{0}\" mapped to port \"{1}\" configured in gateway-config.xml. "
 +                   + "This invalid topology mapping will be ignored by the gateway. "
 +                   + "Gateway restart will be required if in the future \"{0}\" topology is added.")
 +  void topologyPortMappingCannotFindTopology(final String topology, final int port);
 +
 +
++  @Message( level = MessageLevel.WARN, text = "There is no registry client defined for remote configuration monitoring." )
++  void missingClientConfigurationForRemoteMonitoring();
++
++  @Message( level = MessageLevel.WARN, text = "Could not resolve a remote configuration registry client for {0}." )
++  void unresolvedClientConfigurationForRemoteMonitoring(final String clientName);
++
 +  @Message( level = MessageLevel.INFO, text = "Monitoring simple descriptors in directory: {0}" )
 +  void monitoringDescriptorChangesInDirectory(String descriptorsDir);
 +
- 
 +  @Message( level = MessageLevel.INFO, text = "Monitoring shared provider configurations in directory: {0}" )
 +  void monitoringProviderConfigChangesInDirectory(String sharedProviderDir);
 +
++  @Message( level = MessageLevel.ERROR, text = "Error registering listener for remote configuration path {0} : {1}" )
++  void errorAddingRemoteConfigurationListenerForPath(final String path,
++                                                     @StackTrace( level = MessageLevel.DEBUG ) Exception e);
++
++  @Message( level = MessageLevel.ERROR, text = "Error unregistering listener for remote configuration path {0} : {1}" )
++  void errorRemovingRemoteConfigurationListenerForPath(final String path,
++                                                       @StackTrace( level = MessageLevel.DEBUG ) Exception e);
++
++  @Message( level = MessageLevel.ERROR, text = "Error downloading remote configuration {0} : {1}" )
++  void errorDownloadingRemoteConfiguration(final String path,
++                                           @StackTrace( level = MessageLevel.DEBUG ) Exception e);
++
 +  @Message( level = MessageLevel.INFO, text = "Prevented deletion of shared provider configuration because there are referencing descriptors: {0}" )
 +  void preventedDeletionOfSharedProviderConfiguration(String providerConfigurationPath);
 +
 +  @Message( level = MessageLevel.INFO, text = "Generated topology {0} because the associated descriptor {1} changed." )
 +  void generatedTopologyForDescriptorChange(String topologyName, String descriptorName);
 +
++  @Message( level = MessageLevel.WARN, text = "An error occurred while attempting to initialize the remote configuration monitor: {0}" )
++  void remoteConfigurationMonitorInitFailure(final String errorMessage,
++                                             @StackTrace( level = MessageLevel.DEBUG ) Exception e );
++
++  @Message( level = MessageLevel.WARN, text = "An error occurred while attempting to start the remote configuration monitor {0} : {1}" )
++  void remoteConfigurationMonitorStartFailure(final String monitorType,
++                                              final String errorMessage,
++                                              @StackTrace( level = MessageLevel.DEBUG ) Exception e );
++
++  @Message( level = MessageLevel.INFO, text = "Starting remote configuration monitor for source {0} ..." )
++  void startingRemoteConfigurationMonitor(final String address);
++
++  @Message( level = MessageLevel.INFO, text = "Monitoring remote configuration source {0}" )
++  void monitoringRemoteConfigurationSource(final String address);
++
++  @Message( level = MessageLevel.INFO, text = "Remote configuration monitor downloaded {0} configuration file {1}" )
++  void downloadedRemoteConfigFile(final String type, final String configFileName);
++
++  @Message( level = MessageLevel.INFO, text = "Remote configuration monitor deleted {0} configuration file {1} based on remote change." )
++  void deletedRemoteConfigFile(final String type, final String configFileName);
++
 +  @Message( level = MessageLevel.ERROR, text = "An error occurred while processing {0} : {1}" )
 +  void simpleDescriptorHandlingError(final String simpleDesc,
 +                                     @StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +  @Message(level = MessageLevel.DEBUG, text = "Successfully wrote configuration: {0}")
 +  void wroteConfigurationFile(final String filePath);
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to write configuration: {0}")
 +  void failedToWriteConfigurationFile(final String filePath,
 +                                      @StackTrace(level = MessageLevel.DEBUG) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting topology {0} because the associated descriptor {1} was deleted." )
 +  void deletingTopologyForDescriptorDeletion(String topologyName, String descriptorName);
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting descriptor {0} because the associated topology {1} was deleted." )
 +  void deletingDescriptorForTopologyDeletion(String descriptorName, String topologyName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Added descriptor {0} reference to provider configuration {1}." )
 +  void addedProviderConfigurationReference(String descriptorName, String providerConfigurationName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Removed descriptor {0} reference to provider configuration {1}." )
 +  void removedProviderConfigurationReference(String descriptorName, String providerConfigurationName);
 +
++  @Message( level = MessageLevel.WARN,
++            text = "The permissions for the remote configuration registry entry \"{0}\" are such that its content may not be trustworthy." )
++  void suspectWritableRemoteConfigurationEntry(String entryPath);
++
++  @Message( level = MessageLevel.WARN,
++            text = "Correcting the suspect permissions for the remote configuration registry entry \"{0}\"." )
++  void correctingSuspectWritableRemoteConfigurationEntry(String entryPath);
++
++  @Message(level = MessageLevel.INFO,
++           text = "A cluster configuration change was noticed for {1} @ {0}")
++  void noticedClusterConfigurationChange(final String source, final String clusterName);
++
++
++  @Message(level = MessageLevel.INFO,
++           text = "Triggering topology regeneration for descriptor {2} because of change to the {1} @ {0} configuration.")
++  void triggeringTopologyRegeneration(final String source, final String clusterName, final String affected);
++
++
++  @Message(level = MessageLevel.ERROR,
++           text = "Encountered an error while responding to {1} @ {0} configuration change: {2}")
++  void errorRespondingToConfigChange(final String source,
++                                     final String clusterName,
++                                     @StackTrace(level = MessageLevel.DEBUG) Exception e);
++
 +}
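
For context on how a @Messages interface like the one above is consumed:
Knox's i18n layer supplies a generated implementation via a factory, and
callers simply invoke the annotated methods, which log at the declared
MessageLevel. A hedged usage sketch (MessagesFactory.get follows the
convention used elsewhere in the Knox codebase; the surrounding class is
illustrative only):

    import org.apache.knox.gateway.i18n.messages.MessagesFactory;

    public class GatewayStartupSketch {
      // The factory returns a generated implementation of the interface.
      private static final GatewayMessages LOG =
          MessagesFactory.get( GatewayMessages.class );

      void start( int port ) {
        LOG.startingGateway();      // INFO: "Starting gateway..."
        // ... start the server, deploy topologies ...
        LOG.startedGateway( port ); // INFO: "Started gateway on port {0}."
      }
    }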


[24/53] [abbrv] knox git commit: KNOX-998 - Some more refactoring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletRequest.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletRequest.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletRequest.java
deleted file mode 100644
index 82ebe3d..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletRequest.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import javax.servlet.AsyncContext;
-import javax.servlet.DispatcherType;
-import javax.servlet.RequestDispatcher;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletInputStream;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.Cookie;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.http.HttpSession;
-import javax.servlet.http.HttpUpgradeHandler;
-import javax.servlet.http.Part;
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.security.Principal;
-import java.util.Collection;
-import java.util.Enumeration;
-import java.util.Locale;
-import java.util.Map;
-
-public class MockHttpServletRequest implements HttpServletRequest {
-
-  private String queryString;
-  private String contentType;
-  private String characterEncoding;
-  private ServletInputStream inputStream;
-  private String method = "GET";
-
-  @Override
-  public String getAuthType() {
-    return null;
-  }
-
-  @Override
-  public Cookie[] getCookies() {
-    return new Cookie[ 0 ];
-  }
-
-  @Override
-  public long getDateHeader( String s ) {
-    return 0;
-  }
-
-  @Override
-  public String getHeader( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getHeaders( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getHeaderNames() {
-    return null;
-  }
-
-  @Override
-  public int getIntHeader( String s ) {
-    return 0;
-  }
-
-  @Override
-  public String getMethod() {
-    return method;
-  }
-
-  public void setMethod( String method ) {
-    this.method = method;
-  }
-
-  @Override
-  public String getPathInfo() {
-    return null;
-  }
-
-  @Override
-  public String getPathTranslated() {
-    return null;
-  }
-
-  @Override
-  public String getContextPath() {
-    return null;
-  }
-
-  @Override
-  public String getQueryString() {
-    return queryString;
-  }
-
-  public void setQueryString( String queryString ) {
-    this.queryString = queryString;
-  }
-
-  @Override
-  public String getRemoteUser() {
-    return null;
-  }
-
-  @Override
-  public boolean isUserInRole( String s ) {
-    return false;
-  }
-
-  @Override
-  public Principal getUserPrincipal() {
-    return null;
-  }
-
-  @Override
-  public String getRequestedSessionId() {
-    return null;
-  }
-
-  @Override
-  public String getRequestURI() {
-    return null;
-  }
-
-  @Override
-  public StringBuffer getRequestURL() {
-    return null;
-  }
-
-  @Override
-  public String getServletPath() {
-    return null;
-  }
-
-  @Override
-  public HttpSession getSession( boolean b ) {
-    return null;
-  }
-
-  @Override
-  public HttpSession getSession() {
-    return null;
-  }
-
-  @Override
-  public String changeSessionId() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public boolean isRequestedSessionIdValid() {
-    return false;
-  }
-
-  @Override
-  public boolean isRequestedSessionIdFromCookie() {
-    return false;
-  }
-
-  @Override
-  public boolean isRequestedSessionIdFromURL() {
-    return false;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public boolean isRequestedSessionIdFromUrl() {
-    return false;
-  }
-
-  @Override
-  public boolean authenticate( HttpServletResponse httpServletResponse ) throws IOException, ServletException {
-    return false;
-  }
-
-  @Override
-  public void login( String s, String s1 ) throws ServletException {
-  }
-
-  @Override
-  public void logout() throws ServletException {
-  }
-
-  @Override
-  public Collection<Part> getParts() throws IOException, ServletException {
-    return null;
-  }
-
-  @Override
-  public Part getPart( String s ) throws IOException, ServletException {
-    return null;
-  }
-
-  @Override
-  public <T extends HttpUpgradeHandler> T upgrade( Class<T> aClass ) throws IOException, ServletException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Object getAttribute( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getAttributeNames() {
-    return null;
-  }
-
-  @Override
-  public String getCharacterEncoding() {
-    return characterEncoding;
-  }
-
-  @Override
-  public void setCharacterEncoding( String characterEncoding ) throws UnsupportedEncodingException {
-    this.characterEncoding = characterEncoding;
-  }
-
-  @Override
-  public int getContentLength() {
-    return 0;
-  }
-
-  @Override
-  public long getContentLengthLong() {
-    return 0;
-  }
-
-  @Override
-  public String getContentType() {
-    return contentType;
-  }
-
-  public void setContentType( String contentType ) {
-    this.contentType = contentType;
-  }
-
-  @Override
-  public ServletInputStream getInputStream() throws IOException {
-    return inputStream;
-  }
-
-  public void setInputStream( ServletInputStream inputStream ) {
-    this.inputStream = inputStream;
-  }
-
-  @Override
-  public String getParameter( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getParameterNames() {
-    return null;
-  }
-
-  @Override
-  public String[] getParameterValues( String s ) {
-    return new String[ 0 ];
-  }
-
-  @Override
-  public Map<String, String[]> getParameterMap() {
-    return null;
-  }
-
-  @Override
-  public String getProtocol() {
-    return null;
-  }
-
-  @Override
-  public String getScheme() {
-    return null;
-  }
-
-  @Override
-  public String getServerName() {
-    return null;
-  }
-
-  @Override
-  public int getServerPort() {
-    return 0;
-  }
-
-  @Override
-  public BufferedReader getReader() throws IOException {
-    return null;
-  }
-
-  @Override
-  public String getRemoteAddr() {
-    return null;
-  }
-
-  @Override
-  public String getRemoteHost() {
-    return null;
-  }
-
-  @Override
-  public void setAttribute( String s, Object o ) {
-  }
-
-  @Override
-  public void removeAttribute( String s ) {
-  }
-
-  @Override
-  public Locale getLocale() {
-    return null;
-  }
-
-  @Override
-  public Enumeration<Locale> getLocales() {
-    return null;
-  }
-
-  @Override
-  public boolean isSecure() {
-    return false;
-  }
-
-  @Override
-  public RequestDispatcher getRequestDispatcher( String s ) {
-    return null;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public String getRealPath( String s ) {
-    return null;
-  }
-
-  @Override
-  public int getRemotePort() {
-    return 0;
-  }
-
-  @Override
-  public String getLocalName() {
-    return null;
-  }
-
-  @Override
-  public String getLocalAddr() {
-    return null;
-  }
-
-  @Override
-  public int getLocalPort() {
-    return 0;
-  }
-
-  @Override
-  public ServletContext getServletContext() {
-    return null;
-  }
-
-  @Override
-  public AsyncContext startAsync() throws IllegalStateException {
-    return null;
-  }
-
-  @Override
-  public AsyncContext startAsync( ServletRequest servletRequest, ServletResponse servletResponse ) throws IllegalStateException {
-    return null;
-  }
-
-  @Override
-  public boolean isAsyncStarted() {
-    return false;
-  }
-
-  @Override
-  public boolean isAsyncSupported() {
-    return false;
-  }
-
-  @Override
-  public AsyncContext getAsyncContext() {
-    return null;
-  }
-
-  @Override
-  public DispatcherType getDispatcherType() {
-    return null;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletResponse.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletResponse.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletResponse.java
deleted file mode 100644
index 9d20d17..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockHttpServletResponse.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import javax.servlet.ServletOutputStream;
-import javax.servlet.http.Cookie;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.Collection;
-import java.util.Locale;
-
-public class MockHttpServletResponse implements HttpServletResponse {
-
-  @Override
-  public void addCookie( Cookie cookie ) {
-  }
-
-  @Override
-  public boolean containsHeader( String s ) {
-    return false;
-  }
-
-  @Override
-  public String encodeURL( String s ) {
-    return null;
-  }
-
-  @Override
-  public String encodeRedirectURL( String s ) {
-    return null;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public String encodeUrl( String s ) {
-    return null;
-  }
-
-  @Override
-  public String encodeRedirectUrl( String s ) {
-    return null;
-  }
-
-  @Override
-  public void sendError( int i, String s ) throws IOException {
-  }
-
-  @Override
-  public void sendError( int i ) throws IOException {
-  }
-
-  @Override
-  public void sendRedirect( String s ) throws IOException {
-  }
-
-  @Override
-  public void setDateHeader( String s, long l ) {
-  }
-
-  @Override
-  public void addDateHeader( String s, long l ) {
-  }
-
-  @Override
-  public void setHeader( String s, String s1 ) {
-  }
-
-  @Override
-  public void addHeader( String s, String s1 ) {
-  }
-
-  @Override
-  public void setIntHeader( String s, int i ) {
-  }
-
-  @Override
-  public void addIntHeader( String s, int i ) {
-  }
-
-  @Override
-  public void setStatus( int i ) {
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public void setStatus( int i, String s ) {
-  }
-
-  @Override
-  public int getStatus() {
-    return 0;
-  }
-
-  @Override
-  public String getHeader( String s ) {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getHeaders( String s ) {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getHeaderNames() {
-    return null;
-  }
-
-  @Override
-  public String getCharacterEncoding() {
-    return null;
-  }
-
-  @Override
-  public String getContentType() {
-    return null;
-  }
-
-  @Override
-  public ServletOutputStream getOutputStream() throws IOException {
-    return null;
-  }
-
-  @Override
-  public PrintWriter getWriter() throws IOException {
-    return null;
-  }
-
-  @Override
-  public void setCharacterEncoding( String s ) {
-  }
-
-  @Override
-  public void setContentLength( int i ) {
-  }
-
-  @Override
-  public void setContentLengthLong( long l ) {
-  }
-
-  @Override
-  public void setContentType( String s ) {
-  }
-
-  @Override
-  public void setBufferSize( int i ) {
-  }
-
-  @Override
-  public int getBufferSize() {
-    return 0;
-  }
-
-  @Override
-  public void flushBuffer() throws IOException {
-  }
-
-  @Override
-  public void resetBuffer() {
-  }
-
-  @Override
-  public boolean isCommitted() {
-    return false;
-  }
-
-  @Override
-  public void reset() {
-  }
-
-  @Override
-  public void setLocale( Locale locale ) {
-  }
-
-  @Override
-  public Locale getLocale() {
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockInteraction.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockInteraction.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockInteraction.java
deleted file mode 100644
index 1e30d38..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockInteraction.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-public class MockInteraction {
-
-  private MockResponseProvider response = new MockResponseProvider();
-  private MockRequestMatcher request = new MockRequestMatcher( response );
-
-  public MockRequestMatcher expect() {
-    return request;
-  }
-
-  public MockResponseProvider respond() {
-    return response;
-  }
-
-}

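For orientation, here is a minimal sketch of building one of these interactions directly through the fluent API above; the method, path, and payload are hypothetical examples, not taken from any real Knox test, and the org.apache.hadoop.test.mock package names are used as they appear in this diff:

    import java.nio.charset.Charset;
    import org.apache.hadoop.test.mock.MockInteraction;

    public class InteractionSketch {
      public static MockInteraction statusInteraction() {
        MockInteraction interaction = new MockInteraction();
        interaction.expect()                  // request side: MockRequestMatcher
            .method( "GET" )
            .pathInfo( "/v1/status" );
        interaction.respond()                 // response side: MockResponseProvider
            .status( 200 )
            .contentType( "application/json" )
            .content( "{\"ok\":true}", Charset.forName( "UTF-8" ) );
        return interaction;
      }
    }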
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockRequestMatcher.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockRequestMatcher.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockRequestMatcher.java
deleted file mode 100644
index e107e6f..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockRequestMatcher.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.http.NameValuePair;
-import org.apache.http.client.utils.URLEncodedUtils;
-import org.apache.http.message.BasicNameValuePair;
-import org.hamcrest.Matcher;
-import org.hamcrest.Matchers;
-
-import javax.servlet.http.Cookie;
-import javax.servlet.http.HttpServletRequest;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import static org.hamcrest.CoreMatchers.*;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.equalToIgnoringCase;
-import static org.xmlmatchers.XmlMatchers.isEquivalentTo;
-import static org.xmlmatchers.transform.XmlConverters.the;
-import static uk.co.datumedge.hamcrest.json.SameJSONAs.sameJSONAs;
-
-public class MockRequestMatcher {
-
-  private static final Charset UTF8 = Charset.forName( "UTF-8" );
-
-  private String from;
-  private MockResponseProvider response;
-  private Set<String> methods = null;
-  private String pathInfo = null;
-  private String requestURL = null;
-  Map<String,Matcher> headers = null;
-  Set<Cookie> cookies = null;
-  private Map<String,Object> attributes = null;
-  private Map<String,String> queryParams = null;
-  private String contentType = null;
-  private String characterEncoding = null;
-  private Integer contentLength = null;
-  private byte[] entity = null;
-  private Map<String,String[]> formParams = null;
-
-  public MockRequestMatcher( MockResponseProvider response ) {
-    this.response = response;
-  }
-
-  public MockResponseProvider respond() {
-    return response;
-  }
-
-  public MockRequestMatcher from( String from ) {
-    this.from = from;
-    return this;
-  }
-
-  public MockRequestMatcher method( String... methods ) {
-    if( this.methods == null ) {
-      this.methods = new HashSet<>();
-    }
-    if( methods != null ) {
-      for( String method: methods ) {
-        this.methods.add( method );
-      }
-    }
-    return this;
-  }
-
-  public MockRequestMatcher pathInfo( String pathInfo ) {
-    this.pathInfo = pathInfo;
-    return this;
-  }
-
-  public MockRequestMatcher requestUrl( String requestUrl ) {
-    this.requestURL = requestUrl;
-    return this;
-  }
-
-  public MockRequestMatcher header( String name, String value ) {
-    if( headers == null ) {
-      headers = new HashMap<>();
-    }
-    headers.put( name, Matchers.is(value) );
-    return this;
-  }
-
-  public MockRequestMatcher header( String name, Matcher matcher ) {
-    if( headers == null ) {
-      headers = new HashMap<>();
-    }
-    headers.put( name, matcher );
-    return this;
-  }
-
-  public MockRequestMatcher cookie( Cookie cookie ) {
-    if( cookies == null ) {
-      cookies = new HashSet<>();
-    }
-    cookies.add( cookie );
-    return this;
-  }
-
-  public MockRequestMatcher attribute( String name, Object value ) {
-    if( this.attributes == null ) {
-      this.attributes = new HashMap<>();
-    }
-    attributes.put( name, value );
-    return this;
-  }
-
-  public MockRequestMatcher queryParam( String name, String value ) {
-    if( this.queryParams == null ) {
-      this.queryParams = new HashMap<>();
-    }
-    queryParams.put( name, value );
-    return this;
-  }
-
-  public MockRequestMatcher formParam( String name, String... values ) {
-    if( entity != null ) {
-      throw new IllegalStateException( "Entity already specified." );
-    }
-    if( formParams == null ) {
-      formParams = new HashMap<>();
-    }
-    String[] currentValues = formParams.get( name );
-    if( currentValues == null ) {
-      currentValues = values;
-    } else if ( values != null ) {
-      currentValues = ArrayUtils.addAll( currentValues, values );
-    }
-    formParams.put( name, currentValues );
-    return this;
-  }
-
-  public MockRequestMatcher content( String string, Charset charset ) {
-    content( string.getBytes( charset ) );
-    return this;
-  }
-
-  public MockRequestMatcher content( byte[] entity ) {
-    if( formParams != null ) {
-      throw new IllegalStateException( "Form params already specified." );
-    }
-    this.entity = entity;
-    return this;
-  }
-
-  public MockRequestMatcher content( URL url ) throws IOException {
-    content( url.openStream() );
-    return this;
-  }
-
-  public MockRequestMatcher content( InputStream stream ) throws IOException {
-    content( IOUtils.toByteArray( stream ) );
-    return this;
-  }
-
-  public MockRequestMatcher contentType( String contentType ) {
-    this.contentType = contentType;
-    return this;
-  }
-
-  public MockRequestMatcher contentLength( int length ) {
-    this.contentLength = length;
-    return this;
-  }
-
-  public MockRequestMatcher characterEncoding( String charset ) {
-    this.characterEncoding = charset;
-    return this;
-  }
-
-  public void match( HttpServletRequest request ) throws IOException {
-    if( methods != null ) {
-      assertThat(
-          "Request " + request.getMethod() + " " + request.getRequestURL() +
-              " is not using one of the expected HTTP methods",
-          methods, hasItem( request.getMethod() ) );
-    }
-    if( pathInfo != null ) {
-      assertThat(
-          "Request " + request.getMethod() + " " + request.getRequestURL() +
-              " does not have the expected pathInfo",
-          request.getPathInfo(), is( pathInfo ) );
-    }
-    if( requestURL != null ) {
-      assertThat( 
-          "Request " + request.getMethod() + " " + request.getRequestURL() +
-              " does not have the expected requestURL",
-          request.getRequestURL().toString(), is( requestURL ) );
-    }
-    if( headers != null ) {
-      for( Entry<String, Matcher> entry : headers.entrySet() ) {
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " does not have the expected value for header " + entry.getKey(),
-            request.getHeader( entry.getKey() ),  entry.getValue() );
-      }
-    }
-    if( cookies != null ) {
-      List<Cookie> requestCookies = Arrays.asList( request.getCookies() );
-      for( Cookie cookie: cookies ) {
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " does not have the expected cookie " + cookie,
-            requestCookies, hasItem( cookie ) );
-      }
-    }
-    if( contentType != null ) {
-      String[] requestContentType = request.getContentType().split(";",2);
-      assertThat(
-          "Request " + request.getMethod() + " " + request.getRequestURL() +
-              " does not have the expected content type",
-          requestContentType[ 0 ], is( contentType ) );
-    }
-    if( characterEncoding != null ) {
-      assertThat(
-          "Request " + request.getMethod() + " " + request.getRequestURL() +
-              " does not have the expected character encoding",
-          request.getCharacterEncoding(), equalToIgnoringCase( characterEncoding ) );
-    }
-    if( contentLength != null ) {
-      assertThat(
-          "Request " + request.getMethod() + " " + request.getRequestURL() +
-              " does not have the expected content length",
-          request.getContentLength(), is( contentLength ) );
-    }
-    if( attributes != null ) {
-      for( String name: attributes.keySet() ) {
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " is missing attribute '" + name + "'",
-            request.getAttribute( name ), notNullValue() );
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " has wrong value for attribute '" + name + "'",
-            request.getAttribute( name ), is( attributes.get( name ) ) );
-      }
-    }
-    // Note: Cannot use any of the request.getParameter*() methods because they will read the
-    // body and we don't want that to happen.
-    if( queryParams != null ) {
-      String queryString = request.getQueryString();
-      List<NameValuePair> requestParams = parseQueryString( queryString == null ? "" : queryString );
-      for( Entry<String, String> entry : queryParams.entrySet() ) {
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " query string " + queryString + " is missing parameter '" + entry.getKey() + "'",
-            requestParams, hasItem( new BasicNameValuePair(entry.getKey(), entry.getValue())) );
-      }
-    }
-    if( formParams != null ) {
-      String paramString = IOUtils.toString( request.getInputStream(), request.getCharacterEncoding() );
-      List<NameValuePair> requestParams = parseQueryString( paramString == null ? "" : paramString );
-      for( Entry<String, String[]> entry : formParams.entrySet() ) {
-        String[] expectedValues = entry.getValue();
-        for( String expectedValue : expectedValues ) {
-          assertThat(
-              "Request " + request.getMethod() + " " + request.getRequestURL() +
-                  " form params " + paramString + " is missing a value " + expectedValue + " for parameter '" + entry.getKey() + "'",
-              requestParams, hasItem( new BasicNameValuePair(entry.getKey(), expectedValue ) ));
-        }
-      }
-    }
-    if( entity != null ) {
-      if( contentType != null && contentType.endsWith( "/xml" ) ) {
-        String expectEncoding = characterEncoding;
-        String expect = new String( entity, ( expectEncoding == null ? UTF8.name() : expectEncoding ) );
-        String actualEncoding = request.getCharacterEncoding();
-        String actual = IOUtils.toString( request.getInputStream(), actualEncoding == null ? UTF8.name() : actualEncoding );
-        assertThat( the( actual ), isEquivalentTo( the( expect ) ) );
-      } else if ( contentType != null && contentType.endsWith( "/json" ) )  {
-        String expectEncoding = characterEncoding;
-        String expect = new String( entity, ( expectEncoding == null ? UTF8.name() : expectEncoding ) );
-        String actualEncoding = request.getCharacterEncoding();
-        String actual = IOUtils.toString( request.getInputStream(), actualEncoding == null ? UTF8.name() : actualEncoding );
-//        System.out.println( "EXPECT=" + expect );
-//        System.out.println( "ACTUAL=" + actual );
-        assertThat( actual, sameJSONAs( expect ) );
-      } else if( characterEncoding == null || request.getCharacterEncoding() == null ) {
-        byte[] bytes = IOUtils.toByteArray( request.getInputStream() );
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " content does not match the expected content",
-            bytes, is( entity ) );
-      } else {
-        String expect = new String( entity, characterEncoding );
-        String actual = IOUtils.toString( request.getInputStream(), request.getCharacterEncoding() );
-        assertThat(
-            "Request " + request.getMethod() + " " + request.getRequestURL() +
-                " content does not match the expected content",
-            actual, is( expect ) );
-      }
-    }
-  }
-
-  public String toString() {
-    return "from=" + from + ", pathInfo=" + pathInfo;
-  }
-
-  private static List<NameValuePair> parseQueryString( String queryString ) {
-    return URLEncodedUtils.parse(queryString, Charset.defaultCharset());
-  }
-
-}

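Since header() is overloaded to accept a Hamcrest Matcher as well as a literal value, header expectations can be loose or exact. A hedged sketch of that distinction, with invented header values and query parameter:

    import org.apache.hadoop.test.mock.MockInteraction;
    import org.apache.hadoop.test.mock.MockRequestMatcher;
    import static org.hamcrest.Matchers.containsString;

    public class MatcherSketch {
      public static MockRequestMatcher listStatusRequest() {
        return new MockInteraction().expect()
            .method( "GET", "HEAD" )                       // any listed method is accepted
            .header( "Accept", containsString( "json" ) )  // loose match via a Matcher
            .header( "X-Forwarded-Proto", "https" )        // exact match via a String
            .queryParam( "op", "LISTSTATUS" );
      }
    }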
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockResponseProvider.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockResponseProvider.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockResponseProvider.java
deleted file mode 100644
index b1b1178..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockResponseProvider.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import org.apache.commons.io.IOUtils;
-
-import javax.servlet.http.Cookie;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-import java.nio.charset.Charset;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-public class MockResponseProvider {
-
-  Integer errorCode = null;
-  String errorMsg = null;
-  Integer statusCode = null;
-  String redirectUrl = null;
-  Map<String,String> headers = null;
-  Set<Cookie> cookies = null;
-  byte[] entity = null;
-  String contentType = null;
-  String characterEncoding = null;
-  Integer contentLength = null;
-
-  public MockResponseProvider status( int statusCode ) {
-    this.statusCode = statusCode;
-    return this;
-  }
-
-  public MockResponseProvider error( int code, String message ) {
-    errorCode = code;
-    errorMsg = message;
-    return this;
-  }
-
-  public MockResponseProvider redirect( String location ) {
-    redirectUrl = location;
-    return this;
-  }
-
-  public MockResponseProvider header( String name, String value ) {
-    if( headers == null ) {
-      headers = new HashMap<>();
-    }
-    headers.put( name, value );
-    return this;
-  }
-
-  public MockResponseProvider cookie( Cookie cookie ) {
-    if( cookies == null ) {
-      cookies = new HashSet<>();
-    }
-    cookies.add( cookie );
-    return this;
-  }
-
-  public MockResponseProvider content( byte[] entity ) {
-    this.entity = entity;
-    return this;
-  }
-
-  public MockResponseProvider content( String string, Charset charset ) {
-    this.entity = string.getBytes( charset );
-    return this;
-  }
-
-  public MockResponseProvider content( URL url ) throws IOException {
-    content( url.openStream() );
-    return this;
-  }
-
-  public MockResponseProvider content( InputStream stream ) throws IOException {
-    content( IOUtils.toByteArray( stream ) );
-    return this;
-  }
-
-  public MockResponseProvider contentType( String contentType ) {
-    this.contentType = contentType;
-    return this;
-  }
-
-  public MockResponseProvider contentLength( int contentLength ) {
-    this.contentLength = contentLength;
-    return this;
-  }
-
-  public MockResponseProvider characterEncoding( String charset ) {
-    this.characterEncoding = charset;
-    return this;
-  }
-
-  public void apply( HttpServletResponse response ) throws IOException {
-    if( statusCode != null ) {
-      response.setStatus( statusCode );
-    } else {
-      response.setStatus( HttpServletResponse.SC_OK );
-    }
-    if( errorCode != null ) {
-      if( errorMsg != null ) {
-        response.sendError( errorCode, errorMsg );
-      } else {
-        response.sendError( errorCode );
-      }
-    }
-    if( redirectUrl != null ) {
-      response.sendRedirect( redirectUrl );
-    }
-    if( headers != null ) {
-      for( Entry<String, String> entry : headers.entrySet() ) {
-        response.addHeader( entry.getKey(), entry.getValue() );
-      }
-    }
-    if( cookies != null ) {
-      for( Cookie cookie: cookies ) {
-        response.addCookie( cookie );
-      }
-    }
-    if( contentType != null ) {
-      response.setContentType( contentType );
-    }
-    if( characterEncoding != null ) {
-      response.setCharacterEncoding( characterEncoding );
-    }
-    if( contentLength != null ) {
-      response.setContentLength( contentLength );
-    }
-    response.flushBuffer();
-    if( entity != null ) {
-      response.getOutputStream().write( entity );
-      //KNOX-685: response.getOutputStream().flush();
-      response.getOutputStream().close();
-    }
-  }
-
-}

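apply() writes the status (defaulting to 200), any error or redirect, headers, cookies, and content metadata, then flushes the buffer and streams the entity; per the KNOX-685 comment above, the output stream is closed rather than flushed. A small sketch of configuring a provider, with a made-up Location value and body:

    import java.nio.charset.Charset;
    import org.apache.hadoop.test.mock.MockInteraction;
    import org.apache.hadoop.test.mock.MockResponseProvider;

    public class ResponseSketch {
      public static MockResponseProvider created() {
        return new MockInteraction().respond()
            .status( 201 )
            .header( "Location", "https://gateway:8443/created/42" )
            .contentType( "text/plain" )
            .characterEncoding( "UTF-8" )
            .content( "created", Charset.forName( "UTF-8" ) );
      }
    }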
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServer.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServer.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServer.java
deleted file mode 100644
index 5d95ce6..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServer.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import org.eclipse.jetty.server.Handler;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.servlet.ServletContextHandler;
-import org.eclipse.jetty.servlet.ServletHolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.Servlet;
-import java.util.LinkedList;
-import java.util.Queue;
-
-/**
- * An embedded Jetty server with a single servlet deployed on "/*".
- * It is used by populating a queue of "interactions".
- * Each interaction is an expected request and a resulting response.
- * These interactions are added to a queue in a fluent API style.
- * So in most tests, such as GatewayBasicFuncTest.testBasicJsonUseCase, you will see calls like
- * driver.getMock( "WEBHDFS" ).expect()....respond()...;
- * This adds a single interaction to the mock server returned by the driver.getMock( "WEBHDFS" ) call above.
- * Any number of interactions may be added.
- * When a request comes in it is checked against the next expected request in the queue.
- * If it matches, the corresponding response is returned; otherwise a 500 error is returned.
- * Typically at the end of a test you should check that the interaction queue has been consumed by calling isEmpty().
- * The reset() method can be used to ensure everything is cleaned up so that the mock server can be reused between tests.
- * The whole idea was modeled after the REST testing framework REST-assured and aims to be a server-side equivalent.
- */
-public class MockServer {
-
-  private Logger log = LoggerFactory.getLogger( this.getClass() );
-
-  private String name;
-  private Server jetty;
-
-  private Queue<MockInteraction> interactions = new LinkedList<MockInteraction>();
-
-  public MockServer( String name ) {
-    this.name = name;
-  }
-
-  public MockServer( String name, boolean start ) throws Exception {
-    this.name = name;
-    if( start ) {
-      start();
-    }
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public void start() throws Exception {
-    Handler context = createHandler();
-    jetty = new Server(0);
-    jetty.setHandler( context );
-    jetty.start();
-    log.info( "Mock server started on port " + getPort() );
-  }
-
-  public void stop() throws Exception {
-    jetty.stop();
-    jetty.join();
-  }
-
-  private ServletContextHandler createHandler() {
-    Servlet servlet = new MockServlet( getName(), interactions );
-    ServletHolder holder = new ServletHolder( servlet );
-    ServletContextHandler context = new ServletContextHandler( ServletContextHandler.SESSIONS );
-    context.setContextPath( "/" );
-    context.addServlet( holder, "/*" );
-    return context;
-  }
-
-  public int getPort() {
-    return jetty.getURI().getPort();
-  }
-
-  public MockRequestMatcher expect() {
-    MockInteraction interaction = new MockInteraction();
-    interactions.add( interaction );
-    return interaction.expect();
-  }
-
-  public MockResponseProvider respond() {
-    MockInteraction interaction = new MockInteraction();
-    interactions.add( interaction );
-    return interaction.respond();
-  }
-
-  public int getCount() {
-    return interactions.size();
-  }
-
-  public boolean isEmpty() {
-    return interactions.isEmpty();
-  }
-
-  public void reset() {
-    interactions.clear();
-  }
-}

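Putting the pieces together, a minimal end-to-end sketch of the queue-driven flow described in the class javadoc; the server name, path, and JSON body are hypothetical:

    import java.nio.charset.Charset;
    import org.apache.hadoop.test.mock.MockServer;

    public class MockServerSketch {
      public static void main( String[] args ) throws Exception {
        MockServer server = new MockServer( "WEBHDFS", true ); // starts Jetty on an ephemeral port
        server.expect()
            .method( "GET" )
            .pathInfo( "/webhdfs/v1/tmp" )
            .queryParam( "op", "LISTSTATUS" )
            .respond()
            .status( 200 )
            .contentType( "application/json" )
            .content( "{\"FileStatuses\":{\"FileStatus\":[]}}", Charset.forName( "UTF-8" ) );
        // Point a real HTTP client at "http://localhost:" + server.getPort() here;
        // a non-matching request fails the queued expectation and yields a 500.
        System.out.println( "interactions consumed: " + server.isEmpty() );
        server.stop();
      }
    }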
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServlet.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServlet.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServlet.java
deleted file mode 100644
index ca4692c..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServlet.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.util.Queue;
-
-import org.apache.log4j.Logger;
-
-import static org.junit.Assert.fail;
-
-public class MockServlet extends HttpServlet {
-
-  private static final Logger LOG = Logger.getLogger(MockServlet.class.getName());
-
-  public String name;
-  public Queue<MockInteraction> interactions;
-
-  public MockServlet( String name, Queue<MockInteraction> interactions ) {
-    this.name = name;
-    this.interactions = interactions;
-  }
-
-  @Override
-  protected void service( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException {
-    LOG.debug( "service: request=" + request.getMethod() + " " + request.getRequestURL() + "?" + request.getQueryString() );
-    try {
-      if( interactions.isEmpty() ) {
-        fail( "Mock servlet " + name + " received a request but the expected interaction queue is empty." );
-      }
-      MockInteraction interaction = interactions.remove();
-      interaction.expect().match( request );
-      interaction.respond().apply( response );
-      LOG.debug( "service: response=" + response.getStatus() );
-    } catch( AssertionError e ) {
-      LOG.debug( "service: exception=" + e.getMessage() );
-      e.printStackTrace(); // I18N not required.
-      throw new ServletException( e );
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletContext.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletContext.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletContext.java
deleted file mode 100644
index 0df84c3..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletContext.java
+++ /dev/null
@@ -1,293 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterRegistration;
-import javax.servlet.RequestDispatcher;
-import javax.servlet.Servlet;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRegistration;
-import javax.servlet.SessionCookieConfig;
-import javax.servlet.SessionTrackingMode;
-import javax.servlet.descriptor.JspConfigDescriptor;
-import java.io.InputStream;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.Enumeration;
-import java.util.EventListener;
-import java.util.Map;
-import java.util.Set;
-
-public class MockServletContext implements ServletContext {
-
-  @Override
-  public String getContextPath() {
-    return null;
-  }
-
-  @Override
-  public ServletContext getContext( String s ) {
-    return null;
-  }
-
-  @Override
-  public int getMajorVersion() {
-    return 0;
-  }
-
-  @Override
-  public int getMinorVersion() {
-    return 0;
-  }
-
-  @Override
-  public int getEffectiveMajorVersion() {
-    return 0;
-  }
-
-  @Override
-  public int getEffectiveMinorVersion() {
-    return 0;
-  }
-
-  @Override
-  public String getMimeType( String s ) {
-    return null;
-  }
-
-  @Override
-  public Set<String> getResourcePaths( String s ) {
-    return null;
-  }
-
-  @Override
-  public URL getResource( String s ) throws MalformedURLException {
-    return null;
-  }
-
-  @Override
-  public InputStream getResourceAsStream( String s ) {
-    return null;
-  }
-
-  @Override
-  public RequestDispatcher getRequestDispatcher( String s ) {
-    return null;
-  }
-
-  @Override
-  public RequestDispatcher getNamedDispatcher( String s ) {
-    return null;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public Servlet getServlet( String s ) throws ServletException {
-    return null;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public Enumeration<Servlet> getServlets() {
-    return null;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public Enumeration<String> getServletNames() {
-    return null;
-  }
-
-  @Override
-  public void log( String s ) {
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public void log( Exception e, String s ) {
-  }
-
-  @Override
-  public void log( String s, Throwable throwable ) {
-  }
-
-  @Override
-  public String getRealPath( String s ) {
-    return null;
-  }
-
-  @Override
-  public String getServerInfo() {
-    return null;
-  }
-
-  @Override
-  public String getInitParameter( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getInitParameterNames() {
-    return null;
-  }
-
-  @Override
-  public boolean setInitParameter( String s, String s1 ) {
-    return false;
-  }
-
-  @Override
-  public Object getAttribute( String s ) {
-    return null;
-  }
-
-  @Override
-  public Enumeration<String> getAttributeNames() {
-    return null;
-  }
-
-  @Override
-  public void setAttribute( String s, Object o ) {
-  }
-
-  @Override
-  public void removeAttribute( String s ) {
-  }
-
-  @Override
-  public String getServletContextName() {
-    return null;
-  }
-
-  @Override
-  public ServletRegistration.Dynamic addServlet( String s, String s1 ) {
-    return null;
-  }
-
-  @Override
-  public ServletRegistration.Dynamic addServlet( String s, Servlet servlet ) {
-    return null;
-  }
-
-  @Override
-  public ServletRegistration.Dynamic addServlet( String s, Class<? extends Servlet> aClass ) {
-    return null;
-  }
-
-  @Override
-  public <T extends Servlet> T createServlet( Class<T> tClass ) throws ServletException {
-    return null;
-  }
-
-  @Override
-  public ServletRegistration getServletRegistration( String s ) {
-    return null;
-  }
-
-  @Override
-  public Map<String, ? extends ServletRegistration> getServletRegistrations() {
-    return null;
-  }
-
-  @Override
-  public FilterRegistration.Dynamic addFilter( String s, String s1 ) {
-    return null;
-  }
-
-  @Override
-  public FilterRegistration.Dynamic addFilter( String s, Filter filter ) {
-    return null;
-  }
-
-  @Override
-  public FilterRegistration.Dynamic addFilter( String s, Class<? extends Filter> aClass ) {
-    return null;
-  }
-
-  @Override
-  public <T extends Filter> T createFilter( Class<T> tClass ) throws ServletException {
-    return null;
-  }
-
-  @Override
-  public FilterRegistration getFilterRegistration( String s ) {
-    return null;
-  }
-
-  @Override
-  public Map<String, ? extends FilterRegistration> getFilterRegistrations() {
-    return null;
-  }
-
-  @Override
-  public SessionCookieConfig getSessionCookieConfig() {
-    return null;
-  }
-
-  @Override
-  public void setSessionTrackingModes( Set<SessionTrackingMode> sessionTrackingModes ) {
-  }
-
-  @Override
-  public Set<SessionTrackingMode> getDefaultSessionTrackingModes() {
-    return null;
-  }
-
-  @Override
-  public Set<SessionTrackingMode> getEffectiveSessionTrackingModes() {
-    return null;
-  }
-
-  @Override
-  public void addListener( String s ) {
-  }
-
-  @Override
-  public <T extends EventListener> void addListener( T t ) {
-  }
-
-  @Override
-  public void addListener( Class<? extends EventListener> aClass ) {
-  }
-
-  @Override
-  public <T extends EventListener> T createListener( Class<T> tClass ) throws ServletException {
-    return null;
-  }
-
-  @Override
-  public JspConfigDescriptor getJspConfigDescriptor() {
-    return null;
-  }
-
-  @Override
-  public ClassLoader getClassLoader() {
-    return null;
-  }
-
-  @Override
-  public void declareRoles( String... strings ) {
-  }
-
-  @Override
-  public String getVirtualServerName() {
-    throw new UnsupportedOperationException();
-  }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletInputStream.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletInputStream.java b/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletInputStream.java
deleted file mode 100644
index 227dc1c..0000000
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/mock/MockServletInputStream.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.mock;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import javax.servlet.ReadListener;
-import javax.servlet.ServletInputStream;
-
-public class MockServletInputStream extends ServletInputStream {
-
-  private InputStream stream;
-
-  public MockServletInputStream( InputStream stream ) {
-    this.stream = stream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    return stream.read();
-  }
-
-  @Override
-  public boolean isFinished() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public boolean isReady() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void setReadListener( ReadListener readListener ) {
-    throw new UnsupportedOperationException();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/Console.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/Console.java b/gateway-test-utils/src/main/java/org/apache/knox/test/Console.java
new file mode 100644
index 0000000..0965748
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/Console.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
+public class Console {
+
+  PrintStream oldOut, newOut;
+  PrintStream oldErr, newErr;
+  ByteArrayOutputStream newOutBuf, newErrBuf;
+
+  public void capture() {
+    oldErr = System.err;
+    newErrBuf = new ByteArrayOutputStream();
+    newErr = new PrintStream( newErrBuf );
+
+    oldOut = System.out; // I18N not required.
+    newOutBuf = new ByteArrayOutputStream();
+    newOut = new PrintStream( newOutBuf );
+
+    System.setErr( newErr );
+    System.setOut( newOut );
+  }
+
+  public byte[] getOut() {
+    return newOutBuf.toByteArray();
+  }
+
+  public byte[] getErr() {
+    return newErrBuf.toByteArray();
+  }
+
+  public void release() {
+    System.setErr( oldErr );
+    System.setOut( oldOut );
+    newErr.close();
+    newOut.close();
+  }
+
+}

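Console swaps System.out and System.err for in-memory buffers, so release() must always run or the JVM's real streams stay replaced. A usage sketch:

    import java.nio.charset.StandardCharsets;
    import org.apache.knox.test.Console;

    public class ConsoleSketch {
      public static void main( String[] args ) {
        Console console = new Console();
        console.capture();
        try {
          System.out.println( "hello" );      // written to the buffer, not the terminal
        } finally {
          console.release();                  // restore the original streams
        }
        String out = new String( console.getOut(), StandardCharsets.UTF_8 );
        System.out.println( "captured: " + out.trim() );
      }
    }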
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java b/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
new file mode 100644
index 0000000..5437ce1
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/TestUtils.java
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.io.StringWriter;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.URL;
+import java.nio.ByteBuffer;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Logger;
+import org.apache.velocity.Template;
+import org.apache.velocity.VelocityContext;
+import org.apache.velocity.app.VelocityEngine;
+import org.apache.velocity.runtime.RuntimeConstants;
+import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
+import org.eclipse.jetty.http.HttpTester;
+import org.eclipse.jetty.servlet.ServletTester;
+
+public class TestUtils {
+
+  private static Logger LOG = Logger.getLogger(TestUtils.class);
+
+  public static final long SHORT_TIMEOUT = 1000L;
+  public static final long MEDIUM_TIMEOUT = 20 * 1000L;
+  public static final long LONG_TIMEOUT = 60 * 1000L;
+
+  public static String getResourceName( Class clazz, String name ) {
+    name = clazz.getName().replaceAll( "\\.", "/" ) + "/" + name;
+    return name;
+  }
+
+  public static URL getResourceUrl( Class clazz, String name ) throws FileNotFoundException {
+    name = getResourceName( clazz, name );
+    URL url = ClassLoader.getSystemResource( name );
+    if( url == null ) {
+      throw new FileNotFoundException( name );
+    }
+    return url;
+  }
+
+  public static URL getResourceUrl( String name ) throws FileNotFoundException {
+    URL url = ClassLoader.getSystemResource( name );
+    if( url == null ) {
+      throw new FileNotFoundException( name );
+    }
+    return url;
+  }
+
+  public static InputStream getResourceStream( String name ) throws IOException {
+    URL url = ClassLoader.getSystemResource( name );
+    InputStream stream = url.openStream();
+    return stream;
+  }
+
+  public static InputStream getResourceStream( Class clazz, String name ) throws IOException {
+    URL url = getResourceUrl( clazz, name );
+    InputStream stream = url.openStream();
+    return stream;
+  }
+
+  public static Reader getResourceReader( String name, String charset ) throws IOException {
+    return new InputStreamReader( getResourceStream( name ), charset );
+  }
+
+  public static Reader getResourceReader( Class clazz, String name, String charset ) throws IOException {
+    return new InputStreamReader( getResourceStream( clazz, name ), charset );
+  }
+
+  public static String getResourceString( Class clazz, String name, String charset ) throws IOException {
+    return IOUtils.toString( getResourceReader( clazz, name, charset ) );
+  }
+
+  public static File createTempDir( String prefix ) throws IOException {
+    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
+    File tempDir = new File( targetDir, prefix + UUID.randomUUID() );
+    FileUtils.forceMkdir( tempDir );
+    return tempDir;
+  }
+
+  public static void LOG_ENTER() {
+    StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
+    System.out.flush();
+    System.out.println( String.format( "Running %s#%s", caller.getClassName(), caller.getMethodName() ) );
+    System.out.flush();
+  }
+
+  public static void LOG_EXIT() {
+    StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
+    System.out.flush();
+    System.out.println( String.format( "Exiting %s#%s", caller.getClassName(), caller.getMethodName() ) );
+    System.out.flush();
+  }
+
+  public static void awaitPortOpen( InetSocketAddress address, int timeout, int delay ) throws InterruptedException {
+    long maxTime = System.currentTimeMillis() + timeout;
+    do {
+      try {
+        Socket socket = new Socket();
+        socket.connect( address, delay );
+        socket.close();
+        return;
+      } catch ( IOException e ) {
+        //e.printStackTrace();
+      }
+    } while( System.currentTimeMillis() < maxTime );
+    throw new IllegalStateException( "Timed out " + timeout + " waiting for port " + address );
+  }
+
+  public static void awaitNon404HttpStatus( URL url, int timeout, int delay ) throws InterruptedException {
+    long maxTime = System.currentTimeMillis() + timeout;
+    do {
+      Thread.sleep( delay );
+      HttpURLConnection conn = null;
+      try {
+        conn = (HttpURLConnection)url.openConnection();
+        conn.getInputStream().close();
+        return;
+      } catch ( IOException e ) {
+        //e.printStackTrace();
+        try {
+          if( conn != null && conn.getResponseCode() != 404 ) {
+            return;
+          }
+        } catch ( IOException ee ) {
+          //ee.printStackTrace();
+        }
+      }
+    } while( System.currentTimeMillis() < maxTime );
+    throw new IllegalStateException( "Timed out " + timeout + " waiting for URL " + url );
+  }
+
+  public static String merge( String resource, Properties properties ) {
+    ClasspathResourceLoader loader = new ClasspathResourceLoader();
+    loader.getResourceStream( resource ); // Fails fast with ResourceNotFoundException if the template is missing.
+
+    VelocityEngine engine = new VelocityEngine();
+    Properties config = new Properties();
+    config.setProperty( RuntimeConstants.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem" );
+    config.setProperty( RuntimeConstants.RESOURCE_LOADER, "classpath" );
+    config.setProperty( "classpath.resource.loader.class", ClasspathResourceLoader.class.getName() );
+    engine.init( config );
+
+    VelocityContext context = new VelocityContext( properties );
+    Template template = engine.getTemplate( resource );
+    StringWriter writer = new StringWriter();
+    template.merge( context, writer );
+    return writer.toString();
+  }
+
+  public static String merge( Class base, String resource, Properties properties ) {
+    String baseResource = base.getName().replaceAll( "\\.", "/" );
+    String fullResource = baseResource + "/" + resource;
+    return merge( fullResource, properties );
+  }
+
+  public static int findFreePort() throws IOException {
+    ServerSocket socket = new ServerSocket(0);
+    int port = socket.getLocalPort();
+    socket.close();
+    return port;
+  }
+
+  public static void waitUntilNextSecond() {
+    long before = System.currentTimeMillis();
+    long wait;
+    while( ( wait = ( 1000 - ( System.currentTimeMillis() - before ) ) ) > 0 ) {
+      try {
+        Thread.sleep( wait );
+      } catch( InterruptedException e ) {
+        // Ignore.
+      }
+    }
+  }
+
+  public static HttpTester.Response execute( ServletTester server, HttpTester.Request request ) throws Exception {
+    LOG.debug( "execute: request=" + request );
+    ByteBuffer requestBuffer = request.generate();
+    LOG.trace( "execute: requestBuffer=[" + new String(requestBuffer.array(),0,requestBuffer.limit()) + "]" );
+    ByteBuffer responseBuffer = server.getResponses( requestBuffer, 30, TimeUnit.SECONDS );
+    HttpTester.Response response = HttpTester.parseResponse( responseBuffer );
+    LOG.trace( "execute: responseBuffer=[" + new String(responseBuffer.array(),0,responseBuffer.limit()) + "]" );
+    LOG.debug( "execute: reponse=" + response );
+    return response;
+  }
+
+
+}

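A sketch of how findFreePort() and awaitPortOpen() typically combine; note the inherent race, since another process could in principle grab the port between the two calls:

    import java.net.InetSocketAddress;
    import org.apache.knox.test.TestUtils;

    public class PortSketch {
      public static void main( String[] args ) throws Exception {
        int port = TestUtils.findFreePort();   // bind-and-release an ephemeral port
        // ... start the server under test bound to 'port' here ...
        TestUtils.awaitPortOpen(
            new InetSocketAddress( "localhost", port ),
            (int) TestUtils.MEDIUM_TIMEOUT,    // overall timeout in ms
            100 );                             // per-attempt connect timeout in ms
      }
    }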
http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/FastTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/FastTests.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/FastTests.java
new file mode 100644
index 0000000..2360c17
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/FastTests.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface FastTests {
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/ManualTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/ManualTests.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/ManualTests.java
new file mode 100644
index 0000000..0065357
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/ManualTests.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface ManualTests {
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/MediumTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/MediumTests.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/MediumTests.java
new file mode 100644
index 0000000..f5d354b
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/MediumTests.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface MediumTests {
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/ReleaseTest.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/ReleaseTest.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/ReleaseTest.java
new file mode 100644
index 0000000..6e2279e
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/ReleaseTest.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface ReleaseTest {
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/SlowTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/SlowTests.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/SlowTests.java
new file mode 100644
index 0000000..3f0b50c
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/SlowTests.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface SlowTests {
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/UnitTests.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/UnitTests.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/UnitTests.java
new file mode 100644
index 0000000..0d91e00
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/UnitTests.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface UnitTests {
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/category/VerifyTest.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/category/VerifyTest.java b/gateway-test-utils/src/main/java/org/apache/knox/test/category/VerifyTest.java
new file mode 100644
index 0000000..825c08f
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/category/VerifyTest.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.category;
+
+public interface VerifyTest {
+}
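
These category interfaces are empty JUnit marker types. As a minimal sketch of how
a test class might opt into one of them (the class name and test body here are
illustrative only, not part of this change):

    import org.apache.knox.test.category.MediumTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;
    import static org.junit.Assert.assertTrue;

    @Category( MediumTests.class )
    public class ExampleMediumTest {
      @Test
      public void exampleScenario() {
        assertTrue( true );
      }
    }

Build tooling can then select categories by name, e.g. Maven Surefire's
<groups>org.apache.knox.test.category.MediumTests</groups> setting.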

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/log/CollectAppender.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/log/CollectAppender.java b/gateway-test-utils/src/main/java/org/apache/knox/test/log/CollectAppender.java
new file mode 100644
index 0000000..3ab0c93
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/log/CollectAppender.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.log;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+
+public class CollectAppender extends AppenderSkeleton {
+
+  public CollectAppender() {
+    super();
+  }
+
+  public static BlockingQueue<LoggingEvent> queue = new LinkedBlockingQueue<LoggingEvent>();
+  public static boolean closed = false;
+
+  @Override
+  protected void append( LoggingEvent event ) {
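+    // getProperties() forces the event to capture its MDC state now, so the
+    // captured state is intact when the event is consumed later, possibly on
+    // another thread.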
+    event.getProperties();
+    queue.add( event );
+  }
+
+  @Override
+  public void close() {
+    closed = true;
+  }
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+
+}
\ No newline at end of file
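
CollectAppender is attached via log4j configuration and then drained through its
static queue. A minimal usage sketch, assuming a log4j.properties that wires the
root logger to this appender (the property lines and test fragment are
illustrative):

    log4j.rootLogger=INFO, collect
    log4j.appender.collect=org.apache.knox.test.log.CollectAppender

    // In a test, after exercising the code under test:
    org.apache.log4j.spi.LoggingEvent event = CollectAppender.queue.poll();
    if( event != null ) {
      String message = event.getRenderedMessage();
    }

Since the queue is static and shared, tests should clear it between runs
(CollectAppender.queue.clear()) to avoid cross-test interference.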

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpAppender.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpAppender.java b/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpAppender.java
new file mode 100644
index 0000000..80a7fce
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpAppender.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.log;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.ErrorHandler;
+import org.apache.log4j.spi.Filter;
+import org.apache.log4j.spi.LoggingEvent;
+
+import java.util.Enumeration;
+
+public class NoOpAppender implements Appender {
+
+  public static Enumeration<Appender> setUp() {
+    Enumeration<Appender> appenders = (Enumeration<Appender>)Logger.getRootLogger().getAllAppenders();
+    Logger.getRootLogger().removeAllAppenders();
+    Logger.getRootLogger().addAppender( new NoOpAppender() );
+    return appenders;
+  }
+
+  public static void tearDown( Enumeration<Appender> appenders ) {
+    if( appenders != null ) {
+      while( appenders.hasMoreElements() ) {
+        Logger.getRootLogger().addAppender( appenders.nextElement() );
+      }
+    }
+  }
+
+  @Override
+  public void addFilter( Filter newFilter ) {
+  }
+
+  @Override
+  public Filter getFilter() {
+    return null;
+  }
+
+  @Override
+  public void clearFilters() {
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public void doAppend( LoggingEvent event ) {
+  }
+
+  @Override
+  public String getName() {
+    return this.getClass().getName();
+  }
+
+  @Override
+  public void setErrorHandler( ErrorHandler errorHandler ) {
+  }
+
+  @Override
+  public ErrorHandler getErrorHandler() {
+    return null;
+  }
+
+  @Override
+  public void setLayout( Layout layout ) {
+  }
+
+  @Override
+  public Layout getLayout() {
+    return null;
+  }
+
+  @Override
+  public void setName( String name ) {
+  }
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+}
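
The setUp()/tearDown() pair is meant to bracket noisy code. A short sketch of that
pairing (the try block body is illustrative):

    java.util.Enumeration<org.apache.log4j.Appender> saved = NoOpAppender.setUp();
    try {
      // run code that would otherwise spam the logs
    } finally {
      NoOpAppender.tearDown( saved );
    }

Because setUp() removes every root appender and returns them, the finally block
matters: skipping tearDown() would leave the root logger silenced for later tests.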

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpLogger.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpLogger.java b/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpLogger.java
new file mode 100644
index 0000000..2c6763f
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/log/NoOpLogger.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.log;
+
+import org.eclipse.jetty.util.log.Logger;
+
+public class NoOpLogger implements Logger {
+
+  @Override
+  public String getName() {
+    return "";
+  }
+
+  @Override
+  public void warn( String msg, Object... args ) {
+  }
+
+  @Override
+  public void warn( Throwable thrown ) {
+  }
+
+  @Override
+  public void warn( String msg, Throwable thrown ) {
+  }
+
+  @Override
+  public void info( String msg, Object... args ) {
+  }
+
+  @Override
+  public void info( Throwable thrown ) {
+  }
+
+  @Override
+  public void info( String msg, Throwable thrown ) {
+  }
+
+  @Override
+  public boolean isDebugEnabled() {
+    return false;
+  }
+
+  @Override
+  public void setDebugEnabled( boolean enabled ) {
+  }
+
+  @Override
+  public void debug( String msg, Object... args ) {
+  }
+
+  @Override
+  public void debug( String msg, long arg ) {
+  }
+
+  @Override
+  public void debug( Throwable thrown ) {
+  }
+
+  @Override
+  public void debug( String msg, Throwable thrown ) {
+  }
+
+  @Override
+  public Logger getLogger( String name ) {
+    return this;
+  }
+
+  @Override
+  public void ignore( Throwable ignored ) {
+  }
+
+}
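
NoOpLogger plugs into Jetty's logging facade. A minimal sketch of silencing Jetty
in a test, assuming a Jetty version that still exposes the static setter:

    org.eclipse.jetty.util.log.Log.setLog( new NoOpLogger() );

Since getLogger(String) returns this, the one instance silences every named Jetty
logger, not just the root.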

http://git-wip-us.apache.org/repos/asf/knox/blob/1451428f/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockFilterConfig.java
----------------------------------------------------------------------
diff --git a/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockFilterConfig.java b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockFilterConfig.java
new file mode 100644
index 0000000..39a3625
--- /dev/null
+++ b/gateway-test-utils/src/main/java/org/apache/knox/test/mock/MockFilterConfig.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.knox.test.mock;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import java.util.Enumeration;
+
+public class MockFilterConfig implements FilterConfig {
+
+  @Override
+  public String getFilterName() {
+    return null;
+  }
+
+  @Override
+  public ServletContext getServletContext() {
+    return null;
+  }
+
+  @Override
+  public String getInitParameter( String s ) {
+    return null;
+  }
+
+  @Override
+  public Enumeration<String> getInitParameterNames() {
+    return null;
+  }
+
+}
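
A sketch of the typical use, initializing a servlet Filter under test (SomeFilter
is a hypothetical stand-in for whatever Filter implementation the test constructs):

    javax.servlet.Filter filterUnderTest = new SomeFilter(); // hypothetical filter
    filterUnderTest.init( new MockFilterConfig() );

Every accessor returns null, so this only suits filters that tolerate an empty
configuration; filters that read init parameters need a richer mock.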


[50/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
index c6e373d,0000000..543d294
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
@@@ -1,895 -1,0 +1,915 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.knox.gateway.services.topology.impl;
 +
 +
 +import org.apache.commons.digester3.Digester;
 +import org.apache.commons.digester3.binder.DigesterLoader;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.GatewayServer;
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.ClusterConfigurationMonitorService;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.topology.TopologyMonitor;
 +import org.apache.knox.gateway.topology.TopologyProvider;
 +import org.apache.knox.gateway.topology.builder.TopologyBuilder;
 +import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
 +import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitor;
 +import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorFactory;
++import org.apache.knox.gateway.topology.simple.SimpleDescriptor;
++import org.apache.knox.gateway.topology.simple.SimpleDescriptorFactory;
 +import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.topology.xml.AmbariFormatXmlTopologyRules;
 +import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.eclipse.persistence.jaxb.JAXBContextProperties;
 +import org.xml.sax.SAXException;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import static org.apache.commons.digester3.binder.DigesterLoader.newLoader;
 +
 +
 +public class DefaultTopologyService
 +    extends FileAlterationListenerAdaptor
 +    implements TopologyService, TopologyMonitor, TopologyProvider, FileFilter, FileAlterationListener {
 +
 +  private static Auditor auditor = AuditServiceFactory.getAuditService().getAuditor(
 +    AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +    AuditConstants.KNOX_COMPONENT_NAME);
 +
 +  private static final List<String> SUPPORTED_TOPOLOGY_FILE_EXTENSIONS = new ArrayList<String>();
 +  static {
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("xml");
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("conf");
 +  }
 +
 +  private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
 +  private static DigesterLoader digesterLoader = newLoader(new KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules());
 +  private List<FileAlterationMonitor> monitors = new ArrayList<>();
 +  private File topologiesDirectory;
 +  private File sharedProvidersDirectory;
 +  private File descriptorsDirectory;
 +
 +  private DescriptorsMonitor descriptorsMonitor;
 +
 +  private Set<TopologyListener> listeners;
 +  private volatile Map<File, Topology> topologies;
 +  private AliasService aliasService;
 +
 +  private RemoteConfigurationMonitor remoteMonitor = null;
 +
 +  private Topology loadTopology(File file) throws IOException, SAXException, URISyntaxException, InterruptedException {
 +    final long TIMEOUT = 250; //ms
 +    final long DELAY = 50; //ms
 +    log.loadingTopologyFile(file.getAbsolutePath());
 +    Topology topology;
 +    long start = System.currentTimeMillis();
 +    while (true) {
 +      try {
 +        topology = loadTopologyAttempt(file);
 +        break;
 +      } catch (IOException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      } catch (SAXException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      }
 +    }
 +    return topology;
 +  }
 +
 +  private Topology loadTopologyAttempt(File file) throws IOException, SAXException, URISyntaxException {
 +    Topology topology;
 +    Digester digester = digesterLoader.newDigester();
 +    TopologyBuilder topologyBuilder = digester.parse(FileUtils.openInputStream(file));
 +    if (null == topologyBuilder) {
 +      return null;
 +    }
 +    topology = topologyBuilder.build();
 +    topology.setUri(file.toURI());
 +    topology.setName(FilenameUtils.removeExtension(file.getName()));
 +    topology.setTimestamp(file.lastModified());
 +    return topology;
 +  }
 +
 +  private void redeployTopology(Topology topology) {
 +    File topologyFile = new File(topology.getUri());
 +    try {
 +      TopologyValidator tv = new TopologyValidator(topology);
 +
 +      if(!tv.validateTopology()) {
 +        throw new SAXException(tv.getErrorString());
 +      }
 +
 +      long start = System.currentTimeMillis();
 +      long limit = 1000L; // One second.
 +      long elapsed = 1;
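 +      // 'Touch' the topology file so the file-alteration monitor notices a change;
 +      // retry for up to the limit because lastModified() granularity can swallow
 +      // small timestamp bumps.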
 +      while (elapsed <= limit) {
 +        try {
 +          long origTimestamp = topologyFile.lastModified();
 +          long setTimestamp = Math.max(System.currentTimeMillis(), topologyFile.lastModified() + elapsed);
 +          if(topologyFile.setLastModified(setTimestamp)) {
 +            long newTimestamp = topologyFile.lastModified();
 +            if(newTimestamp > origTimestamp) {
 +              break;
 +            } else {
 +              Thread.sleep(10);
 +              elapsed = System.currentTimeMillis() - start;
 +              continue;
 +            }
 +          } else {
 +            auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +                ActionOutcome.FAILURE);
 +            log.failedToRedeployTopology(topology.getName());
 +            break;
 +          }
 +        } catch (InterruptedException e) {
 +          auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +              ActionOutcome.FAILURE);
 +          log.failedToRedeployTopology(topology.getName(), e);
 +          e.printStackTrace();
 +        }
 +      }
 +    } catch (SAXException e) {
 +      auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToRedeployTopology(topology.getName(), e);
 +    }
 +  }
 +
 +  private List<TopologyEvent> createChangeEvents(
 +      Map<File, Topology> oldTopologies,
 +      Map<File, Topology> newTopologies) {
 +    ArrayList<TopologyEvent> events = new ArrayList<TopologyEvent>();
 +    // Go through the old topologies and find anything that was deleted.
 +    for (File file : oldTopologies.keySet()) {
 +      if (!newTopologies.containsKey(file)) {
 +        events.add(new TopologyEvent(TopologyEvent.Type.DELETED, oldTopologies.get(file)));
 +      }
 +    }
 +    // Go through the new topologies and figure out what was updated vs added.
 +    for (File file : newTopologies.keySet()) {
 +      if (oldTopologies.containsKey(file)) {
 +        Topology oldTopology = oldTopologies.get(file);
 +        Topology newTopology = newTopologies.get(file);
 +        if (newTopology.getTimestamp() > oldTopology.getTimestamp()) {
 +          events.add(new TopologyEvent(TopologyEvent.Type.UPDATED, newTopologies.get(file)));
 +        }
 +      } else {
 +        events.add(new TopologyEvent(TopologyEvent.Type.CREATED, newTopologies.get(file)));
 +      }
 +    }
 +    return events;
 +  }
 +
 +  private File calculateAbsoluteProvidersConfigDir(GatewayConfig config) {
 +    File pcDir = new File(config.getGatewayProvidersConfigDir());
 +    return pcDir.getAbsoluteFile();
 +  }
 +
 +  private File calculateAbsoluteDescriptorsDir(GatewayConfig config) {
 +    File descDir = new File(config.getGatewayDescriptorsDir());
 +    return descDir.getAbsoluteFile();
 +  }
 +
 +  private File calculateAbsoluteTopologiesDir(GatewayConfig config) {
 +    File topoDir = new File(config.getGatewayTopologyDir());
 +    topoDir = topoDir.getAbsoluteFile();
 +    return topoDir;
 +  }
 +
 +  private File calculateAbsoluteConfigDir(GatewayConfig config) {
 +    File configDir;
 +
 +    String path = config.getGatewayConfDir();
 +    configDir = (path != null) ? new File(path) : (new File(config.getGatewayTopologyDir())).getParentFile();
 +
 +    return configDir.getAbsoluteFile();
 +  }
 +
 +  private void  initListener(FileAlterationMonitor  monitor,
 +                            File                   directory,
 +                            FileFilter             filter,
 +                            FileAlterationListener listener) {
 +    monitors.add(monitor);
 +    FileAlterationObserver observer = new FileAlterationObserver(directory, filter);
 +    observer.addListener(listener);
 +    monitor.addObserver(observer);
 +  }
 +
 +  private void initListener(File directory, FileFilter filter, FileAlterationListener listener) throws IOException, SAXException {
 +    // Increasing the monitoring interval to 5 seconds as profiling has shown
 +    // this is rather expensive in terms of generated garbage objects.
 +    initListener(new FileAlterationMonitor(5000L), directory, filter, listener);
 +  }
 +
 +  private Map<File, Topology> loadTopologies(File directory) {
 +    Map<File, Topology> map = new HashMap<>();
 +    if (directory.isDirectory() && directory.canRead()) {
 +      File[] existingTopologies = directory.listFiles(this);
 +      if (existingTopologies != null) {
 +        for (File file : existingTopologies) {
 +          try {
 +            Topology loadTopology = loadTopology(file);
 +            if (null != loadTopology) {
 +              map.put(file, loadTopology);
 +            } else {
 +              auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +                      ActionOutcome.FAILURE);
 +              log.failedToLoadTopology(file.getAbsolutePath());
 +            }
 +          } catch (IOException e) {
 +            // Maybe it makes sense to throw an exception here instead of just logging
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +                    ActionOutcome.FAILURE);
 +            log.failedToLoadTopology(file.getAbsolutePath(), e);
 +          } catch (SAXException e) {
 +            // Maybe it makes sense to throw an exception here instead of just logging
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +                    ActionOutcome.FAILURE);
 +            log.failedToLoadTopology(file.getAbsolutePath(), e);
 +          } catch (Exception e) {
 +            // Maybe it makes sense to throw an exception here instead of just logging
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
 +                    ActionOutcome.FAILURE);
 +            log.failedToLoadTopology(file.getAbsolutePath(), e);
 +          }
 +        }
 +      }
 +    }
 +    return map;
 +  }
 +
 +  public void setAliasService(AliasService as) {
 +    this.aliasService = as;
 +  }
 +
 +  public void deployTopology(Topology t){
 +
 +    try {
 +      File temp = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml.temp");
 +      Package topologyPkg = Topology.class.getPackage();
 +      String pkgName = topologyPkg.getName();
 +      String bindingFile = pkgName.replace(".", "/") + "/topology_binding-xml.xml";
 +
 +      Map<String, Object> properties = new HashMap<>(1);
 +      properties.put(JAXBContextProperties.OXM_METADATA_SOURCE, bindingFile);
 +      JAXBContext jc = JAXBContext.newInstance(pkgName, Topology.class.getClassLoader(), properties);
 +      Marshaller mr = jc.createMarshaller();
 +
 +      mr.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
 +      mr.marshal(t, temp);
 +
 +      File topology = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml");
 +      if(!temp.renameTo(topology)) {
 +        FileUtils.forceDelete(temp);
 +        throw new IOException("Could not rename temp file");
 +      }
 +
 +      // This code will check if the topology is valid, and retrieve the errors if it is not.
 +      TopologyValidator validator = new TopologyValidator( topology.getAbsolutePath() );
 +      if( !validator.validateTopology() ){
 +        throw new SAXException( validator.getErrorString() );
 +      }
 +
 +
 +    } catch (JAXBException e) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), e);
 +    } catch (IOException io) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), io);
 +    } catch (SAXException sx){
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), sx);
 +    }
 +    reloadTopologies();
 +  }
 +
 +  public void redeployTopologies(String topologyName) {
 +
 +    for (Topology topology : getTopologies()) {
 +      if (topologyName == null || topologyName.equals(topology.getName())) {
 +        redeployTopology(topology);
 +      }
 +    }
 +
 +  }
 +
 +  public void reloadTopologies() {
 +    try {
 +      synchronized (this) {
 +        Map<File, Topology> oldTopologies = topologies;
 +        Map<File, Topology> newTopologies = loadTopologies(topologiesDirectory);
 +        List<TopologyEvent> events = createChangeEvents(oldTopologies, newTopologies);
 +        topologies = newTopologies;
 +        notifyChangeListeners(events);
 +      }
 +    } catch (Exception e) {
 +      // Maybe it makes sense to throw an exception here instead of just logging
 +      log.failedToReloadTopologies(e);
 +    }
 +  }
 +
 +  public void deleteTopology(Topology t) {
 +    File topoDir = topologiesDirectory;
 +
 +    if(topoDir.isDirectory() && topoDir.canRead()) {
 +      for (File f : listFiles(topoDir)) {
 +        String fName = FilenameUtils.getBaseName(f.getName());
 +        if(fName.equals(t.getName())) {
 +          f.delete();
 +        }
 +      }
 +    }
 +    reloadTopologies();
 +  }
 +
 +  private void notifyChangeListeners(List<TopologyEvent> events) {
 +    for (TopologyListener listener : listeners) {
 +      try {
 +        listener.handleTopologyEvent(events);
 +      } catch (RuntimeException e) {
 +        auditor.audit(Action.LOAD, "Topology_Event", ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +        log.failedToHandleTopologyEvents(e);
 +      }
 +    }
 +  }
 +
 +  public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
 +    File tFile = null;
 +    Map<String, List<String>> urls = new HashMap<>();
 +    if (topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
 +      for (File f : listFiles(topologiesDirectory)) {
 +        if (FilenameUtils.removeExtension(f.getName()).equals(t.getName())) {
 +          tFile = f;
 +        }
 +      }
 +    }
 +    Set<ServiceDefinition> defs;
 +    if(tFile != null) {
 +      defs = ServiceDefinitionsLoader.getServiceDefinitions(new File(config.getGatewayServicesDir()));
 +
 +      for(ServiceDefinition def : defs) {
 +        urls.put(def.getRole(), def.getTestURLs());
 +      }
 +    }
 +    return urls;
 +  }
 +
 +  public Collection<Topology> getTopologies() {
 +    Map<File, Topology> map = topologies;
 +    return Collections.unmodifiableCollection(map.values());
 +  }
 +
 +  @Override
 +  public boolean deployProviderConfiguration(String name, String content) {
 +    return writeConfig(sharedProvidersDirectory, name, content);
 +  }
 +
 +  @Override
 +  public Collection<File> getProviderConfigurations() {
 +    List<File> providerConfigs = new ArrayList<>();
 +    for (File providerConfig : listFiles(sharedProvidersDirectory)) {
 +      if (SharedProviderConfigMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(providerConfig.getName()))) {
 +        providerConfigs.add(providerConfig);
 +      }
 +    }
 +    return providerConfigs;
 +  }
 +
 +  @Override
 +  public boolean deleteProviderConfiguration(String name) {
 +    boolean result = false;
 +
 +    File providerConfig = getExistingFile(sharedProvidersDirectory, name);
 +    if (providerConfig != null) {
 +      List<String> references = descriptorsMonitor.getReferencingDescriptors(providerConfig.getAbsolutePath());
 +      if (references.isEmpty()) {
 +        result = providerConfig.delete();
 +      } else {
 +        log.preventedDeletionOfSharedProviderConfiguration(providerConfig.getAbsolutePath());
 +      }
 +    } else {
 +      result = true; // If it already does NOT exist, then the delete effectively succeeded
 +    }
 +
 +    return result;
 +  }
 +
 +  @Override
 +  public boolean deployDescriptor(String name, String content) {
 +    return writeConfig(descriptorsDirectory, name, content);
 +  }
 +
 +  @Override
 +  public Collection<File> getDescriptors() {
 +    List<File> descriptors = new ArrayList<>();
 +    for (File descriptor : listFiles(descriptorsDirectory)) {
 +      if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
 +        descriptors.add(descriptor);
 +      }
 +    }
 +    return descriptors;
 +  }
 +
 +  @Override
 +  public boolean deleteDescriptor(String name) {
 +    File descriptor = getExistingFile(descriptorsDirectory, name);
 +    return (descriptor == null) || descriptor.delete();
 +  }
 +
 +  @Override
 +  public void addTopologyChangeListener(TopologyListener listener) {
 +    listeners.add(listener);
 +  }
 +
 +  @Override
 +  public void startMonitor() throws Exception {
 +    // Start the local configuration monitors
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.start();
 +    }
 +
 +    // Start the remote configuration monitor, if it has been initialized
 +    if (remoteMonitor != null) {
 +      try {
 +        remoteMonitor.start();
 +      } catch (Exception e) {
 +        log.remoteConfigurationMonitorStartFailure(remoteMonitor.getClass().getTypeName(), e.getLocalizedMessage(), e);
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void stopMonitor() throws Exception {
 +    // Stop the local configuration monitors
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.stop();
 +    }
 +
 +    // Stop the remote configuration monitor, if it has been initialized
 +    if (remoteMonitor != null) {
 +      remoteMonitor.stop();
 +    }
 +  }
 +
 +  @Override
 +  public boolean accept(File file) {
 +    boolean accept = false;
 +    if (!file.isDirectory() && file.canRead()) {
 +      String extension = FilenameUtils.getExtension(file.getName());
 +      if (SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains(extension)) {
 +        accept = true;
 +      }
 +    }
 +    return accept;
 +  }
 +
 +  @Override
 +  public void onFileCreate(File file) {
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileDelete(File file) {
 +    // For full topology descriptors, we need to make sure to delete any corresponding simple descriptors to prevent
 +    // unintended subsequent generation of the topology descriptor
 +    for (String ext : DescriptorsMonitor.SUPPORTED_EXTENSIONS) {
 +      File simpleDesc =
 +              new File(descriptorsDirectory, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +      if (simpleDesc.exists()) {
 +        log.deletingDescriptorForTopologyDeletion(simpleDesc.getName(), file.getName());
 +        simpleDesc.delete();
 +      }
 +    }
 +
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileChange(File file) {
 +    reloadTopologies();
 +  }
 +
 +  @Override
 +  public void stop() {
 +
 +  }
 +
 +  @Override
 +  public void start() {
 +    // Register a cluster configuration monitor listener for change notifications
 +    ClusterConfigurationMonitorService ccms =
 +                  GatewayServer.getGatewayServices().getService(GatewayServices.CLUSTER_CONFIGURATION_MONITOR_SERVICE);
 +    ccms.addListener(new TopologyDiscoveryTrigger(this));
 +  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
 +
 +    try {
 +      listeners  = new HashSet<>();
 +      topologies = new HashMap<>();
 +
 +      topologiesDirectory = calculateAbsoluteTopologiesDir(config);
 +
 +      File configDirectory = calculateAbsoluteConfigDir(config);
 +      descriptorsDirectory = new File(configDirectory, "descriptors");
 +      sharedProvidersDirectory = new File(configDirectory, "shared-providers");
 +
 +      // Add support for conf/topologies
 +      initListener(topologiesDirectory, this, this);
 +
 +      // Add support for conf/descriptors
 +      descriptorsMonitor = new DescriptorsMonitor(topologiesDirectory, aliasService);
 +      initListener(descriptorsDirectory,
 +                   descriptorsMonitor,
 +                   descriptorsMonitor);
 +      log.monitoringDescriptorChangesInDirectory(descriptorsDirectory.getAbsolutePath());
 +
 +      // Add support for conf/shared-providers
 +      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(descriptorsMonitor, descriptorsDirectory);
 +      initListener(sharedProvidersDirectory, spm, spm);
 +      log.monitoringProviderConfigChangesInDirectory(sharedProvidersDirectory.getAbsolutePath());
 +
-       // For all the descriptors currently in the descriptors dir at start-up time, trigger topology generation.
++      // For all the descriptors currently in the descriptors dir at start-up time, determine if topology regeneration
++      // is required.
 +      // This happens prior to the start-up loading of the topologies.
 +      String[] descriptorFilenames =  descriptorsDirectory.list();
 +      if (descriptorFilenames != null) {
 +        for (String descriptorFilename : descriptorFilenames) {
 +          if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
++            String topologyName = FilenameUtils.getBaseName(descriptorFilename);
++            File existingDescriptorFile = getExistingFile(descriptorsDirectory, topologyName);
++
 +            // If there isn't a corresponding topology file, or if the descriptor has been modified since the
 +            // corresponding topology file was generated, then trigger generation of one
-             File matchingTopologyFile = getExistingFile(topologiesDirectory, FilenameUtils.getBaseName(descriptorFilename));
-             if (matchingTopologyFile == null ||
-                     matchingTopologyFile.lastModified() < (new File(descriptorsDirectory, descriptorFilename)).lastModified()) {
-               descriptorsMonitor.onFileChange(new File(descriptorsDirectory, descriptorFilename));
++            File matchingTopologyFile = getExistingFile(topologiesDirectory, topologyName);
++            if (matchingTopologyFile == null || matchingTopologyFile.lastModified() < existingDescriptorFile.lastModified()) {
++              descriptorsMonitor.onFileChange(existingDescriptorFile);
++            } else {
++              // If regeneration is NOT required, then we at least need to report the provider configuration
++              // reference relationship (KNOX-1144)
++              String normalizedDescriptorPath = FilenameUtils.normalize(existingDescriptorFile.getAbsolutePath());
++
++              // Parse the descriptor to determine the provider config reference
++              SimpleDescriptor sd = SimpleDescriptorFactory.parse(normalizedDescriptorPath);
++              if (sd != null) {
++                File referencedProviderConfig =
++                           getExistingFile(sharedProvidersDirectory, FilenameUtils.getBaseName(sd.getProviderConfig()));
++                if (referencedProviderConfig != null) {
++                  List<String> references =
++                         descriptorsMonitor.getReferencingDescriptors(referencedProviderConfig.getAbsolutePath());
++                  if (!references.contains(normalizedDescriptorPath)) {
++                    references.add(normalizedDescriptorPath);
++                  }
++                }
++              }
 +            }
 +          }
 +        }
 +      }
 +
 +      // Initialize the remote configuration monitor, if it has been configured
 +      remoteMonitor = RemoteConfigurationMonitorFactory.get(config);
 +
 +    } catch (IOException | SAXException io) {
 +      throw new ServiceLifecycleException(io.getMessage());
 +    }
 +  }
 +
 +  /**
 +   * Utility method for listing the files in the specified directory.
 +   * This method is "nicer" than File#listFiles() because it will not return null.
 +   *
 +   * @param directory The directory whose files should be returned.
 +   *
 +   * @return A List of the Files in the directory, or an empty List if it cannot be listed.
 +   */
 +  private static List<File> listFiles(File directory) {
 +    List<File> result;
 +    File[] files = directory.listFiles();
 +    if (files != null) {
 +      result = Arrays.asList(files);
 +    } else {
 +      result = Collections.emptyList();
 +    }
 +    return result;
 +  }
 +
 +  /**
 +   * Search for a file in the specified directory whose base name (filename without extension) matches the
 +   * specified basename.
 +   *
 +   * @param directory The directory in which to search.
 +   * @param basename  The basename of interest.
 +   *
 +   * @return The matching File, or null if no such file exists.
 +   */
 +  private static File getExistingFile(File directory, String basename) {
 +    File match = null;
 +    for (File file : listFiles(directory)) {
 +      if (FilenameUtils.getBaseName(file.getName()).equals(basename)) {
 +        match = file;
 +        break;
 +      }
 +    }
 +    return match;
 +  }
 +
 +  /**
 +   * Write the specified content to a file.
 +   *
 +   * @param dest    The destination directory.
 +   * @param name    The name of the file.
 +   * @param content The contents of the file.
 +   *
 +   * @return true, if the write succeeds; otherwise, false.
 +   */
 +  private static boolean writeConfig(File dest, String name, String content) {
 +    boolean result = false;
 +
 +    File destFile = new File(dest, name);
 +    try {
 +      FileUtils.writeStringToFile(destFile, content);
 +      log.wroteConfigurationFile(destFile.getAbsolutePath());
 +      result = true;
 +    } catch (IOException e) {
 +      log.failedToWriteConfigurationFile(destFile.getAbsolutePath(), e);
 +    }
 +
 +    return result;
 +  }
 +
 +
 +  /**
 +   * Change handler for simple descriptors
 +   */
 +  public static class DescriptorsMonitor extends FileAlterationListenerAdaptor
 +                                          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<String>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("json");
 +      SUPPORTED_EXTENSIONS.add("yml");
 +      SUPPORTED_EXTENSIONS.add("yaml");
 +    }
 +
 +    private File topologiesDir;
 +
 +    private AliasService aliasService;
 +
 +    private Map<String, List<String>> providerConfigReferences = new HashMap<>();
 +
 +
 +    static boolean isDescriptorFile(String filename) {
 +      return SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(filename));
 +    }
 +
 +    public DescriptorsMonitor(File topologiesDir, AliasService aliasService) {
 +      this.topologiesDir  = topologiesDir;
 +      this.aliasService   = aliasService;
 +    }
 +
 +    List<String> getReferencingDescriptors(String providerConfigPath) {
-       List<String> result = providerConfigReferences.get(FilenameUtils.normalize(providerConfigPath));
-       if (result == null) {
-         result = Collections.emptyList();
-       }
-       return result;
++      String normalizedPath = FilenameUtils.normalize(providerConfigPath);
++      return providerConfigReferences.computeIfAbsent(normalizedPath, p -> new ArrayList<>());
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      // For simple descriptors, we need to make sure to delete any corresponding full topology descriptors to trigger undeployment
 +      for (String ext : DefaultTopologyService.SUPPORTED_TOPOLOGY_FILE_EXTENSIONS) {
 +        File topologyFile =
 +                new File(topologiesDir, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +        if (topologyFile.exists()) {
 +          log.deletingTopologyForDescriptorDeletion(topologyFile.getName(), file.getName());
 +          topologyFile.delete();
 +        }
 +      }
 +
 +      String normalizedFilePath = FilenameUtils.normalize(file.getAbsolutePath());
 +      String reference = null;
 +      for (Map.Entry<String, List<String>> entry : providerConfigReferences.entrySet()) {
 +        if (entry.getValue().contains(normalizedFilePath)) {
 +          reference = entry.getKey();
 +          break;
 +        }
 +      }
 +
 +      if (reference != null) {
 +        providerConfigReferences.get(reference).remove(normalizedFilePath);
 +        log.removedProviderConfigurationReference(normalizedFilePath, reference);
 +      }
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      try {
 +        // When a simple descriptor has been created or modified, generate the new topology descriptor
 +        Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
 +        log.generatedTopologyForDescriptorChange(result.get("topology").getName(), file.getName());
 +
 +        // Add the provider config reference relationship for handling updates to the provider config
 +        String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
 +        if (!providerConfigReferences.containsKey(providerConfig)) {
 +          providerConfigReferences.put(providerConfig, new ArrayList<String>());
 +        }
 +        List<String> refs = providerConfigReferences.get(providerConfig);
 +        String descriptorName = FilenameUtils.normalize(file.getAbsolutePath());
 +        if (!refs.contains(descriptorName)) {
 +          // Need to check if descriptor had previously referenced another provider config, so it can be removed
 +          for (List<String> descs : providerConfigReferences.values()) {
 +            if (descs.contains(descriptorName)) {
 +              descs.remove(descriptorName);
 +            }
 +          }
 +
 +          // Add the current reference relationship
 +          refs.add(descriptorName);
 +          log.addedProviderConfigurationReference(descriptorName, providerConfig);
 +        }
 +      } catch (Exception e) {
 +        log.simpleDescriptorHandlingError(file.getName(), e);
 +      }
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +  /**
 +   * Change handler for shared provider configurations
 +   */
 +  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor
 +          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("xml");
 +    }
 +
 +    private DescriptorsMonitor descriptorsMonitor;
 +    private File descriptorsDir;
 +
 +
 +    SharedProviderConfigMonitor(DescriptorsMonitor descMonitor, File descriptorsDir) {
 +      this.descriptorsMonitor = descMonitor;
 +      this.descriptorsDir     = descriptorsDir;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      // For shared provider configuration, we need to update any simple descriptors that reference it
 +      for (File descriptor : getReferencingDescriptors(file)) {
 +        descriptor.setLastModified(System.currentTimeMillis());
 +      }
 +    }
 +
 +    private List<File> getReferencingDescriptors(File sharedProviderConfig) {
 +      List<File> references = new ArrayList<>();
 +
 +      // The DescriptorsMonitor already tracks which descriptors reference this
 +      // provider configuration, so there is no need to iterate the descriptors dir.
 +      String normalizedPath = FilenameUtils.normalize(sharedProviderConfig.getAbsolutePath());
 +      for (String reference : descriptorsMonitor.getReferencingDescriptors(normalizedPath)) {
 +        references.add(new File(reference));
 +      }
 +
 +      return references;
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +  /**
 +   * Listener for Ambari config change events, which will trigger re-generation (including re-discovery) of the
 +   * affected topologies.
 +   */
 +  private static class TopologyDiscoveryTrigger implements ClusterConfigurationMonitor.ConfigurationChangeListener {
 +
 +    private TopologyService topologyService = null;
 +
 +    TopologyDiscoveryTrigger(TopologyService topologyService) {
 +      this.topologyService = topologyService;
 +    }
 +
 +    @Override
 +    public void onConfigurationChange(String source, String clusterName) {
 +      log.noticedClusterConfigurationChange(source, clusterName);
 +      try {
 +        // Identify any descriptors associated with the cluster configuration change
 +        for (File descriptor : topologyService.getDescriptors()) {
 +          String descriptorContent = FileUtils.readFileToString(descriptor);
 +          if (descriptorContent.contains(source)) {
 +            if (descriptorContent.contains(clusterName)) {
 +              log.triggeringTopologyRegeneration(source, clusterName, descriptor.getAbsolutePath());
 +              // 'Touch' the descriptor to trigger re-generation of the associated topology
 +              descriptor.setLastModified(System.currentTimeMillis());
 +            }
 +          }
 +        }
 +      } catch (Exception e) {
 +        log.errorRespondingToConfigChange(source, clusterName, e);
 +      }
 +    }
 +  }
 +
 +}
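
The start-up logic in init() reduces to a timestamp comparison between each simple
descriptor and its generated topology. A condensed sketch of that rule outside the
service (file names illustrative):

    java.io.File descriptor = new java.io.File( "conf/descriptors/sandbox.json" );
    java.io.File topology   = new java.io.File( "conf/topologies/sandbox.xml" );

    boolean regenerate = !topology.exists()
                      || topology.lastModified() < descriptor.lastModified();
    // true  -> DescriptorsMonitor.onFileChange(descriptor) regenerates the topology
    // false -> only the provider-config reference bookkeeping is updated (KNOX-1144)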

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
index efafee0,0000000..37d1ca6
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/monitor/DefaultRemoteConfigurationMonitor.java
@@@ -1,228 -1,0 +1,246 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.monitor;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.EntryListener;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
 +import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.zookeeper.ZooDefs;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.util.ArrayList;
++import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.List;
 +
 +
 +class DefaultRemoteConfigurationMonitor implements RemoteConfigurationMonitor {
 +
 +    private static final String NODE_KNOX = "/knox";
 +    private static final String NODE_KNOX_CONFIG = NODE_KNOX + "/config";
 +    private static final String NODE_KNOX_PROVIDERS = NODE_KNOX_CONFIG + "/shared-providers";
 +    private static final String NODE_KNOX_DESCRIPTORS = NODE_KNOX_CONFIG + "/descriptors";
 +
 +    private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
 +
 +    // N.B. This is ZooKeeper-specific, and should be abstracted when another registry is supported
 +    private static final RemoteConfigurationRegistryClient.EntryACL AUTHENTICATED_USERS_ALL;
 +    static {
 +        AUTHENTICATED_USERS_ALL = new RemoteConfigurationRegistryClient.EntryACL() {
 +            public String getId() {
 +                return "";
 +            }
 +
 +            public String getType() {
 +                return "auth";
 +            }
 +
 +            public Object getPermissions() {
 +                return ZooDefs.Perms.ALL;
 +            }
 +
 +            public boolean canRead() {
 +                return true;
 +            }
 +
 +            public boolean canWrite() {
 +                return true;
 +            }
 +        };
 +    }
 +
 +    private RemoteConfigurationRegistryClient client = null;
 +
 +    private File providersDir;
 +    private File descriptorsDir;
 +
 +    /**
 +     * @param config                The gateway configuration
 +     * @param registryClientService The service from which the remote registry client should be acquired.
 +     */
 +    DefaultRemoteConfigurationMonitor(GatewayConfig                            config,
 +                                      RemoteConfigurationRegistryClientService registryClientService) {
 +        this.providersDir   = new File(config.getGatewayProvidersConfigDir());
 +        this.descriptorsDir = new File(config.getGatewayDescriptorsDir());
 +
 +        if (registryClientService != null) {
 +            String clientName = config.getRemoteConfigurationMonitorClientName();
 +            if (clientName != null) {
 +                this.client = registryClientService.get(clientName);
 +                if (this.client == null) {
 +                    log.unresolvedClientConfigurationForRemoteMonitoring(clientName);
 +                }
 +            } else {
 +                log.missingClientConfigurationForRemoteMonitoring();
 +            }
 +        }
 +    }
 +
 +    @Override
 +    public void start() throws Exception {
 +        if (client == null) {
 +            throw new IllegalStateException("Failed to acquire a remote configuration registry client.");
 +        }
 +
 +        final String monitorSource = client.getAddress();
 +        log.startingRemoteConfigurationMonitor(monitorSource);
 +
 +        // Ensure the existence of the expected entries and their associated ACLs
 +        ensureEntries();
 +
 +        // Confirm access to the remote provider configs directory znode
 +        List<String> providerConfigs = client.listChildEntries(NODE_KNOX_PROVIDERS);
 +        if (providerConfigs == null) {
 +            // Either the ZNode does not exist, or there is an authentication problem
 +            throw new IllegalStateException("Unable to access remote path: " + NODE_KNOX_PROVIDERS);
++        } else {
++            // Download any existing provider configs in the remote registry, which either do not exist locally, or have
++            // been modified, so that they are certain to be present when this monitor downloads any descriptors that
++            // reference them.
++            for (String providerConfig : providerConfigs) {
++                File localFile = new File(providersDir, providerConfig);
++
++                byte[] remoteContent = client.getEntryData(NODE_KNOX_PROVIDERS + "/" + providerConfig).getBytes();
++                if (!localFile.exists() || !Arrays.equals(remoteContent, FileUtils.readFileToByteArray(localFile))) {
++                    FileUtils.writeByteArrayToFile(localFile, remoteContent);
++                    log.downloadedRemoteConfigFile(providersDir.getName(), providerConfig);
++                }
++            }
 +        }
 +
 +        // Confirm access to the remote descriptors directory znode
 +        List<String> descriptors = client.listChildEntries(NODE_KNOX_DESCRIPTORS);
 +        if (descriptors == null) {
 +            // Either the ZNode does not exist, or there is an authentication problem
 +            throw new IllegalStateException("Unable to access remote path: " + NODE_KNOX_DESCRIPTORS);
 +        }
 +
 +        // Register a listener for provider config znode additions/removals
 +        client.addChildEntryListener(NODE_KNOX_PROVIDERS, new ConfigDirChildEntryListener(providersDir));
 +
 +        // Register a listener for descriptor znode additions/removals
 +        client.addChildEntryListener(NODE_KNOX_DESCRIPTORS, new ConfigDirChildEntryListener(descriptorsDir));
 +
 +        log.monitoringRemoteConfigurationSource(monitorSource);
 +    }
 +
 +
 +    @Override
 +    public void stop() throws Exception {
 +        client.removeEntryListener(NODE_KNOX_PROVIDERS);
 +        client.removeEntryListener(NODE_KNOX_DESCRIPTORS);
 +    }
 +
 +    private void ensureEntries() {
 +        ensureEntry(NODE_KNOX);
 +        ensureEntry(NODE_KNOX_CONFIG);
 +        ensureEntry(NODE_KNOX_PROVIDERS);
 +        ensureEntry(NODE_KNOX_DESCRIPTORS);
 +    }
 +
 +    private void ensureEntry(String name) {
 +        if (!client.entryExists(name)) {
 +            client.createEntry(name);
 +        } else {
 +            // Validate the ACL
 +            List<RemoteConfigurationRegistryClient.EntryACL> entryACLs = client.getACL(name);
 +            for (RemoteConfigurationRegistryClient.EntryACL entryACL : entryACLs) {
 +                // N.B. This is ZooKeeper-specific, and should be abstracted when another registry is supported
 +                // For now, check for ZooKeeper world:anyone with ANY permissions (even read-only)
 +                if (entryACL.getType().equals("world") && entryACL.getId().equals("anyone")) {
 +                    log.suspectWritableRemoteConfigurationEntry(name);
 +
 +                    // If the client is authenticated, but "anyone" can write the content, then the content may not
 +                    // be trustworthy.
 +                    if (client.isAuthenticationConfigured()) {
 +                        log.correctingSuspectWritableRemoteConfigurationEntry(name);
 +
 +                        // Replace the existing ACL with one that permits only authenticated users
 +                        client.setACL(name, Collections.singletonList(AUTHENTICATED_USERS_ALL));
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    private static class ConfigDirChildEntryListener implements ChildEntryListener {
 +        File localDir;
 +
 +        ConfigDirChildEntryListener(File localDir) {
 +            this.localDir = localDir;
 +        }
 +
 +        @Override
 +        public void childEvent(RemoteConfigurationRegistryClient client, Type type, String path) {
 +            File localFile = new File(localDir, path.substring(path.lastIndexOf("/") + 1));
 +
 +            switch (type) {
 +                case REMOVED:
 +                    FileUtils.deleteQuietly(localFile);
 +                    log.deletedRemoteConfigFile(localDir.getName(), localFile.getName());
 +                    try {
 +                        client.removeEntryListener(path);
 +                    } catch (Exception e) {
 +                        log.errorRemovingRemoteConfigurationListenerForPath(path, e);
 +                    }
 +                    break;
 +                case ADDED:
 +                    try {
 +                        client.addEntryListener(path, new ConfigEntryListener(localDir));
 +                    } catch (Exception e) {
 +                        log.errorAddingRemoteConfigurationListenerForPath(path, e);
 +                    }
 +                    break;
 +            }
 +        }
 +    }
 +
 +    private static class ConfigEntryListener implements EntryListener {
 +        private File localDir;
 +
 +        ConfigEntryListener(File localDir) {
 +            this.localDir = localDir;
 +        }
 +
 +        @Override
 +        public void entryChanged(RemoteConfigurationRegistryClient client, String path, byte[] data) {
 +            File localFile = new File(localDir, path.substring(path.lastIndexOf("/") + 1));
 +            if (data != null) {
 +                try {
-                     FileUtils.writeByteArrayToFile(localFile, data);
-                     log.downloadedRemoteConfigFile(localDir.getName(), localFile.getName());
++                    // If there is no corresponding local file, or the content is different from the existing local
++                    // file, write the data to the local file.
++                    if (!localFile.exists() || !Arrays.equals(FileUtils.readFileToByteArray(localFile), data)) {
++                        FileUtils.writeByteArrayToFile(localFile, data);
++                        log.downloadedRemoteConfigFile(localDir.getName(), localFile.getName());
++                    }
 +                } catch (IOException e) {
 +                    log.errorDownloadingRemoteConfiguration(path, e);
 +                }
 +            } else {
 +                FileUtils.deleteQuietly(localFile);
 +                log.deletedRemoteConfigFile(localDir.getName(), localFile.getName());
 +            }
 +        }
 +    }
 +
 +}
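
A minimal wiring sketch (not Knox's actual factory code) of how the monitor above
is constructed and started, assuming a GatewayConfig and an AliasService are
already in hand; the direct constructor call is illustrative only, since the
constructor is package-private and is normally invoked from within this package:

    // Hypothetical helper; 'config' and 'aliasService' are assumed to exist.
    static void startRemoteConfigMonitoring(GatewayConfig config,
                                            AliasService aliasService) throws Exception {
        // Build and start the registry client service, as the tests below also do
        RemoteConfigurationRegistryClientService clientService =
                RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
        clientService.setAliasService(aliasService);
        clientService.init(config, null);
        clientService.start();

        DefaultRemoteConfigurationMonitor monitor =
                new DefaultRemoteConfigurationMonitor(config, clientService);
        monitor.start(); // fails fast with IllegalStateException if no client was resolved
    }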


[42/53] [abbrv] knox git commit: KNOX-998 - Merge from trunk 0.14.0 code

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java
new file mode 100644
index 0000000..2854998
--- /dev/null
+++ b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/util/RemoteRegistryConfigTestUtils.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.util;
+
+import java.util.Collection;
+import java.util.Map;
+
+public class RemoteRegistryConfigTestUtils {
+
+    public static final String PROPERTY_TYPE = "type";
+    public static final String PROPERTY_NAME = "name";
+    public static final String PROPERTY_ADDRESS = "address";
+    public static final String PROPERTY_NAMESPACE = "namespace";
+    public static final String PROPERTY_SECURE = "secure";
+    public static final String PROPERTY_AUTH_TYPE = "authType";
+    public static final String PROPERTY_PRINCIPAL = "principal";
+    public static final String PROPERTY_CRED_ALIAS = "credentialAlias";
+    public static final String PROPERTY_KEYTAB = "keyTab";
+    public static final String PROPERTY_USE_KEYTAB = "useKeyTab";
+    public static final String PROPERTY_USE_TICKET_CACHE = "useTicketCache";
+
+    public static String createRemoteConfigRegistriesXML(Collection<Map<String, String>> configProperties) {
+        String result = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
+                        "<remote-configuration-registries>\n";
+
+        for (Map<String, String> props : configProperties) {
+            String authType = props.get(PROPERTY_AUTH_TYPE);
+            if ("Kerberos".equalsIgnoreCase(authType)) {
+                result +=
+                   createRemoteConfigRegistryXMLWithKerberosAuth(props.get(PROPERTY_TYPE),
+                                                                 props.get(PROPERTY_NAME),
+                                                                 props.get(PROPERTY_ADDRESS),
+                                                                 props.get(PROPERTY_PRINCIPAL),
+                                                                 props.get(PROPERTY_KEYTAB),
+                                                                 Boolean.valueOf(props.get(PROPERTY_USE_KEYTAB)),
+                                                                 Boolean.valueOf(props.get(PROPERTY_USE_TICKET_CACHE)));
+            } else if ("Digest".equalsIgnoreCase(authType)) {
+                result +=
+                    createRemoteConfigRegistryXMLWithDigestAuth(props.get(PROPERTY_TYPE),
+                                                                props.get(PROPERTY_NAME),
+                                                                props.get(PROPERTY_ADDRESS),
+                                                                props.get(PROPERTY_PRINCIPAL),
+                                                                props.get(PROPERTY_CRED_ALIAS));
+            } else {
+                result += createRemoteConfigRegistryXMLNoAuth(props.get(PROPERTY_TYPE),
+                                                              props.get(PROPERTY_NAME),
+                                                              props.get(PROPERTY_ADDRESS));
+            }
+        }
+
+        result += "</remote-configuration-registries>\n";
+
+        return result;
+    }
+
+    public static String createRemoteConfigRegistryXMLWithKerberosAuth(String type,
+                                                                       String name,
+                                                                       String address,
+                                                                       String principal,
+                                                                       String keyTab,
+                                                                       boolean useKeyTab,
+                                                                       boolean useTicketCache) {
+        return "  <remote-configuration-registry>\n" +
+               "    <name>" + name + "</name>\n" +
+               "    <type>" + type + "</type>\n" +
+               "    <address>" + address + "</address>\n" +
+               "    <secure>true</secure>\n" +
+               "    <auth-type>" + "Kerberos" + "</auth-type>\n" +
+               "    <principal>" + principal + "</principal>\n" +
+               "    <keytab>" + keyTab + "</keytab>\n" +
+               "    <use-keytab>" + String.valueOf(useKeyTab) + "</use-keytab>\n" +
+               "    <use-ticket-cache>" + String.valueOf(useTicketCache) + "</use-ticket-cache>\n" +
+               "  </remote-configuration-registry>\n";
+    }
+
+    public static String createRemoteConfigRegistryXMLWithDigestAuth(String type,
+                                                                     String name,
+                                                                     String address,
+                                                                     String principal,
+                                                                     String credentialAlias) {
+        return "  <remote-configuration-registry>\n" +
+               "    <name>" + name + "</name>\n" +
+               "    <type>" + type + "</type>\n" +
+               "    <address>" + address + "</address>\n" +
+               "    <secure>true</secure>\n" +
+               "    <auth-type>" + "Digest" + "</auth-type>\n" +
+               "    <principal>" + principal + "</principal>\n" +
+               "    <credential-alias>" + credentialAlias + "</credential-alias>\n" +
+               "  </remote-configuration-registry>\n";
+    }
+
+
+    public static String createRemoteConfigRegistryXMLNoAuth(String type,
+                                                             String name,
+                                                             String address) {
+        return "  <remote-configuration-registry>\n" +
+               "    <name>" + name + "</name>\n" +
+               "    <type>" + type + "</type>\n" +
+               "    <address>" + address + "</address>\n" +
+               "  </remote-configuration-registry>\n";
+    }
+
+}
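
A short usage sketch of the helper above, mirroring how the client service test
below employs it; all property values are placeholders:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class RegistryXmlSketch {
        static String digestRegistryXML() {
            // All values below are illustrative placeholders.
            Map<String, String> props = new HashMap<>();
            props.put(RemoteRegistryConfigTestUtils.PROPERTY_TYPE, "ZooKeeper");
            props.put(RemoteRegistryConfigTestUtils.PROPERTY_NAME, "my-zk-registry");
            props.put(RemoteRegistryConfigTestUtils.PROPERTY_ADDRESS, "localhost:2181");
            props.put(RemoteRegistryConfigTestUtils.PROPERTY_AUTH_TYPE, "Digest");
            props.put(RemoteRegistryConfigTestUtils.PROPERTY_PRINCIPAL, "knox");
            props.put(RemoteRegistryConfigTestUtils.PROPERTY_CRED_ALIAS, "zkCredential");
            // Produces the <remote-configuration-registries> document for one registry
            return RemoteRegistryConfigTestUtils.createRemoteConfigRegistriesXML(
                    Collections.singleton(props));
        }
    }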

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java
new file mode 100644
index 0000000..8a817a4
--- /dev/null
+++ b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryClientServiceTest.java
@@ -0,0 +1,424 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.zk;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.InstanceSpec;
+import org.apache.curator.test.TestingCluster;
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient.ChildEntryListener;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryClientServiceFactory;
+import org.apache.knox.gateway.service.config.remote.util.RemoteRegistryConfigTestUtils;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class RemoteConfigurationRegistryClientServiceTest {
+
+    /**
+     * Test a configuration for an unsecured remote registry, included in the gateway configuration.
+     */
+    @Test
+    public void testUnsecuredZooKeeperWithSimpleRegistryConfig() throws Exception {
+        final String REGISTRY_CLIENT_NAME = "unsecured-zk-registry-name";
+        final String PRINCIPAL = null;
+        final String PWD = null;
+        final String CRED_ALIAS = null;
+
+        // Configure and start a test ZK cluster (no authentication, since the principal is null)
+        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
+
+        try {
+            // Create the setup client for the test cluster, and initialize the test znodes
+            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
+
+            // Mock configuration
+            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+            final String registryConfigValue =
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                        GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
+            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME))
+                    .andReturn(registryConfigValue)
+                    .anyTimes();
+            EasyMock.expect(config.getRemoteRegistryConfigurationNames())
+                    .andReturn(Collections.singletonList(REGISTRY_CLIENT_NAME)).anyTimes();
+            EasyMock.replay(config);
+
+            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME, config, CRED_ALIAS, PWD);
+        } finally {
+            zkCluster.stop();
+        }
+    }
+
+    /**
+     * Test multiple configurations for an unsecured remote registry.
+     */
+    @Test
+    public void testMultipleUnsecuredZooKeeperWithSimpleRegistryConfig() throws Exception {
+        final String REGISTRY_CLIENT_NAME_1 = "zkclient1";
+        final String REGISTRY_CLIENT_NAME_2 = "zkclient2";
+        final String PRINCIPAL = null;
+        final String PWD = null;
+        final String CRED_ALIAS = null;
+
+        // Configure and start a test ZK cluster (no authentication, since the principal is null)
+        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
+
+        try {
+            // Create the setup client for the test cluster, and initialize the test znodes
+            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
+
+            // Mock configuration
+            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+            final String registryConfigValue1 =
+                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
+            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME_1))
+                    .andReturn(registryConfigValue1).anyTimes();
+            final String registryConfigValue2 =
+                                GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                                GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString();
+            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME_2))
+                    .andReturn(registryConfigValue2).anyTimes();
+            EasyMock.expect(config.getRemoteRegistryConfigurationNames())
+                    .andReturn(Arrays.asList(REGISTRY_CLIENT_NAME_1, REGISTRY_CLIENT_NAME_2)).anyTimes();
+            EasyMock.replay(config);
+
+            // Create the client service instance
+            RemoteConfigurationRegistryClientService clientService =
+                    RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
+            assertEquals("Wrong registry client service type.", CuratorClientService.class, clientService.getClass());
+            clientService.setAliasService(null);
+            clientService.init(config, null);
+            clientService.start();
+
+            RemoteConfigurationRegistryClient client1 = clientService.get(REGISTRY_CLIENT_NAME_1);
+            assertNotNull(client1);
+
+            RemoteConfigurationRegistryClient client2 = clientService.get(REGISTRY_CLIENT_NAME_2);
+            assertNotNull(client2);
+
+            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME_1, clientService, false);
+            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME_2, clientService, false);
+        } finally {
+            zkCluster.stop();
+        }
+    }
+
+    /**
+     * Test a configuration for a secure remote registry, included in the gateway configuration.
+     */
+    @Test
+    public void testZooKeeperWithSimpleRegistryConfig() throws Exception {
+        final String AUTH_TYPE = "digest";
+        final String REGISTRY_CLIENT_NAME = "zk-registry-name";
+        final String PRINCIPAL = "knox";
+        final String PWD = "knoxtest";
+        final String CRED_ALIAS = "zkCredential";
+
+        // Configure and start a secure ZK cluster
+        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
+
+        try {
+            // Create the setup client for the test cluster, and initialize the test znodes
+            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
+
+            // Mock configuration
+            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+            final String registryConfigValue =
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_TYPE + "=" + ZooKeeperClientService.TYPE + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_ADDRESS + "=" + zkCluster.getConnectString() + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_AUTH_TYPE + "=" + AUTH_TYPE + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_PRINCIPAL + "=" + PRINCIPAL + ";" +
+                            GatewayConfig.REMOTE_CONFIG_REGISTRY_CREDENTIAL_ALIAS + "=" + CRED_ALIAS;
+            EasyMock.expect(config.getRemoteRegistryConfiguration(REGISTRY_CLIENT_NAME))
+                    .andReturn(registryConfigValue)
+                    .anyTimes();
+            EasyMock.expect(config.getRemoteRegistryConfigurationNames())
+                    .andReturn(Collections.singletonList(REGISTRY_CLIENT_NAME)).anyTimes();
+            EasyMock.replay(config);
+
+            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME, config, CRED_ALIAS, PWD);
+        } finally {
+            zkCluster.stop();
+        }
+    }
+
+    /**
+     * Test a remote registry configuration that lives outside the gateway configuration and is referenced from it,
+     * for a secure client.
+     */
+    @Test
+    public void testZooKeeperWithSingleExternalRegistryConfig() throws Exception {
+        final String AUTH_TYPE = "digest";
+        final String REGISTRY_CLIENT_NAME = "my-zookeeper_registryNAME";
+        final String PRINCIPAL = "knox";
+        final String PWD = "knoxtest";
+        final String CRED_ALIAS = "zkCredential";
+
+        // Configure and start a secure ZK cluster
+        TestingCluster zkCluster = setupAndStartSecureTestZooKeeper(PRINCIPAL, PWD);
+
+        File tmpRegConfigFile = null;
+
+        try {
+            // Create the setup client for the test cluster, and initialize the test znodes
+            CuratorFramework setupClient = initializeTestClientAndZNodes(zkCluster, PRINCIPAL);
+
+            // Mock configuration
+            Map<String, String> registryConfigProps = new HashMap<>();
+            registryConfigProps.put("type", ZooKeeperClientService.TYPE);
+            registryConfigProps.put("name", REGISTRY_CLIENT_NAME);
+            registryConfigProps.put("address", zkCluster.getConnectString());
+            registryConfigProps.put("secure", "true");
+            registryConfigProps.put("authType", AUTH_TYPE);
+            registryConfigProps.put("principal", PRINCIPAL);
+            registryConfigProps.put("credentialAlias", CRED_ALIAS);
+            String registryConfigXML =
+                  RemoteRegistryConfigTestUtils.createRemoteConfigRegistriesXML(Collections.singleton(registryConfigProps));
+            tmpRegConfigFile = File.createTempFile("myRemoteRegistryConfig", ".xml");
+            FileUtils.writeStringToFile(tmpRegConfigFile, registryConfigXML);
+
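+            // Point the client service factory at the external registry configuration file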
+            System.setProperty("org.apache.knox.gateway.remote.registry.config.file", tmpRegConfigFile.getAbsolutePath());
+
+            GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+            EasyMock.replay(config);
+
+            doTestZooKeeperClient(setupClient, REGISTRY_CLIENT_NAME, config, CRED_ALIAS, PWD);
+        } finally {
+            zkCluster.stop();
+            if (tmpRegConfigFile != null && tmpRegConfigFile.exists()) {
+                tmpRegConfigFile.delete();
+            }
+            System.clearProperty("org.apache.knox.gateway.remote.registry.config.file");
+        }
+    }
+
+    /**
+     * Setup and start a secure test ZooKeeper cluster.
+     */
+    private TestingCluster setupAndStartSecureTestZooKeeper(String principal, String digestPassword) throws Exception {
+        final boolean applyAuthentication = (principal != null);
+
+        // Configure security for the ZK cluster instances
+        Map<String, Object> customInstanceSpecProps = new HashMap<>();
+
+        if (applyAuthentication) {
+            customInstanceSpecProps.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
+            customInstanceSpecProps.put("requireClientAuthScheme", "sasl");
+        }
+
+        // Define the test cluster
+        List<InstanceSpec> instanceSpecs = new ArrayList<>();
+        for (int i = 0 ; i < 3 ; i++) {
+            InstanceSpec is = new InstanceSpec(null, -1, -1, -1, false, (i+1), -1, -1, customInstanceSpecProps);
+            instanceSpecs.add(is);
+        }
+        TestingCluster zkCluster = new TestingCluster(instanceSpecs);
+
+        if (applyAuthentication) {
+            // Setup ZooKeeper server SASL
+            Map<String, String> digestOptions = new HashMap<>();
+            digestOptions.put("user_" + principal, digestPassword);
+            final AppConfigurationEntry[] serverEntries =
+                    {new AppConfigurationEntry("org.apache.zookeeper.server.auth.DigestLoginModule",
+                            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+                            digestOptions)};
+            Configuration.setConfiguration(new Configuration() {
+                @Override
+                public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+                    return ("Server".equalsIgnoreCase(name)) ? serverEntries : null;
+                }
+            });
+        }
+
+        // Start the cluster
+        zkCluster.start();
+
+        return zkCluster;
+    }
+
+    /**
+     * Create a ZooKeeper client with SASL digest auth configured, and initialize the test znodes.
+     */
+    private CuratorFramework initializeTestClientAndZNodes(TestingCluster zkCluster, String principal) throws Exception {
+        // Create the client for the test cluster
+        CuratorFramework setupClient = CuratorFrameworkFactory.builder()
+                                                              .connectString(zkCluster.getConnectString())
+                                                              .retryPolicy(new ExponentialBackoffRetry(100, 3))
+                                                              .build();
+        assertNotNull(setupClient);
+        setupClient.start();
+
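+        // Grant ALL permissions either to the test SASL principal or, when unsecured, to world:anyone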
+        List<ACL> acls = new ArrayList<>();
+        if (principal != null) {
+            acls.add(new ACL(ZooDefs.Perms.ALL, new Id("sasl", principal)));
+        } else {
+            acls.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
+        }
+        setupClient.create().creatingParentsIfNeeded().withACL(acls).forPath("/knox/config/descriptors");
+        setupClient.create().creatingParentsIfNeeded().withACL(acls).forPath("/knox/config/shared-providers");
+
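+        // When secured, grant this entry only to a different principal ("notyou"), so the test client is denied access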
+        List<ACL> negativeACLs = new ArrayList<>();
+        if (principal != null) {
+            negativeACLs.add(new ACL(ZooDefs.Perms.ALL, new Id("sasl", "notyou")));
+        } else {
+            negativeACLs.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE));
+        }
+        setupClient.create().creatingParentsIfNeeded().withACL(negativeACLs).forPath("/someotherconfig");
+
+        return setupClient;
+    }
+
+    private void doTestZooKeeperClient(final CuratorFramework setupClient,
+                                       final String           testClientName,
+                                       final GatewayConfig    config,
+                                       final String           credentialAlias,
+                                       final String           digestPassword) throws Exception {
+        boolean isSecureTest = (credentialAlias != null && digestPassword != null);
+
+        // Mock alias service
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(credentialAlias))
+                .andReturn(isSecureTest ? digestPassword.toCharArray() : null)
+                .anyTimes();
+        EasyMock.replay(aliasService);
+
+        // Create the client service instance
+        RemoteConfigurationRegistryClientService clientService =
+                RemoteConfigurationRegistryClientServiceFactory.newInstance(config);
+        assertEquals("Wrong registry client service type.", CuratorClientService.class, clientService.getClass());
+        clientService.setAliasService(aliasService);
+        clientService.init(config, null);
+        clientService.start();
+
+        doTestZooKeeperClient(setupClient, testClientName, clientService, isSecureTest);
+    }
+
+    /**
+     * Test ZooKeeper client interactions, which may be secure or unsecured per the isSecureTest flag.
+     *
+     * @param setupClient    The client used for interacting with ZooKeeper independent from the registry client service.
+     * @param testClientName The name of the client to use from the registry client service.
+     * @param clientService  The RemoteConfigurationRegistryClientService
+     * @param isSecureTest   Flag to indicate whether this is a secure interaction test
+     */
+    private void doTestZooKeeperClient(final CuratorFramework                         setupClient,
+                                       final String                                   testClientName,
+                                       final RemoteConfigurationRegistryClientService clientService,
+                                       boolean                                        isSecureTest) throws Exception {
+
+        RemoteConfigurationRegistryClient client = clientService.get(testClientName);
+        assertNotNull(client);
+        List<String> descriptors = client.listChildEntries("/knox/config/descriptors");
+        assertNotNull(descriptors);
+        for (String descriptor : descriptors) {
+            System.out.println("Descriptor: " + descriptor);
+        }
+
+        List<String> providerConfigs = client.listChildEntries("/knox/config/shared-providers");
+        assertNotNull(providerConfigs);
+        for (String providerConfig : providerConfigs) {
+            System.out.println("Provider config: " + providerConfig);
+        }
+
+        List<String> someotherConfig = client.listChildEntries("/someotherconfig");
+        if (isSecureTest) {
+            assertNull("Expected null because of the ACL mismatch.", someotherConfig);
+        } else {
+            assertNotNull(someotherConfig);
+        }
+
+        // Test listeners
+        final String MY_NEW_ZNODE = "/clientServiceTestNode";
+        final String MY_NEW_DATA_ZNODE = MY_NEW_ZNODE + "/mydata";
+
+        if (setupClient.checkExists().forPath(MY_NEW_ZNODE) != null) {
+            setupClient.delete().deletingChildrenIfNeeded().forPath(MY_NEW_ZNODE);
+        }
+
+        final List<String> listenerLog = new ArrayList<>();
+        client.addChildEntryListener(MY_NEW_ZNODE, (c, type, path) -> {
+            listenerLog.add("EXTERNAL: " + type.toString() + ":" + path);
+            if (RemoteConfigurationRegistryClient.ChildEntryListener.Type.ADDED.equals(type)) {
+                try {
+                    c.addEntryListener(path, (cc, p, d) -> listenerLog.add("EXTERNAL: " + p + ":" + (d != null ? new String(d) : "null")));
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+            }
+        });
+
+        client.createEntry(MY_NEW_ZNODE);
+        client.createEntry(MY_NEW_DATA_ZNODE, "more test data");
+        String testData = client.getEntryData(MY_NEW_DATA_ZNODE);
+        assertNotNull(testData);
+        assertEquals("more test data", testData);
+
+        assertTrue(client.entryExists(MY_NEW_DATA_ZNODE));
+        client.setEntryData(MY_NEW_DATA_ZNODE, "still more data");
+
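+        // Brief pauses here and below give the asynchronous listeners time to fire before the next change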
+        try {
+            Thread.sleep(1000);
+        } catch (InterruptedException e) {
+            //
+        }
+
+        client.setEntryData(MY_NEW_DATA_ZNODE, "changed completely");
+
+        try {
+            Thread.sleep(1000);
+        } catch (InterruptedException e) {
+            //
+        }
+
+        client.deleteEntry(MY_NEW_DATA_ZNODE);
+
+        try {
+            Thread.sleep(1000);
+        } catch (InterruptedException e) {
+            //
+        }
+
+        assertFalse(listenerLog.isEmpty());
+    }
+
+}
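
The ACL validation in DefaultRemoteConfigurationMonitor earlier in this series
checks for the ZooKeeper world:anyone identity; the following is a minimal
Curator-level sketch of that check and correction, assuming an existing
CuratorFramework handle and a hypothetical znode path:

    import java.util.Collections;
    import org.apache.curator.framework.CuratorFramework;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;

    public class EnsureRestrictedACLSketch {
        // world:anyone is ZooDefs.Ids.ANYONE_ID_UNSAFE; the "auth" scheme
        // (ZooDefs.Ids.AUTH_IDS) grants access only to authenticated sessions.
        static void ensureRestrictedACL(CuratorFramework zk, String path) throws Exception {
            for (ACL acl : zk.getACL().forPath(path)) {
                if ("world".equals(acl.getId().getScheme()) && "anyone".equals(acl.getId().getId())) {
                    // Replace the open ACL with one permitting only authenticated users
                    zk.setACL()
                      .withACL(Collections.singletonList(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.AUTH_IDS)))
                      .forPath(path);
                    break;
                }
            }
        }
    }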

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java
----------------------------------------------------------------------
diff --git a/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java
new file mode 100644
index 0000000..7a123f4
--- /dev/null
+++ b/gateway-service-remoteconfig/src/test/java/org/apache/knox/gateway/service/config/remote/zk/RemoteConfigurationRegistryJAASConfigTest.java
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.service.config.remote.zk;
+
+import org.apache.knox.gateway.service.config.remote.RemoteConfigurationRegistryConfig;
+import org.apache.knox.gateway.service.config.remote.zk.RemoteConfigurationRegistryJAASConfig;
+import org.apache.knox.gateway.services.security.AliasService;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class RemoteConfigurationRegistryJAASConfigTest {
+
+    @Test
+    public void testZooKeeperDigestContextEntry() throws Exception {
+        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
+        final String ENTRY_NAME       = "my_digest_context";
+        final String DIGEST_PRINCIPAL = "myIdentity";
+        final String DIGEST_PWD_ALIAS = "myAlias";
+        final String DIGEST_PWD       = "mysecret";
+
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(DIGEST_PWD_ALIAS)).andReturn(DIGEST_PWD.toCharArray()).anyTimes();
+        EasyMock.replay(aliasService);
+
+        registryConfigs.add(createDigestConfig(ENTRY_NAME, DIGEST_PRINCIPAL, DIGEST_PWD_ALIAS));
+
+        try {
+            RemoteConfigurationRegistryJAASConfig jaasConfig =
+                                    RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, aliasService);
+
+            // Make sure there are no entries for an invalid context entry name
+            assertNull(jaasConfig.getAppConfigurationEntry("invalid"));
+
+            // Validate the intended context entry
+            validateDigestContext(jaasConfig,
+                                  ENTRY_NAME,
+                                  RemoteConfigurationRegistryJAASConfig.digestLoginModules.get("ZOOKEEPER"),
+                                  DIGEST_PRINCIPAL,
+                                  DIGEST_PWD);
+        } finally {
+            Configuration.setConfiguration(null);
+        }
+    }
+
+    @Test
+    public void testKerberosContextEntry() throws Exception {
+        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
+        final String ENTRY_NAME = "my_kerberos_context";
+        final String PRINCIPAL  = "myIdentity";
+
+        File dummyKeyTab = File.createTempFile("my_context", "keytab");
+        registryConfigs.add(createKerberosConfig(ENTRY_NAME, PRINCIPAL, dummyKeyTab.getAbsolutePath()));
+
+        try {
+            RemoteConfigurationRegistryJAASConfig jaasConfig =
+                                            RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, null);
+
+            // Make sure there are no entries for an invalid context entry name
+            assertNull(jaasConfig.getAppConfigurationEntry("invalid"));
+
+            // Validate the intended context entry
+            validateKerberosContext(jaasConfig,
+                                    ENTRY_NAME,
+                                    PRINCIPAL,
+                                    dummyKeyTab.getAbsolutePath(),
+                                    true,
+                                    false);
+
+        } finally {
+            Configuration.setConfiguration(null);
+        }
+    }
+
+    @Test
+    public void testZooKeeperMultipleContextEntries() throws Exception {
+        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
+        final String KERBEROS_ENTRY_NAME = "my_kerberos_context";
+        final String KERBEROS_PRINCIPAL  = "myKerberosIdentity";
+        final String DIGEST_ENTRY_NAME   = "my_digest_context";
+        final String DIGEST_PRINCIPAL    = "myDigestIdentity";
+        final String DIGEST_PWD_ALIAS    = "myAlias";
+        final String DIGEST_PWD          = "mysecret";
+
+        AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
+        EasyMock.expect(aliasService.getPasswordFromAliasForGateway(DIGEST_PWD_ALIAS)).andReturn(DIGEST_PWD.toCharArray()).anyTimes();
+        EasyMock.replay(aliasService);
+
+        File dummyKeyTab = File.createTempFile("my_context", "keytab");
+        registryConfigs.add(createKerberosConfig(KERBEROS_ENTRY_NAME, KERBEROS_PRINCIPAL, dummyKeyTab.getAbsolutePath()));
+        registryConfigs.add(createDigestConfig(DIGEST_ENTRY_NAME, DIGEST_PRINCIPAL, DIGEST_PWD_ALIAS));
+
+        try {
+            RemoteConfigurationRegistryJAASConfig jaasConfig =
+                                        RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, aliasService);
+
+            // Make sure there are no entries for an invalid context entry name
+            assertNull(jaasConfig.getAppConfigurationEntry("invalid"));
+
+            // Validate the kerberos context entry
+            validateKerberosContext(jaasConfig,
+                                    KERBEROS_ENTRY_NAME,
+                                    KERBEROS_PRINCIPAL,
+                                    dummyKeyTab.getAbsolutePath(),
+                                    true,
+                                    false);
+
+            // Validate the digest context entry
+            validateDigestContext(jaasConfig,
+                                  DIGEST_ENTRY_NAME,
+                                  RemoteConfigurationRegistryJAASConfig.digestLoginModules.get("ZOOKEEPER"),
+                                  DIGEST_PRINCIPAL,
+                                  DIGEST_PWD);
+
+        } finally {
+            Configuration.setConfiguration(null);
+        }
+    }
+
+    @Test
+    public void testZooKeeperDigestContextEntryWithoutAliasService() throws Exception {
+        List<RemoteConfigurationRegistryConfig> registryConfigs = new ArrayList<>();
+        final String ENTRY_NAME       = "my_digest_context";
+        final String DIGEST_PRINCIPAL = "myIdentity";
+        final String DIGEST_PWD_ALIAS = "myAlias";
+
+        registryConfigs.add(createDigestConfig(ENTRY_NAME, DIGEST_PRINCIPAL, DIGEST_PWD_ALIAS));
+
+        try {
+            RemoteConfigurationRegistryJAASConfig.configure(registryConfigs, null);
+            fail("Expected IllegalArgumentException because the AliasService is not available.");
+        } catch (IllegalArgumentException e) {
+            // Expected
+            assertTrue(e.getMessage().contains("AliasService"));
+        } catch (Throwable e) {
+            fail("Wrong exception encountered: " + e.getClass().getName() + ", " + e.getMessage());
+        } finally {
+            Configuration.setConfiguration(null);
+        }
+    }
+
+    private static RemoteConfigurationRegistryConfig createDigestConfig(String entryName,
+                                                                        String principal,
+                                                                        String credentialAlias) {
+        return createDigestConfig(entryName, principal, credentialAlias, "ZooKeeper");
+    }
+
+    private static RemoteConfigurationRegistryConfig createDigestConfig(String entryName,
+                                                                        String principal,
+                                                                        String credentialAlias,
+                                                                        String registryType) {
+        RemoteConfigurationRegistryConfig rc = EasyMock.createNiceMock(RemoteConfigurationRegistryConfig.class);
+        EasyMock.expect(rc.getRegistryType()).andReturn(registryType).anyTimes();
+        EasyMock.expect(rc.getName()).andReturn(entryName).anyTimes();
+        EasyMock.expect(rc.isSecureRegistry()).andReturn(true).anyTimes();
+        EasyMock.expect(rc.getAuthType()).andReturn("digest").anyTimes();
+        EasyMock.expect(rc.getPrincipal()).andReturn(principal).anyTimes();
+        EasyMock.expect(rc.getCredentialAlias()).andReturn(credentialAlias).anyTimes();
+        EasyMock.replay(rc);
+        return rc;
+    }
+
+
+    private static RemoteConfigurationRegistryConfig createKerberosConfig(String entryName,
+                                                                          String principal,
+                                                                          String keyTabPath) {
+        return createKerberosConfig(entryName, principal, keyTabPath, "ZooKeeper");
+    }
+
+    private static RemoteConfigurationRegistryConfig createKerberosConfig(String entryName,
+                                                                          String principal,
+                                                                          String keyTabPath,
+                                                                          String registryType) {
+        return createKerberosConfig(entryName, principal, keyTabPath, null, null, registryType);
+    }
+
+    private static RemoteConfigurationRegistryConfig createKerberosConfig(String entryName,
+                                                                          String principal,
+                                                                          String keyTabPath,
+                                                                          Boolean useKeyTab,
+                                                                          Boolean useTicketCache,
+                                                                          String registryType) {
+        RemoteConfigurationRegistryConfig rc = EasyMock.createNiceMock(RemoteConfigurationRegistryConfig.class);
+        EasyMock.expect(rc.getRegistryType()).andReturn(registryType).anyTimes();
+        EasyMock.expect(rc.getName()).andReturn(entryName).anyTimes();
+        EasyMock.expect(rc.isSecureRegistry()).andReturn(true).anyTimes();
+        EasyMock.expect(rc.getAuthType()).andReturn("kerberos").anyTimes();
+        EasyMock.expect(rc.getPrincipal()).andReturn(principal).anyTimes();
+        EasyMock.expect(rc.getKeytab()).andReturn(keyTabPath).anyTimes();
+        EasyMock.expect(rc.isUseKeyTab()).andReturn(useKeyTab != null ? useKeyTab : true).anyTimes();
+        EasyMock.expect(rc.isUseTicketCache()).andReturn(useTicketCache != null ? useTicketCache : false).anyTimes();
+        EasyMock.replay(rc);
+        return rc;
+    }
+
+    private static void validateDigestContext(RemoteConfigurationRegistryJAASConfig config,
+                                              String                                entryName,
+                                              String                                loginModule,
+                                              String                                principal,
+                                              String                                password) throws Exception {
+        AppConfigurationEntry[] myContextEntries = config.getAppConfigurationEntry(entryName);
+        assertNotNull(myContextEntries);
+        assertEquals(1, myContextEntries.length);
+        AppConfigurationEntry entry = myContextEntries[0];
+        assertEquals(loginModule, entry.getLoginModuleName());
+        Map<String, ?> entryOpts = entry.getOptions();
+        assertEquals(principal, entryOpts.get("username"));
+        assertEquals(password, entryOpts.get("password"));
+    }
+
+    private static void validateKerberosContext(RemoteConfigurationRegistryJAASConfig config,
+                                                String                                entryName,
+                                                String                                principal,
+                                                String                                keyTab,
+                                                boolean                               useKeyTab,
+                                                boolean                               useTicketCache) throws Exception {
+        AppConfigurationEntry[] myContextEntries = config.getAppConfigurationEntry(entryName);
+        assertNotNull(myContextEntries);
+        assertEquals(1, myContextEntries.length);
+        AppConfigurationEntry entry = myContextEntries[0];
+        assertTrue(entry.getLoginModuleName().endsWith(".security.auth.module.Krb5LoginModule"));
+        Map<String, ?> entryOpts = entry.getOptions();
+        assertEquals(principal, entryOpts.get("principal"));
+        assertEquals(keyTab, entryOpts.get("keyTab"));
+        assertEquals(useKeyTab, Boolean.valueOf((String)entryOpts.get("isUseKeyTab")));
+        assertEquals(useTicketCache, Boolean.valueOf((String)entryOpts.get("isUseTicketCache")));
+    }
+}
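
For reference, a sketch of the digest AppConfigurationEntry shape the validations
above expect; the login module name is an assumption (the actual value comes from
RemoteConfigurationRegistryJAASConfig.digestLoginModules, which is not shown in
this diff), and the credentials are placeholders:

    import java.util.HashMap;
    import java.util.Map;
    import javax.security.auth.login.AppConfigurationEntry;

    public class DigestEntrySketch {
        static AppConfigurationEntry digestEntry(String username, String password) {
            // The tests assert that these options are exposed as "username"/"password"
            Map<String, String> opts = new HashMap<>();
            opts.put("username", username);
            opts.put("password", password);
            return new AppConfigurationEntry(
                "org.apache.zookeeper.server.auth.DigestLoginModule", // assumed module name
                AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
                opts);
        }
    }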

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClient.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClient.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClient.java
deleted file mode 100644
index bfb4518..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClient.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.services.config.client;
-
-import java.util.List;
-
-public interface RemoteConfigurationRegistryClient {
-
-    String getAddress();
-
-    boolean isAuthenticationConfigured();
-
-    boolean entryExists(String path);
-
-    List<EntryACL> getACL(String path);
-
-    void setACL(String path, List<EntryACL> acls);
-
-    List<String> listChildEntries(String path);
-
-    String getEntryData(String path);
-
-    String getEntryData(String path, String encoding);
-
-    void createEntry(String path);
-
-    void createEntry(String path, String data);
-
-    void createEntry(String path, String data, String encoding);
-
-    int setEntryData(String path, String data);
-
-    int setEntryData(String path, String data, String encoding);
-
-    void deleteEntry(String path);
-
-    void addChildEntryListener(String path, ChildEntryListener listener) throws Exception;
-
-    void addEntryListener(String path, EntryListener listener) throws Exception;
-
-    void removeEntryListener(String path) throws Exception;
-
-    interface ChildEntryListener {
-
-        enum Type {
-            ADDED,
-            REMOVED,
-            UPDATED
-        }
-
-        void childEvent(RemoteConfigurationRegistryClient client, ChildEntryListener.Type type, String path);
-    }
-
-    interface EntryListener {
-        void entryChanged(RemoteConfigurationRegistryClient client, String path, byte[] data);
-    }
-
-    interface EntryACL {
-        String getId();
-        String getType();
-        Object getPermissions();
-        boolean canRead();
-        boolean canWrite();
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClientService.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClientService.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClientService.java
deleted file mode 100644
index 1467f75..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/config/client/RemoteConfigurationRegistryClientService.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.services.config.client;
-
-import org.apache.hadoop.gateway.services.Service;
-import org.apache.hadoop.gateway.services.security.AliasService;
-
-public interface RemoteConfigurationRegistryClientService extends Service {
-
-    void setAliasService(AliasService aliasService);
-
-    RemoteConfigurationRegistryClient get(String l);
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/ClusterConfigurationMonitorService.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/ClusterConfigurationMonitorService.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/ClusterConfigurationMonitorService.java
deleted file mode 100644
index 961f2e5..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/ClusterConfigurationMonitorService.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology;
-
-import org.apache.hadoop.gateway.services.Service;
-import org.apache.hadoop.gateway.topology.discovery.ClusterConfigurationMonitor;
-
-/**
- * Gateway service for managing cluster configuration monitors.
- */
-public interface ClusterConfigurationMonitorService extends Service {
-
-    /**
-     *
-     * @param type The type of monitor (e.g., Ambari)
-     *
-     * @return The monitor associated with the specified type, or null if there is no such monitor.
-     */
-    ClusterConfigurationMonitor getMonitor(String type);
-
-
-    /**
-     * Register for configuration change notifications from <em>any</em> of the monitors managed by this service.
-     *
-     * @param listener The listener to register.
-     */
-    void addListener(ClusterConfigurationMonitor.ConfigurationChangeListener listener);
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitor.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitor.java
deleted file mode 100644
index fc3614d..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitor.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery;
-
-public interface ClusterConfigurationMonitor {
-
-    /**
-     * Start the monitor.
-     */
-    void start();
-
-    /**
-     * Stop the monitor.
-     */
-    void stop();
-
-    /**
-     *
-     * @param interval The polling interval, in seconds
-     */
-    void setPollingInterval(int interval);
-
-    /**
-     * Register for notifications from the monitor.
-     */
-    void addListener(ConfigurationChangeListener listener);
-
-    /**
-     * Monitor listener interface for receiving notifications that a configuration has changed.
-     */
-    interface ConfigurationChangeListener {
-        void onConfigurationChange(String source, String clusterName);
-    }
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java
deleted file mode 100644
index a8d5f30..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery;
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.security.AliasService;
-
-public interface ClusterConfigurationMonitorProvider {
-
-    String getType();
-
-    ClusterConfigurationMonitor newInstance(GatewayConfig config, AliasService aliasService);
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitor.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitor.java
deleted file mode 100644
index 82c5809..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitor.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-public interface RemoteConfigurationMonitor {
-
-    void start() throws Exception;
-
-    void stop() throws Exception;
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java
deleted file mode 100644
index d19dace..0000000
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.monitor;
-
-
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.config.client.RemoteConfigurationRegistryClientService;
-
-public interface RemoteConfigurationMonitorProvider {
-
-    /**
-     *
-     * @param config        The gateway configuration.
-     * @param clientService The RemoteConfigurationRegistryClientService for accessing the remote configuration.
-     *
-     * @return A RemoteConfigurationMonitor for keeping the local config in sync with the remote config
-     */
-    RemoteConfigurationMonitor newInstance(GatewayConfig config, RemoteConfigurationRegistryClientService clientService);
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClient.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClient.java b/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClient.java
new file mode 100644
index 0000000..5afae63
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClient.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.services.config.client;
+
+import java.util.List;
+
+public interface RemoteConfigurationRegistryClient {
+
+    String getAddress();
+
+    boolean isAuthenticationConfigured();
+
+    boolean entryExists(String path);
+
+    List<EntryACL> getACL(String path);
+
+    void setACL(String path, List<EntryACL> acls);
+
+    List<String> listChildEntries(String path);
+
+    String getEntryData(String path);
+
+    String getEntryData(String path, String encoding);
+
+    void createEntry(String path);
+
+    void createEntry(String path, String data);
+
+    void createEntry(String path, String data, String encoding);
+
+    int setEntryData(String path, String data);
+
+    int setEntryData(String path, String data, String encoding);
+
+    void deleteEntry(String path);
+
+    void addChildEntryListener(String path, ChildEntryListener listener) throws Exception;
+
+    void addEntryListener(String path, EntryListener listener) throws Exception;
+
+    void removeEntryListener(String path) throws Exception;
+
+    interface ChildEntryListener {
+
+        enum Type {
+            ADDED,
+            REMOVED,
+            UPDATED
+        }
+
+        void childEvent(RemoteConfigurationRegistryClient client, ChildEntryListener.Type type, String path);
+    }
+
+    interface EntryListener {
+        void entryChanged(RemoteConfigurationRegistryClient client, String path, byte[] data);
+    }
+
+    interface EntryACL {
+        String getId();
+        String getType();
+        Object getPermissions();
+        boolean canRead();
+        boolean canWrite();
+    }
+
+}
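
Editor's note: as a hedged illustration (not part of the commit), the sketch below shows how a consumer might exercise the client API defined above. The client instance is assumed to come from a RemoteConfigurationRegistryClientService, and the "/knox/config" registry path is invented; every call used here is declared by the interface above.

    import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;

    public class RegistryClientSketch {
        // 'client' is assumed to be obtained elsewhere; "/knox/config" is hypothetical.
        void sync(RemoteConfigurationRegistryClient client) throws Exception {
            String base = "/knox/config";
            if (!client.entryExists(base)) {
                client.createEntry(base);
            }
            // Dump the current data for each child entry.
            for (String child : client.listChildEntries(base)) {
                System.out.println(child + " => " + client.getEntryData(base + "/" + child));
            }
            // React to subsequent changes beneath the base path.
            client.addChildEntryListener(base, (c, type, path) ->
                System.out.println("child event " + type + " at " + path));
        }
    }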

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClientService.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClientService.java b/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClientService.java
new file mode 100644
index 0000000..77573dd
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/services/config/client/RemoteConfigurationRegistryClientService.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.services.config.client;
+
+import org.apache.knox.gateway.services.Service;
+import org.apache.knox.gateway.services.security.AliasService;
+
+public interface RemoteConfigurationRegistryClientService extends Service {
+
+    void setAliasService(AliasService aliasService);
+
+    RemoteConfigurationRegistryClient get(String name);
+
+}
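
Editor's note: a minimal sketch of looking up a named client from this service; the "sandbox-zookeeper" registry configuration name is invented for illustration.

    import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
    import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;

    public class ClientLookupSketch {
        RemoteConfigurationRegistryClient lookup(RemoteConfigurationRegistryClientService service) {
            // "sandbox-zookeeper" is a hypothetical registry configuration name.
            return service.get("sandbox-zookeeper");
        }
    }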

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/topology/ClusterConfigurationMonitorService.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/ClusterConfigurationMonitorService.java b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/ClusterConfigurationMonitorService.java
new file mode 100644
index 0000000..0bfaa5f
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/ClusterConfigurationMonitorService.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology;
+
+import org.apache.knox.gateway.services.Service;
+import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
+
+/**
+ * Gateway service for managing cluster configuration monitors.
+ */
+public interface ClusterConfigurationMonitorService extends Service {
+
+    /**
+     *
+     * @param type The type of monitor (e.g., Ambari)
+     *
+     * @return The monitor associated with the specified type, or null if there is no such monitor.
+     */
+    ClusterConfigurationMonitor getMonitor(String type);
+
+
+    /**
+     * Register for configuration change notifications from <em>any</em> of the monitors managed by this service.
+     *
+     * @param listener The listener to register.
+     */
+    void addListener(ClusterConfigurationMonitor.ConfigurationChangeListener listener);
+
+}
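
Editor's note: a sketch of wiring a single listener across all managed monitors, assuming only the interface above; the "Ambari" type string is taken from the Javadoc example.

    import org.apache.knox.gateway.topology.ClusterConfigurationMonitorService;

    public class MonitorServiceSketch {
        void wire(ClusterConfigurationMonitorService monitors) {
            // One listener receives change events from every managed monitor.
            monitors.addListener((source, clusterName) ->
                System.out.println("config change in " + clusterName + " (source: " + source + ")"));
            if (monitors.getMonitor("Ambari") == null) {
                System.out.println("no Ambari monitor registered");
            }
        }
    }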

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitor.java b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitor.java
new file mode 100644
index 0000000..641bad5
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitor.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery;
+
+public interface ClusterConfigurationMonitor {
+
+    /**
+     * Start the monitor.
+     */
+    void start();
+
+    /**
+     * Stop the monitor.
+     */
+    void stop();
+
+    /**
+     *
+     * @param interval The polling interval, in seconds
+     */
+    void setPollingInterval(int interval);
+
+    /**
+     * Register for notifications from the monitor.
+     */
+    void addListener(ConfigurationChangeListener listener);
+
+    /**
+     * Monitor listener interface for receiving notifications that a configuration has changed.
+     */
+    interface ConfigurationChangeListener {
+        void onConfigurationChange(String source, String clusterName);
+    }
+}
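
Editor's note: a sketch of the monitor lifecycle implied by this interface; the 60-second interval is an arbitrary illustration, not a recommended value.

    import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;

    public class MonitorLifecycleSketch {
        void run(ClusterConfigurationMonitor monitor) {
            monitor.setPollingInterval(60); // seconds, per the Javadoc above
            monitor.addListener((source, clusterName) ->
                System.out.println("change detected: " + source + "/" + clusterName));
            monitor.start();
            // ... on shutdown: monitor.stop();
        }
    }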

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java
new file mode 100644
index 0000000..c84e5c9
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/discovery/ClusterConfigurationMonitorProvider.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery;
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.security.AliasService;
+
+public interface ClusterConfigurationMonitorProvider {
+
+    String getType();
+
+    ClusterConfigurationMonitor newInstance(GatewayConfig config, AliasService aliasService);
+}
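
Editor's note: a hypothetical provider implementation; the "EXAMPLE" type and the no-op monitor body are invented to keep the sketch self-contained. Providers of this kind are presumably discovered via META-INF/services, as the service-discovery test elsewhere in this commit suggests, but that wiring is an assumption here.

    import org.apache.knox.gateway.config.GatewayConfig;
    import org.apache.knox.gateway.services.security.AliasService;
    import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
    import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitorProvider;

    public class ExampleMonitorProvider implements ClusterConfigurationMonitorProvider {
        @Override
        public String getType() {
            return "EXAMPLE"; // invented monitor type
        }

        @Override
        public ClusterConfigurationMonitor newInstance(GatewayConfig config, AliasService aliasService) {
            // A no-op monitor keeps this sketch self-contained.
            return new ClusterConfigurationMonitor() {
                @Override public void start() {}
                @Override public void stop() {}
                @Override public void setPollingInterval(int interval) {}
                @Override public void addListener(ConfigurationChangeListener listener) {}
            };
        }
    }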

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitor.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitor.java b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitor.java
new file mode 100644
index 0000000..0ce1513
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitor.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+public interface RemoteConfigurationMonitor {
+
+    void start() throws Exception;
+
+    void stop() throws Exception;
+}
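
Editor's note: since start() and stop() both declare throws Exception, callers own the failure handling; a minimal sketch:

    import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitor;

    public class RemoteMonitorSketch {
        void startSafely(RemoteConfigurationMonitor monitor) throws Exception {
            try {
                monitor.start();
            } catch (Exception e) {
                System.err.println("remote configuration monitor failed to start: " + e.getMessage());
                throw e; // surface the failure to the caller
            }
        }
    }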

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java
new file mode 100644
index 0000000..cab33f9
--- /dev/null
+++ b/gateway-spi/src/main/java/org/apache/knox/gateway/topology/monitor/RemoteConfigurationMonitorProvider.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.monitor;
+
+
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
+
+public interface RemoteConfigurationMonitorProvider {
+
+    /**
+     *
+     * @param config        The gateway configuration.
+     * @param clientService The RemoteConfigurationRegistryClientService for accessing the remote configuration.
+     *
+     * @return A RemoteConfigurationMonitor for keeping the local config in sync with the remote config
+     */
+    RemoteConfigurationMonitor newInstance(GatewayConfig config, RemoteConfigurationRegistryClientService clientService);
+
+}
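
Editor's note: a sketch of how a provider might be asked for a monitor and started; all wiring here is hypothetical, using only the two interfaces defined in this package.

    import org.apache.knox.gateway.config.GatewayConfig;
    import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
    import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitor;
    import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorProvider;

    public class RemoteMonitorWiringSketch {
        RemoteConfigurationMonitor create(RemoteConfigurationMonitorProvider provider,
                                          GatewayConfig config,
                                          RemoteConfigurationRegistryClientService clients) throws Exception {
            RemoteConfigurationMonitor monitor = provider.newInstance(config, clients);
            monitor.start(); // keep the local topology config in sync from here on
            return monitor;
        }
    }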

http://git-wip-us.apache.org/repos/asf/knox/blob/e766b3b7/gateway-test/src/test/java/org/apache/hadoop/gateway/SimpleDescriptorHandlerFuncTest.java
----------------------------------------------------------------------
diff --git a/gateway-test/src/test/java/org/apache/hadoop/gateway/SimpleDescriptorHandlerFuncTest.java b/gateway-test/src/test/java/org/apache/hadoop/gateway/SimpleDescriptorHandlerFuncTest.java
deleted file mode 100644
index bda8952..0000000
--- a/gateway-test/src/test/java/org/apache/hadoop/gateway/SimpleDescriptorHandlerFuncTest.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.gateway.config.GatewayConfig;
-import org.apache.hadoop.gateway.services.GatewayServices;
-import org.apache.hadoop.gateway.services.security.AliasService;
-import org.apache.hadoop.gateway.services.security.KeystoreService;
-import org.apache.hadoop.gateway.services.security.MasterService;
-import org.apache.hadoop.gateway.services.topology.TopologyService;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscovery;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryConfig;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryType;
-import org.apache.hadoop.gateway.topology.simple.SimpleDescriptor;
-import org.apache.hadoop.gateway.topology.simple.SimpleDescriptorHandler;
-import org.apache.hadoop.test.TestUtils;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.security.KeyStore;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-public class SimpleDescriptorHandlerFuncTest {
-
-
-  private static final String TEST_PROVIDER_CONFIG =
-      "    <gateway>\n" +
-          "        <provider>\n" +
-          "            <role>authentication</role>\n" +
-          "            <name>ShiroProvider</name>\n" +
-          "            <enabled>true</enabled>\n" +
-          "            <param>\n" +
-          "                <name>sessionTimeout</name>\n" +
-          "                <value>30</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>main.ldapRealm</name>\n" +
-          "                <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>main.ldapContextFactory</name>\n" +
-          "                <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>main.ldapRealm.contextFactory</name>\n" +
-          "                <value>$ldapContextFactory</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>main.ldapRealm.userDnTemplate</name>\n" +
-          "                <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>main.ldapRealm.contextFactory.url</name>\n" +
-          "                <value>ldap://localhost:33389</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
-          "                <value>simple</value>\n" +
-          "            </param>\n" +
-          "            <param>\n" +
-          "                <name>urls./**</name>\n" +
-          "                <value>authcBasic</value>\n" +
-          "            </param>\n" +
-          "        </provider>\n" +
-          "\n" +
-          "        <provider>\n" +
-          "            <role>identity-assertion</role>\n" +
-          "            <name>Default</name>\n" +
-          "            <enabled>true</enabled>\n" +
-          "        </provider>\n" +
-          "\n" +
-          "        <provider>\n" +
-          "            <role>hostmap</role>\n" +
-          "            <name>static</name>\n" +
-          "            <enabled>true</enabled>\n" +
-          "            <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
-          "        </provider>\n" +
-          "    </gateway>\n";
-
-
-  /**
-   * KNOX-1136
-   * <p>
-   * Test that a credential store is created, and a encryptQueryString alias is defined, with a password that is not
-   * random (but is derived from the master secret and the topology name).
-   * <p>
-   * N.B. This test depends on the NoOpServiceDiscovery extension being configured in META-INF/services
-   */
-  @Test
-  public void testSimpleDescriptorHandlerQueryStringCredentialAliasCreation() throws Exception {
-
-    final String testMasterSecret = "mysecret";
-    final String discoveryType = "NO_OP";
-    final String clusterName = "dummy";
-
-    final Map<String, List<String>> serviceURLs = new HashMap<>();
-    serviceURLs.put("RESOURCEMANAGER", Collections.singletonList("http://myhost:1234/resource"));
-
-    File testRootDir = TestUtils.createTempDir(getClass().getSimpleName());
-    File testConfDir = new File(testRootDir, "conf");
-    File testProvDir = new File(testConfDir, "shared-providers");
-    File testTopoDir = new File(testConfDir, "topologies");
-    File testDeployDir = new File(testConfDir, "deployments");
-
-    // Write the externalized provider config to a temp file
-    File providerConfig = new File(testProvDir, "ambari-cluster-policy.xml");
-    FileUtils.write(providerConfig, TEST_PROVIDER_CONFIG);
-
-    File topologyFile = null;
-    try {
-      File destDir = new File(System.getProperty("java.io.tmpdir")).getCanonicalFile();
-
-      // Mock out the simple descriptor
-      SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
-      EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
-      EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(null).anyTimes();
-      EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(discoveryType).anyTimes();
-      EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
-      EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
-      EasyMock.expect(testDescriptor.getClusterName()).andReturn(clusterName).anyTimes();
-      List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
-      for (String serviceName : serviceURLs.keySet()) {
-        SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
-        EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
-        EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
-        EasyMock.expect(svc.getParams()).andReturn(Collections.emptyMap()).anyTimes();
-        EasyMock.replay(svc);
-        serviceMocks.add(svc);
-      }
-      EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
-      EasyMock.replay(testDescriptor);
-
-      // Try setting up enough of the GatewayServer to support the test...
-      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
-      InetSocketAddress gatewayAddress = new InetSocketAddress(0);
-      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(testTopoDir.getAbsolutePath()).anyTimes();
-      EasyMock.expect(config.getGatewayDeploymentDir()).andReturn(testDeployDir.getAbsolutePath()).anyTimes();
-      EasyMock.expect(config.getGatewayAddress()).andReturn(gatewayAddress).anyTimes();
-      EasyMock.expect(config.getGatewayPortMappings()).andReturn(Collections.emptyMap()).anyTimes();
-      EasyMock.replay(config);
-
-      // Setup the Gateway Services
-      GatewayServices gatewayServices = EasyMock.createNiceMock(GatewayServices.class);
-
-      // Master Service
-      MasterService ms = EasyMock.createNiceMock(MasterService.class);
-      EasyMock.expect(ms.getMasterSecret()).andReturn(testMasterSecret.toCharArray()).anyTimes();
-      EasyMock.replay(ms);
-      EasyMock.expect(gatewayServices.getService("MasterService")).andReturn(ms).anyTimes();
-
-      // Keystore Service
-      KeystoreService ks = EasyMock.createNiceMock(KeystoreService.class);
-      EasyMock.expect(ks.isCredentialStoreForClusterAvailable(testDescriptor.getName())).andReturn(false).once();
-      ks.createCredentialStoreForCluster(testDescriptor.getName());
-      EasyMock.expectLastCall().once();
-      KeyStore credStore = EasyMock.createNiceMock(KeyStore.class);
-      EasyMock.expect(ks.getCredentialStoreForCluster(testDescriptor.getName())).andReturn(credStore).anyTimes();
-      EasyMock.replay(ks);
-      EasyMock.expect(gatewayServices.getService(GatewayServices.KEYSTORE_SERVICE)).andReturn(ks).anyTimes();
-
-      // Alias Service
-      AliasService as = EasyMock.createNiceMock(AliasService.class);
-      // Captures for validating the alias creation for a generated topology
-      Capture<String> capturedCluster = EasyMock.newCapture();
-      Capture<String> capturedAlias = EasyMock.newCapture();
-      Capture<String> capturedPwd = EasyMock.newCapture();
-      as.addAliasForCluster(capture(capturedCluster), capture(capturedAlias), capture(capturedPwd));
-      EasyMock.expectLastCall().anyTimes();
-      EasyMock.replay(as);
-      EasyMock.expect(gatewayServices.getService(GatewayServices.ALIAS_SERVICE)).andReturn(as).anyTimes();
-
-      // Topology Service
-      TopologyService ts = EasyMock.createNiceMock(TopologyService.class);
-      ts.addTopologyChangeListener(anyObject());
-      EasyMock.expectLastCall().anyTimes();
-      ts.reloadTopologies();
-      EasyMock.expectLastCall().anyTimes();
-      EasyMock.expect(ts.getTopologies()).andReturn(Collections.emptyList()).anyTimes();
-      EasyMock.replay(ts);
-      EasyMock.expect(gatewayServices.getService(GatewayServices.TOPOLOGY_SERVICE)).andReturn(ts).anyTimes();
-
-      EasyMock.replay(gatewayServices);
-
-      // Start a GatewayService with the GatewayServices mock
-      GatewayServer server = GatewayServer.startGateway(config, gatewayServices);
-
-      // Invoke the simple descriptor handler, which will also create the credential store
-      // (because it doesn't exist) and the encryptQueryString alias
-      Map<String, File> files = SimpleDescriptorHandler.handle(testDescriptor,
-                                                               providerConfig.getParentFile(),
-                                                               destDir);
-      topologyFile = files.get("topology");
-
-      // Validate the AliasService interaction
-      assertEquals("Unexpected cluster name for the alias (should be the topology name).",
-                   testDescriptor.getName(), capturedCluster.getValue());
-      assertEquals("Unexpected alias name.", "encryptQueryString", capturedAlias.getValue());
-      assertEquals("Unexpected alias value (should be master secret + topology name.",
-                   testMasterSecret + testDescriptor.getName(), capturedPwd.getValue());
-
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail(e.getMessage());
-    } finally {
-      FileUtils.forceDelete(testRootDir);
-      if (topologyFile != null) {
-        topologyFile.delete();
-      }
-    }
-  }
-
-
-  ///////////////////////////////////////////////////////////////////////////////////////////////////////
-  // Test classes for effectively "skipping" service discovery for this test.
-  ///////////////////////////////////////////////////////////////////////////////////////////////////////
-
-  public static final class NoOpServiceDiscoveryType implements ServiceDiscoveryType {
-    @Override
-    public String getType() {
-      return NoOpServiceDiscovery.TYPE;
-    }
-
-    @Override
-    public ServiceDiscovery newInstance() {
-      return new NoOpServiceDiscovery();
-    }
-  }
-
-  private static final class NoOpServiceDiscovery implements ServiceDiscovery {
-    static final String TYPE = "NO_OP";
-
-    @Override
-    public String getType() {
-      return TYPE;
-    }
-
-    @Override
-    public Map<String, Cluster> discover(ServiceDiscoveryConfig config) {
-      return Collections.emptyMap();
-    }
-
-    @Override
-    public Cluster discover(ServiceDiscoveryConfig config, String clusterName) {
-      return null;
-    }
-  }
-
-}


[28/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
index 4eb1954,0000000..f3288fd
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
@@@ -1,123 -1,0 +1,163 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import com.fasterxml.jackson.annotation.JsonProperty;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +
 +class SimpleDescriptorImpl implements SimpleDescriptor {
 +
 +    @JsonProperty("discovery-type")
 +    private String discoveryType;
 +
 +    @JsonProperty("discovery-address")
 +    private String discoveryAddress;
 +
 +    @JsonProperty("discovery-user")
 +    private String discoveryUser;
 +
 +    @JsonProperty("discovery-pwd-alias")
 +    private String discoveryPasswordAlias;
 +
 +    @JsonProperty("provider-config-ref")
 +    private String providerConfig;
 +
 +    @JsonProperty("cluster")
 +    private String cluster;
 +
 +    @JsonProperty("services")
 +    private List<ServiceImpl> services;
 +
++    @JsonProperty("applications")
++    private List<ApplicationImpl> applications;
++
 +    private String name = null;
 +
 +    void setName(String name) {
 +        this.name = name;
 +    }
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +    @Override
 +    public String getDiscoveryType() {
 +        return discoveryType;
 +    }
 +
 +    @Override
 +    public String getDiscoveryAddress() {
 +        return discoveryAddress;
 +    }
 +
 +    @Override
 +    public String getDiscoveryUser() {
 +        return discoveryUser;
 +    }
 +
 +    @Override
 +    public String getDiscoveryPasswordAlias() {
 +        return discoveryPasswordAlias;
 +    }
 +
 +    @Override
 +    public String getClusterName() {
 +        return cluster;
 +    }
 +
 +    @Override
 +    public String getProviderConfig() {
 +        return providerConfig;
 +    }
 +
 +    @Override
 +    public List<Service> getServices() {
 +        List<Service> result = new ArrayList<>();
-         result.addAll(services);
++        if (services != null) {
++            result.addAll(services);
++        }
++        return result;
++    }
++
++    @Override
++    public List<Application> getApplications() {
++        List<Application> result = new ArrayList<>();
++        if (applications != null) {
++            result.addAll(applications);
++        }
 +        return result;
 +    }
 +
 +    public static class ServiceImpl implements Service {
 +        @JsonProperty("name")
 +        private String name;
 +
 +        @JsonProperty("params")
 +        private Map<String, String> params;
 +
 +        @JsonProperty("urls")
 +        private List<String> urls;
 +
 +        @Override
 +        public String getName() {
 +            return name;
 +        }
 +
 +        @Override
 +        public Map<String, String> getParams() {
 +            return params;
 +        }
 +
 +        @Override
 +        public List<String> getURLs() {
 +            return urls;
 +        }
 +    }
 +
++    public static class ApplicationImpl implements Application {
++        @JsonProperty("name")
++        private String name;
++
++        @JsonProperty("params")
++        private Map<String, String> params;
++
++        @JsonProperty("urls")
++        private List<String> urls;
++
++        @Override
++        public String getName() {
++            return name;
++        }
++
++        @Override
++        public Map<String, String> getParams() {
++            return params;
++        }
++
++        @Override
++        public List<String> getURLs() {
++            return urls;
++        }
++    }
++
 +}
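
Editor's note: for reference, a hypothetical descriptor showing the shape these @JsonProperty bindings (including the new applications list) expect; the values echo the test data later in this commit and are otherwise invented.

    {
      "discovery-type": "AMBARI",
      "discovery-address": "http://c6401.ambari.apache.org:8080",
      "provider-config-ref": "ambari-cluster-policy.xml",
      "cluster": "myCluster",
      "services": [
        { "name": "AMBARIUI", "urls": [ "http://c6401.ambari.apache.org:8080" ] }
      ],
      "applications": [
        { "name": "app-one", "params": { "appone.token.ttl": "100000" } }
      ]
    }

SimpleDescriptorFactory.parse(file), exercised by the tests below, would bind such a document to the fields above.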

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
index 81aedec,0000000..a1fcb6d
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
@@@ -1,95 -1,0 +1,97 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.xml;
 +
 +import org.apache.commons.digester3.Rule;
 +import org.apache.commons.digester3.binder.AbstractRulesModule;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Version;
 +import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;
 +import org.xml.sax.Attributes;
 +
 +public class KnoxFormatXmlTopologyRules extends AbstractRulesModule {
 +
 +  private static final String ROOT_TAG = "topology";
 +  private static final String NAME_TAG = "name";
 +  private static final String VERSION_TAG = "version";
 +  private static final String DEFAULT_SERVICE_TAG = "path";
++  private static final String GENERATED_TAG = "generated";
 +  private static final String APPLICATION_TAG = "application";
 +  private static final String SERVICE_TAG = "service";
 +  private static final String ROLE_TAG = "role";
 +  private static final String URL_TAG = "url";
 +  private static final String PROVIDER_TAG = "gateway/provider";
 +  private static final String ENABLED_TAG = "enabled";
 +  private static final String PARAM_TAG = "param";
 +  private static final String VALUE_TAG = "value";
 +
 +  private static final Rule paramRule = new ParamRule();
 +
 +  @Override
 +  protected void configure() {
 +    forPattern( ROOT_TAG ).createObject().ofType( BeanPropertyTopologyBuilder.class );
 +    forPattern( ROOT_TAG + "/" + NAME_TAG ).callMethod("name").usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + VERSION_TAG ).callMethod("version").usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + DEFAULT_SERVICE_TAG ).callMethod("defaultService").usingElementBodyAsArgument();
++    forPattern( ROOT_TAG + "/" + GENERATED_TAG ).callMethod("generated").usingElementBodyAsArgument();
 +
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG ).createObject().ofType( Application.class ).then().setNext( "addApplication" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG ).createObject().ofType( Service.class ).then().setNext( "addService" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG ).createObject().ofType( Provider.class ).then().setNext( "addProvider" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ENABLED_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +  }
 +
 +  private static class ParamRule extends Rule {
 +
 +    @Override
 +    public void begin( String namespace, String name, Attributes attributes ) {
 +      Param param = getDigester().peek();
 +      String paramName = attributes.getValue( "name" );
 +      if( paramName != null ) {
 +        param.setName( paramName );
 +        param.setValue( attributes.getValue( "value" ) );
 +      }
 +    }
 +
 +  }
 +
 +}
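
Editor's note: as an illustration of what these Digester rules bind, including the new <generated> element, a topology along these lines would parse (names and values invented):

    <topology>
        <name>sandbox</name>
        <generated>true</generated>
        <gateway>
            <provider>
                <role>authentication</role>
                <name>ShiroProvider</name>
                <enabled>true</enabled>
                <param><name>sessionTimeout</name><value>30</value></param>
            </provider>
        </gateway>
        <service>
            <role>AMBARIUI</role>
            <url>http://c6401.ambari.apache.org:8080</url>
        </service>
    </topology>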

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
index 41a7c10,0000000..df31f3d
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
@@@ -1,422 -1,0 +1,681 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.io.File;
 +import java.io.FileWriter;
 +import java.io.Writer;
 +import java.util.*;
 +
 +import org.junit.Test;
 +import static org.junit.Assert.*;
 +
 +
 +public class SimpleDescriptorFactoryTest {
 +
++    private enum FileType {
++        JSON,
++        YAML
++    }
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptor() throws Exception {
++        testParseSimpleDescriptor(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptor() throws Exception {
++        testParseSimpleDescriptor(FileType.YAML);
++    }
++
++    @Test
++    public void testParseJSONSimpleDescriptorWithServiceParams() throws Exception {
++        testParseSimpleDescriptorWithServiceParams(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptorWithServiceParams() throws Exception {
++        testParseSimpleDescriptorWithServiceParams(FileType.YAML);
++    }
++
++    @Test
++    public void testParseJSONSimpleDescriptorWithApplications() throws Exception {
++        testParseSimpleDescriptorWithApplications(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptorWithApplications() throws Exception {
++        testParseSimpleDescriptorWithApplications(FileType.YAML);
++    }
++
++
++    @Test
++    public void testParseJSONSimpleDescriptorWithServicesAndApplications() throws Exception {
++        testParseSimpleDescriptorWithServicesAndApplications(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptorWithServicesAndApplications() throws Exception {
++        testParseSimpleDescriptorWithServicesAndApplications(FileType.YAML);
++    }
++
 +
++    private void testParseSimpleDescriptor(FileType type) throws Exception {
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
-         final String   discoveryUser    = "admin";
++        final String   discoveryUser    = "joeblow";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
-         services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
++        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
-         String fileName = "test-topology.json";
-         File testJSON = null;
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             testJSON = writeJSON(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testJSON.getAbsolutePath());
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           services);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +        } finally {
-             if (testJSON != null) {
++            if (testFile != null) {
 +                try {
-                     testJSON.delete();
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
-     @Test
-     public void testParseJSONSimpleDescriptorWithServiceParams() throws Exception {
++    private void testParseSimpleDescriptorWithServiceParams(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        services.put("KNOXSSO", null);
 +        services.put("KNOXTOKEN", null);
 +        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
 +
 +        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, String> knoxSSOParams = new HashMap<>();
 +        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
 +        knoxSSOParams.put("knoxsso.token.ttl", "100000");
 +        serviceParams.put("KNOXSSO", knoxSSOParams);
 +
 +        Map<String, String> knoxTokenParams = new HashMap<>();
 +        knoxTokenParams.put("knox.token.ttl", "36000000");
 +        knoxTokenParams.put("knox.token.audiences", "tokenbased");
 +        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
 +        serviceParams.put("KNOXTOKEN", knoxTokenParams);
 +
 +        Map<String, String> customRoleParams = new HashMap<>();
 +        customRoleParams.put("custom.param.1", "value1");
 +        customRoleParams.put("custom.param.2", "value2");
 +        serviceParams.put("CustomRole", customRoleParams);
 +
-         String fileName = "test-topology.json";
-         File testJSON = null;
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             testJSON = writeJSON(fileName,
-                                  discoveryType,
-                                  discoveryAddress,
-                                  discoveryUser,
-                                  providerConfig,
-                                  clusterName,
-                                  services,
-                                  serviceParams);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testJSON.getAbsolutePath());
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           services,
++                                           serviceParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
-         } catch (Exception e) {
-             e.printStackTrace();
 +        } finally {
-             if (testJSON != null) {
++            if (testFile != null) {
 +                try {
-                     testJSON.delete();
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
-     @Test
-     public void testParseYAMLSimpleDescriptor() throws Exception {
++    private void testParseSimpleDescriptorWithApplications(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
-         final String   discoveryUser    = "joeblow";
++        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
-         final Map<String, List<String>> services = new HashMap<>();
-         services.put("NODEMANAGER", null);
-         services.put("JOBTRACKER", null);
-         services.put("RESOURCEMANAGER", null);
-         services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
-         services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
- 
-         String fileName = "test-topology.yml";
-         File testYAML = null;
++        final Map<String, List<String>> apps = new HashMap<>();
++        apps.put("app-one", null);
++        apps.put("appTwo", null);
++        apps.put("thirdApps", null);
++        apps.put("appfour", Arrays.asList("http://host1:1234", "http://host2:5678", "http://host1:1357"));
++        apps.put("AppFive", Collections.singletonList("http://host5:8080"));
++
++        final Map<String, Map<String, String>> appParams = new HashMap<>();
++        Map<String, String> oneParams = new HashMap<>();
++        oneParams.put("appone.cookie.secure.only", "true");
++        oneParams.put("appone.token.ttl", "100000");
++        appParams.put("app-one", oneParams);
++        Map<String, String> fiveParams = new HashMap<>();
++        fiveParams.put("myproperty", "true");
++        fiveParams.put("anotherparam", "100000");
++        appParams.put("AppFive", fiveParams);
++
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             testYAML = writeYAML(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testYAML.getAbsolutePath());
-             validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
-         } catch (Exception e) {
-             e.printStackTrace();
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           null,
++                                           null,
++                                           apps,
++                                           appParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
++            validateSimpleDescriptor(sd,
++                                     discoveryType,
++                                     discoveryAddress,
++                                     providerConfig,
++                                     clusterName,
++                                     null,
++                                     null,
++                                     apps,
++                                     appParams);
 +        } finally {
-             if (testYAML != null) {
++            if (testFile != null) {
 +                try {
-                     testYAML.delete();
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
- 
-     @Test
-     public void testParseYAMLSimpleDescriptorWithServiceParams() throws Exception {
++    private void testParseSimpleDescriptorWithServicesAndApplications(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
-         final String   discoveryUser    = "joeblow";
++        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
-         services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
++        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        services.put("KNOXSSO", null);
 +        services.put("KNOXTOKEN", null);
 +        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
 +
 +        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, String> knoxSSOParams = new HashMap<>();
 +        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
 +        knoxSSOParams.put("knoxsso.token.ttl", "100000");
 +        serviceParams.put("KNOXSSO", knoxSSOParams);
 +
 +        Map<String, String> knoxTokenParams = new HashMap<>();
 +        knoxTokenParams.put("knox.token.ttl", "36000000");
 +        knoxTokenParams.put("knox.token.audiences", "tokenbased");
 +        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
 +        serviceParams.put("KNOXTOKEN", knoxTokenParams);
 +
 +        Map<String, String> customRoleParams = new HashMap<>();
 +        customRoleParams.put("custom.param.1", "value1");
 +        customRoleParams.put("custom.param.2", "value2");
 +        serviceParams.put("CustomRole", customRoleParams);
 +
-         String fileName = "test-topology.yml";
-         File testYAML = null;
++        final Map<String, List<String>> apps = new HashMap<>();
++        apps.put("app-one", null);
++        apps.put("appTwo", null);
++        apps.put("thirdApps", null);
++        apps.put("appfour", Arrays.asList("http://host1:1234", "http://host2:5678", "http://host1:1357"));
++        apps.put("AppFive", Collections.singletonList("http://host5:8080"));
++
++        final Map<String, Map<String, String>> appParams = new HashMap<>();
++        Map<String, String> oneParams = new HashMap<>();
++        oneParams.put("appone.cookie.secure.only", "true");
++        oneParams.put("appone.token.ttl", "100000");
++        appParams.put("app-one", oneParams);
++        Map<String, String> fiveParams = new HashMap<>();
++        fiveParams.put("myproperty", "true");
++        fiveParams.put("anotherparam", "100000");
++        appParams.put("AppFive", fiveParams);
++
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             testYAML = writeYAML(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, serviceParams);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testYAML.getAbsolutePath());
-             validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
-         } catch (Exception e) {
-             e.printStackTrace();
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           services,
++                                           serviceParams,
++                                           apps,
++                                           appParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
++            validateSimpleDescriptor(sd,
++                                     discoveryType,
++                                     discoveryAddress,
++                                     providerConfig,
++                                     clusterName,
++                                     services,
++                                     serviceParams,
++                                     apps,
++                                     appParams);
 +        } finally {
-             if (testYAML != null) {
++            if (testFile != null) {
 +                try {
-                     testYAML.delete();
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
- 
-     private void validateSimpleDescriptor(SimpleDescriptor          sd,
-                                           String                    discoveryType,
-                                           String                    discoveryAddress,
-                                           String                    providerConfig,
-                                           String                    clusterName,
-                                           Map<String, List<String>> expectedServices) {
-         validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, expectedServices, null);
-     }
- 
- 
-     private void validateSimpleDescriptor(SimpleDescriptor                 sd,
-                                           String                           discoveryType,
-                                           String                           discoveryAddress,
-                                           String                           providerConfig,
-                                           String                           clusterName,
-                                           Map<String, List<String>>        expectedServices,
-                                           Map<String, Map<String, String>> expectedServiceParameters) {
-         assertNotNull(sd);
-         assertEquals(discoveryType, sd.getDiscoveryType());
-         assertEquals(discoveryAddress, sd.getDiscoveryAddress());
-         assertEquals(providerConfig, sd.getProviderConfig());
-         assertEquals(clusterName, sd.getClusterName());
- 
-         List<SimpleDescriptor.Service> actualServices = sd.getServices();
- 
-         assertEquals(expectedServices.size(), actualServices.size());
- 
-         for (SimpleDescriptor.Service actualService : actualServices) {
-             assertTrue(expectedServices.containsKey(actualService.getName()));
-             assertEquals(expectedServices.get(actualService.getName()), actualService.getURLs());
- 
-             // Validate service parameters
-             if (expectedServiceParameters != null) {
-                 if (expectedServiceParameters.containsKey(actualService.getName())) {
-                     Map<String, String> expectedParams = expectedServiceParameters.get(actualService.getName());
- 
-                     Map<String, String> actualServiceParams = actualService.getParams();
-                     assertNotNull(actualServiceParams);
- 
-                     // Validate the size of the service parameter set
-                     assertEquals(expectedParams.size(), actualServiceParams.size());
- 
-                     // Validate the parameter contents
-                     for (String paramName : actualServiceParams.keySet()) {
-                         assertTrue(expectedParams.containsKey(paramName));
-                         assertEquals(expectedParams.get(paramName), actualServiceParams.get(paramName));
-                     }
-                 }
-             }
++    private String getFileExtensionForType(FileType type) {
++        String extension = null;
++        switch (type) {
++            case JSON:
++                extension = "json";
++                break;
++            case YAML:
++                extension = "yml";
++                break;
 +        }
++        return extension;
 +    }
 +
++    private File writeDescriptorFile(FileType type,
++                                     String                           path,
++                                     String                           discoveryType,
++                                     String                           discoveryAddress,
++                                     String                           discoveryUser,
++                                     String                           providerConfig,
++                                     String                           clusterName,
++                                     Map<String, List<String>>        services) throws Exception {
++        return writeDescriptorFile(type,
++                                   path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   null);
++    }
 +
-     private File writeJSON(String path, String content) throws Exception {
-         File f = new File(path);
- 
-         Writer fw = new FileWriter(f);
-         fw.write(content);
-         fw.flush();
-         fw.close();
- 
-         return f;
++    private File writeDescriptorFile(FileType type,
++                                     String                           path,
++                                     String                           discoveryType,
++                                     String                           discoveryAddress,
++                                     String                           discoveryUser,
++                                     String                           providerConfig,
++                                     String                           clusterName,
++                                     Map<String, List<String>>        services,
++                                     Map<String, Map<String, String>> serviceParams) throws Exception {
++        return writeDescriptorFile(type,
++                                   path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   serviceParams,
++                                   null,
++                                   null);
 +    }
 +
 +
-     private File writeJSON(String path,
-                            String discoveryType,
-                            String discoveryAddress,
-                            String discoveryUser,
-                            String providerConfig,
-                            String clusterName,
-                            Map<String, List<String>> services) throws Exception {
-         return writeJSON(path, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, null);
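++    /*
++     * Writes a complete descriptor file in the requested format. The JSON and
++     * YAML writers below are intended to produce equivalent descriptors, so
++     * each test scenario can be exercised against both parsers.
++     */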
++    private File writeDescriptorFile(FileType type,
++                                     String                           path,
++                                     String                           discoveryType,
++                                     String                           discoveryAddress,
++                                     String                           discoveryUser,
++                                     String                           providerConfig,
++                                     String                           clusterName,
++                                     Map<String, List<String>>        services,
++                                     Map<String, Map<String, String>> serviceParams,
++                                     Map<String, List<String>>        apps,
++                                     Map<String, Map<String, String>> appParams) throws Exception {
++        File result = null;
++        switch (type) {
++            case JSON:
++                result = writeJSON(path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   serviceParams,
++                                   apps,
++                                   appParams);
++                break;
++            case YAML:
++                result = writeYAML(path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   serviceParams,
++                                   apps,
++                                   appParams);
++                break;
++        }
++        return result;
 +    }
 +
++
 +    private File writeJSON(String path,
 +                           String discoveryType,
 +                           String discoveryAddress,
 +                           String discoveryUser,
 +                           String providerConfig,
 +                           String clusterName,
 +                           Map<String, List<String>> services,
-                            Map<String, Map<String, String>> serviceParams) throws Exception {
++                           Map<String, Map<String, String>> serviceParams,
++                           Map<String, List<String>> apps,
++                           Map<String, Map<String, String>> appParams) throws Exception {
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("{" + "\n");
 +        fw.write("\"discovery-type\":\"" + discoveryType + "\",\n");
 +        fw.write("\"discovery-address\":\"" + discoveryAddress + "\",\n");
 +        fw.write("\"discovery-user\":\"" + discoveryUser + "\",\n");
 +        fw.write("\"provider-config-ref\":\"" + providerConfig + "\",\n");
-         fw.write("\"cluster\":\"" + clusterName + "\",\n");
-         fw.write("\"services\":[\n");
- 
-         int i = 0;
-         for (String name : services.keySet()) {
-             fw.write("{\"name\":\"" + name + "\"");
- 
-             // Service params
-             if (serviceParams != null && !serviceParams.isEmpty()) {
-                 Map<String, String> params = serviceParams.get(name);
-                 if (params != null && !params.isEmpty()) {
-                     fw.write(",\n\"params\":{\n");
-                     Iterator<String> paramNames = params.keySet().iterator();
-                     while (paramNames.hasNext()) {
-                         String paramName = paramNames.next();
-                         String paramValue = params.get(paramName);
-                         fw.write("\"" + paramName + "\":\"" + paramValue + "\"");
-                         fw.write(paramNames.hasNext() ? ",\n" : "");
-                     }
-                     fw.write("\n}");
-                 }
-             }
++        fw.write("\"cluster\":\"" + clusterName + "\"");
 +
-             // Service URLs
-             List<String> urls = services.get(name);
-             if (urls != null) {
-                 fw.write(",\n\"urls\":[");
-                 Iterator<String> urlIter = urls.iterator();
-                 while (urlIter.hasNext()) {
-                     fw.write("\"" + urlIter.next() + "\"");
-                     if (urlIter.hasNext()) {
-                         fw.write(", ");
-                     }
-                 }
-                 fw.write("]\n");
-             }
++        if (services != null && !services.isEmpty()) {
++            fw.write(",\n\"services\":[\n");
++            writeServiceOrApplicationJSON(fw, services, serviceParams);
++            fw.write("]\n");
++        }
 +
-             fw.write("}");
-             if (i++ < services.size() - 1) {
-                 fw.write(",");
-             }
-             fw.write("\n");
++        if (apps != null && !apps.isEmpty()) {
++            fw.write(",\n\"applications\":[\n");
++            writeServiceOrApplicationJSON(fw, apps, appParams);
++            fw.write("]\n");
 +        }
-         fw.write("]\n");
++
 +        fw.write("}\n");
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
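++    /*
++     * Shared JSON emitter for "services" and "applications" entries: writes
++     * each element's name, an optional "params" object, and an optional
++     * "urls" array.
++     */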
++    private void writeServiceOrApplicationJSON(Writer fw,
++                                               Map<String, List<String>> elementURLs,
++                                               Map<String, Map<String, String>> elementParams) throws Exception {
++        if (elementURLs != null) {
++            int i = 0;
++            for (String name : elementURLs.keySet()) {
++                fw.write("{\"name\":\"" + name + "\"");
++
++                // Service/application params
++                if (elementParams != null && !elementParams.isEmpty()) {
++                    Map<String, String> params = elementParams.get(name);
++                    if (params != null && !params.isEmpty()) {
++                        fw.write(",\n\"params\":{\n");
++                        Iterator<String> paramNames = params.keySet().iterator();
++                        while (paramNames.hasNext()) {
++                            String paramName = paramNames.next();
++                            String paramValue = params.get(paramName);
++                            fw.write("\"" + paramName + "\":\"" + paramValue + "\"");
++                            fw.write(paramNames.hasNext() ? ",\n" : "");
++                        }
++                        fw.write("\n}");
++                    }
++                }
 +
-     private File writeYAML(String                    path,
-                            String                    discoveryType,
-                            String                    discoveryAddress,
-                            String                    discoveryUser,
-                            String                    providerConfig,
-                            String                    clusterName,
-                            Map<String, List<String>> services) throws Exception {
-         return writeYAML(path, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, null);
-     }
++                // Service/application URLs
++                List<String> urls = elementURLs.get(name);
++                if (urls != null) {
++                    fw.write(",\n\"urls\":[");
++                    Iterator<String> urlIter = urls.iterator();
++                    while (urlIter.hasNext()) {
++                        fw.write("\"" + urlIter.next() + "\"");
++                        if (urlIter.hasNext()) {
++                            fw.write(", ");
++                        }
++                    }
++                    fw.write("]\n");
++                }
 +
++                fw.write("}");
++                if (i++ < elementURLs.size() - 1) {
++                    fw.write(",");
++                }
++                fw.write("\n");
++            }
++        }
++    }
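++    /*
++     * For reference, the emitter above produces entries of this shape
++     * (illustrative names and values):
++     *   {"name":"KNOXSSO", "params":{"knoxsso.token.ttl":"100000"}, "urls":["http://host:8443/gateway"]}
++     */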
 +
 +    private File writeYAML(String                           path,
 +                           String                           discoveryType,
 +                           String                           discoveryAddress,
 +                           String                           discoveryUser,
 +                           String                           providerConfig,
 +                           String                           clusterName,
 +                           Map<String, List<String>>        services,
-                            Map<String, Map<String, String>> serviceParams) throws Exception {
++                           Map<String, Map<String, String>> serviceParams,
++                           Map<String, List<String>>        apps,
++                           Map<String, Map<String, String>> appParams) throws Exception {
++
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("---" + "\n");
 +        fw.write("discovery-type: " + discoveryType + "\n");
 +        fw.write("discovery-address: " + discoveryAddress + "\n");
 +        fw.write("discovery-user: " + discoveryUser + "\n");
 +        fw.write("provider-config-ref: " + providerConfig + "\n");
 +        fw.write("cluster: " + clusterName+ "\n");
-         fw.write("services:\n");
-         for (String name : services.keySet()) {
++
++        if (services != null && !services.isEmpty()) {
++            fw.write("services:\n");
++            writeServiceOrApplicationYAML(fw, services, serviceParams);
++        }
++
++        if (apps != null && !apps.isEmpty()) {
++            fw.write("applications:\n");
++            writeServiceOrApplicationYAML(fw, apps, appParams);
++        }
++
++        fw.flush();
++        fw.close();
++
++        return f;
++    }
++
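++    /*
++     * Shared YAML emitter for "services" and "applications" entries; the
++     * YAML counterpart of writeServiceOrApplicationJSON above.
++     */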
++    private void writeServiceOrApplicationYAML(Writer                           fw,
++                                               Map<String, List<String>>        elementURLs,
++                                               Map<String, Map<String, String>> elementParams) throws Exception {
++        for (String name : elementURLs.keySet()) {
 +            fw.write("    - name: " + name + "\n");
 +
 +            // Service/application params
-             if (serviceParams != null && !serviceParams.isEmpty()) {
-                 if (serviceParams.containsKey(name)) {
-                     Map<String, String> params = serviceParams.get(name);
++            if (elementParams != null && !elementParams.isEmpty()) {
++                if (elementParams.containsKey(name)) {
++                    Map<String, String> params = elementParams.get(name);
 +                    fw.write("      params:\n");
 +                    for (String paramName : params.keySet()) {
 +                        fw.write("            " + paramName + ": " + params.get(paramName) + "\n");
 +                    }
 +                }
 +            }
 +
 +            // Service/application URLs
-             List<String> urls = services.get(name);
++            List<String> urls = elementURLs.get(name);
 +            if (urls != null) {
 +                fw.write("      urls:\n");
 +                for (String url : urls) {
 +                    fw.write("          - " + url + "\n");
 +                }
 +            }
 +        }
-         fw.flush();
-         fw.close();
++    }
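++    /*
++     * For reference, the emitter above produces entries of this shape
++     * (illustrative names and values):
++     *     - name: KNOXSSO
++     *       params:
++     *             knoxsso.token.ttl: 100000
++     *       urls:
++     *           - http://host:8443/gateway
++     */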
 +
-         return f;
++
++    private void validateSimpleDescriptor(SimpleDescriptor          sd,
++                                          String                    discoveryType,
++                                          String                    discoveryAddress,
++                                          String                    providerConfig,
++                                          String                    clusterName,
++                                          Map<String, List<String>> expectedServices) {
++        validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, expectedServices, null);
++    }
++
++
++    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
++                                          String                           discoveryType,
++                                          String                           discoveryAddress,
++                                          String                           providerConfig,
++                                          String                           clusterName,
++                                          Map<String, List<String>>        expectedServices,
++                                          Map<String, Map<String, String>> expectedServiceParameters) {
++        validateSimpleDescriptor(sd,
++                                 discoveryType,
++                                 discoveryAddress,
++                                 providerConfig,
++                                 clusterName,
++                                 expectedServices,
++                                 expectedServiceParameters,
++                                 null,
++                                 null);
 +    }
 +
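++    /*
++     * Full validation of a parsed descriptor. A null expectedServices or
++     * expectedApps map asserts that the corresponding collection on the
++     * parsed descriptor is empty.
++     */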
++    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
++                                          String                           discoveryType,
++                                          String                           discoveryAddress,
++                                          String                           providerConfig,
++                                          String                           clusterName,
++                                          Map<String, List<String>>        expectedServices,
++                                          Map<String, Map<String, String>> expectedServiceParameters,
++                                          Map<String, List<String>>        expectedApps,
++                                          Map<String, Map<String, String>> expectedAppParameters) {
++        assertNotNull(sd);
++        assertEquals(discoveryType, sd.getDiscoveryType());
++        assertEquals(discoveryAddress, sd.getDiscoveryAddress());
++        assertEquals(providerConfig, sd.getProviderConfig());
++        assertEquals(clusterName, sd.getClusterName());
++
++        List<SimpleDescriptor.Service> actualServices = sd.getServices();
++
++        if (expectedServices == null) {
++            assertTrue(actualServices.isEmpty());
++        } else {
++            assertEquals(expectedServices.size(), actualServices.size());
++
++            for (SimpleDescriptor.Service actualService : actualServices) {
++                assertTrue(expectedServices.containsKey(actualService.getName()));
++                assertEquals(expectedServices.get(actualService.getName()), actualService.getURLs());
++
++                // Validate service parameters
++                if (expectedServiceParameters != null) {
++                    if (expectedServiceParameters.containsKey(actualService.getName())) {
++                        Map<String, String> expectedParams = expectedServiceParameters.get(actualService.getName());
++
++                        Map<String, String> actualServiceParams = actualService.getParams();
++                        assertNotNull(actualServiceParams);
++
++                        // Validate the size of the service parameter set
++                        assertEquals(expectedParams.size(), actualServiceParams.size());
++
++                        // Validate the parameter contents
++                        for (String paramName : actualServiceParams.keySet()) {
++                            assertTrue(expectedParams.containsKey(paramName));
++                            assertEquals(expectedParams.get(paramName), actualServiceParams.get(paramName));
++                        }
++                    }
++                }
++            }
++        }
++
++        List<SimpleDescriptor.Application> actualApps = sd.getApplications();
++
++        if (expectedApps == null) {
++            assertTrue(actualApps.isEmpty());
++        } else {
++            assertEquals(expectedApps.size(), actualApps.size());
++
++            for (SimpleDescriptor.Application actualApp : actualApps) {
++                assertTrue(expectedApps.containsKey(actualApp.getName()));
++                assertEquals(expectedApps.get(actualApp.getName()), actualApp.getURLs());
++
++                // Validate application parameters
++                if (expectedAppParameters != null) {
++                    if (expectedAppParameters.containsKey(actualApp.getName())) {
++                        Map<String, String> expectedParams = expectedAppParameters.get(actualApp.getName());
++
++                        Map<String, String> actualAppParams = actualApp.getParams();
++                        assertNotNull(actualAppParams);
++
++                        // Validate the size of the application parameter set
++                        assertEquals(expectedParams.size(), actualAppParams.size());
++
++                        // Validate the parameter contents
++                        for (String paramName : actualAppParams.keySet()) {
++                            assertTrue(expectedParams.containsKey(paramName));
++                            assertEquals(expectedParams.get(paramName), actualAppParams.get(paramName));
++                        }
++                    }
++                }
++            }
++        }
++    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/2c69152f/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
index f40fad7,0000000..575b68a
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
@@@ -1,447 -1,0 +1,455 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import java.io.ByteArrayInputStream;
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +
 +import javax.xml.xpath.XPath;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathFactory;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.Node;
 +import org.w3c.dom.NodeList;
 +import org.xml.sax.SAXException;
 +
++import static org.hamcrest.Matchers.hasXPath;
++import static org.hamcrest.Matchers.is;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
++import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +
 +public class SimpleDescriptorHandlerTest {
 +
 +    private static final String TEST_PROVIDER_CONFIG =
 +            "    <gateway>\n" +
 +                    "        <provider>\n" +
 +                    "            <role>authentication</role>\n" +
 +                    "            <name>ShiroProvider</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param>\n" +
 +                    "                <!-- \n" +
 +                    "                Session timeout in minutes; this is really an idle timeout.\n" +
 +                    "                It defaults to 30 minutes if the property value is not defined.\n" +
 +                    "                The current client authentication will expire if the client idles continuously for longer than this value.\n" +
 +                    "                -->\n" +
 +                    "                <name>sessionTimeout</name>\n" +
 +                    "                <value>30</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapContextFactory</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory</name>\n" +
 +                    "                <value>$ldapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.userDnTemplate</name>\n" +
 +                    "                <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.url</name>\n" +
 +                    "                <value>ldap://localhost:33389</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
 +                    "                <value>simple</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>urls./**</name>\n" +
 +                    "                <value>authcBasic</value>\n" +
 +                    "            </param>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <provider>\n" +
 +                    "            <role>identity-assertion</role>\n" +
 +                    "            <name>Default</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <!--\n" +
 +                    "        Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.\n" +
 +                    "        For example, a Hadoop service running in AWS may return a response that includes URLs containing\n" +
 +                    "        an AWS-internal host name.  If the client needs to make a subsequent request to the host identified\n" +
 +                    "        in those URLs, those host names must be mapped to external host names that the client can use to connect through Knox.\n" +
 +                    "\n" +
 +                    "        If the external and internal host names are the same, turn off this provider by setting the value of the\n" +
 +                    "        enabled parameter to false.\n" +
 +                    "\n" +
 +                    "        The name parameter specifies the external host names in a comma-separated list.\n" +
 +                    "        The value parameter specifies the corresponding internal host names in a comma-separated list.\n" +
 +                    "\n" +
 +                    "        Note that when you are using Sandbox, the external host name needs to be localhost, as seen in the\n" +
 +                    "        out-of-the-box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the\n" +
 +                    "        Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.\n" +
 +                    "        -->\n" +
 +                    "        <provider>\n" +
 +                    "            <role>hostmap</role>\n" +
 +                    "            <name>static</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
 +                    "        </provider>\n" +
 +                    "    </gateway>\n";
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
 +     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
 +     *             org.apache.knox.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
 +     */
 +    @Test
 +    public void testSimpleDescriptorHandler() throws Exception {
 +
 +        final String type = "PROPERTIES_FILE";
 +        final String clusterName = "dummy";
 +
 +        // Create a properties file to be the source of service discovery details for this test
 +        final File discoveryConfig = File.createTempFile(getClass().getName() + "_discovery-config", ".properties");
 +
 +        final String address = discoveryConfig.getAbsolutePath();
 +
 +        final Properties DISCOVERY_PROPERTIES = new Properties();
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".name", clusterName);
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".NAMENODE", "hdfs://namenodehost:8020");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".JOBTRACKER", "rpc://jobtrackerhostname:8050");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHDFS", "http://webhdfshost:1234");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHCAT", "http://webhcathost:50111/templeton");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".OOZIE", "http://ooziehost:11000/oozie");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHBASE", "http://webhbasehost:1234");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".HIVE", "http://hivehostname:10001/clipath");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".RESOURCEMANAGER", "http://remanhost:8088/ws");
 +
 +        try {
 +            DISCOVERY_PROPERTIES.store(new FileOutputStream(discoveryConfig), null);
 +        } catch (FileNotFoundException e) {
 +            fail(e.getMessage());
 +        }
 +
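 +        // Expected service roles; a null URL list means the URL(s) must come
 +        // from service discovery rather than an explicit declaration.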
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
 +        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        serviceURLs.put("KNOXSSO", null);
 +
 +        // Write the externalized provider config to a temp file
 +        File providerConfig = new File(System.getProperty("java.io.tmpdir"), "ambari-cluster-policy.xml");
 +        FileUtils.write(providerConfig, TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
 +            File destDir = new File(System.getProperty("java.io.tmpdir")).getCanonicalFile();
 +
 +            Map<String, Map<String, String>> serviceParameters = new HashMap<>();
 +            Map<String, String> knoxssoParams = new HashMap<>();
 +            knoxssoParams.put("knoxsso.cookie.secure.only", "true");
 +            knoxssoParams.put("knoxsso.token.ttl", "100000");
 +            serviceParameters.put("KNOXSSO", knoxssoParams);
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(clusterName).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
 +                EasyMock.expect(svc.getParams()).andReturn(serviceParameters.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                           SimpleDescriptorHandler.handle(testDescriptor,
 +                                                          providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                          destDir);
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
++            // KNOX-1105 Mark generated topology files
++            assertThat("Expected the \"generated\" marker element in the topology XML, with value of \"true\".",
++                       topologyXml,
++                       hasXPath("/topology/generated", is("true")));
++
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                       extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                        (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
 +
 +                // Validate the role
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
 +
 +                // Validate the URLs
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
 +
 +                    // If the service should have a URL (some don't require it)
 +                    if (serviceURLs.containsKey(role)) {
 +                        assertNotNull("Declared service should have a URL.", url);
 +                        if (!topologyServiceURLs.containsKey(role)) {
 +                            topologyServiceURLs.put(role, new ArrayList<>());
 +                        }
 +                        topologyServiceURLs.get(role).add(url); // Add it for validation later
 +                    }
 +                }
 +
 +                // If params were declared in the descriptor, then validate them in the resulting topology file
 +                Map<String, String> params = serviceParameters.get(role);
 +                if (params != null) {
 +                    NodeList paramNodes = (NodeList) xpath.compile("param").evaluate(serviceNode, XPathConstants.NODESET);
 +                    for (int paramNodeIndex = 0; paramNodeIndex < paramNodes.getLength(); paramNodeIndex++) {
 +                        Node paramNode = paramNodes.item(paramNodeIndex);
 +                        String paramName = (String) xpath.compile("name/text()").evaluate(paramNode, XPathConstants.STRING);
 +                        String paramValue = (String) xpath.compile("value/text()").evaluate(paramNode, XPathConstants.STRING);
 +                        assertTrue(params.keySet().contains(paramName));
 +                        assertEquals(params.get(paramName), paramValue);
 +                    }
 +                }
 +
 +            }
 +            assertEquals("Unexpected number of service declarations.", (serviceURLs.size() - 1), topologyServiceURLs.size()); // KNOXSSO is declared without a URL and gets none from discovery in this test
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            providerConfig.delete();
 +            discoveryConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
 +     * Verify the behavior of the SimpleDescriptorHandler when service discovery fails to produce a valid URL for
 +     * a service.
 +     *
 +     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
 +     *             org.apache.knox.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
 +     */
 +    @Test
 +    public void testInvalidServiceURLFromDiscovery() throws Exception {
 +        final String CLUSTER_NAME = "myproperties";
 +
 +        // Configure the PropertiesFile Service Discovery implementation for this test
 +        final String DEFAULT_VALID_SERVICE_URL = "http://localhost:9999/thiswillwork";
 +        Properties serviceDiscoverySourceProps = new Properties();
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".NAMENODE",
 +                                                DEFAULT_VALID_SERVICE_URL.replace("http", "hdfs"));
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".JOBTRACKER",
 +                                                DEFAULT_VALID_SERVICE_URL.replace("http", "rpc"));
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHDFS",         DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHCAT",         DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".OOZIE",           DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHBASE",        DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".HIVE",            "{SCHEME}://localhost:10000/");
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".RESOURCEMANAGER", DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".AMBARIUI",        DEFAULT_VALID_SERVICE_URL);
 +        File serviceDiscoverySource = File.createTempFile("service-discovery", ".properties");
 +        serviceDiscoverySourceProps.store(new FileOutputStream(serviceDiscoverySource),
 +                                          "Test Service Discovery Source");
 +
 +        // Prepare a mock SimpleDescriptor
 +        final String type = "PROPERTIES_FILE";
 +        final String address = serviceDiscoverySource.getAbsolutePath();
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
 +        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
 +        // Write the externalized provider config to a temp file
 +        File providerConfig = writeProviderConfig("ambari-cluster-policy.xml", TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
 +            File destDir = (new File(".")).getCanonicalFile();
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                    SimpleDescriptorHandler.handle(testDescriptor,
 +                                                   providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                   destDir);
 +
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                    extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            List<String> topologyServices = new ArrayList<>();
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                    (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex = 0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
 +                topologyServices.add(role);
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for (int urlNodeIndex = 0; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
 +                    assertNotNull("Every declared service should have a URL.", url);
 +                    if (!topologyServiceURLs.containsKey(role)) {
 +                        topologyServiceURLs.put(role, new ArrayList<>());
 +                    }
 +                    topologyServiceURLs.get(role).add(url);
 +                }
 +            }
 +
 +            // There should not be a service element for HIVE, since it had no valid URLs
 +            assertEquals("Unexpected number of service declarations.", serviceURLs.size() - 1, topologyServices.size());
 +            assertFalse("The HIVE service should have been omitted from the generated topology.", topologyServices.contains("HIVE"));
 +
 +            assertEquals("Unexpected number of service URLs.", serviceURLs.size() - 1, topologyServiceURLs.size());
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            serviceDiscoverySource.delete();
 +            providerConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
 +    private File writeProviderConfig(String path, String content) throws IOException {
 +        File f = new File(path);
 +        FileUtils.write(f, content, "utf-8");
 +        return f;
 +    }
 +
 +}
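
A side note on the discovery mechanism exercised above: the PROPERTIES_FILE discovery
type resolves a cluster's service URLs from plain "<cluster>.<SERVICE>" keys, which is
exactly how the serviceDiscoverySourceProps source is laid out at the top of the test.
A minimal sketch of such a lookup follows; the class and method names are illustrative
only (this is not Knox's actual discovery API), and the comma-separated handling of
multi-URL values is an assumption.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Properties;

    public class PropertiesFileServiceDiscoverySketch {

        // Hypothetical lookup: load the properties file and split the value of
        // the "<cluster>.<SERVICE>" key into individual service URLs.
        public static List<String> lookup(String sourcePath, String cluster, String service)
                throws IOException {
            Properties props = new Properties();
            try (FileInputStream in = new FileInputStream(sourcePath)) {
                props.load(in);
            }
            List<String> urls = new ArrayList<>();
            String value = props.getProperty(cluster + "." + service);
            if (value != null) {
                for (String url : value.split(",")) {
                    urls.add(url.trim());
                }
            }
            return urls; // empty list when the service is not declared for the cluster
        }
    }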


[08/53] [abbrv] knox git commit: KNOX-998 - package name refactoring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java b/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
deleted file mode 100644
index dd35dbb..0000000
--- a/gateway-discovery-ambari/src/test/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
+++ /dev/null
@@ -1,876 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.gateway.topology.discovery.ambari;
-
-import org.apache.commons.io.FileUtils;
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-import java.io.File;
-import java.net.MalformedURLException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import static junit.framework.TestCase.assertTrue;
-import static junit.framework.TestCase.fail;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-
-public class AmbariDynamicServiceURLCreatorTest {
-
-    @Test
-    public void testHiveURLFromInternalMapping() throws Exception {
-        testHiveURL(null);
-    }
-
-    @Test
-    public void testHiveURLFromExternalMapping() throws Exception {
-        testHiveURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testHiveURL(Object mappingConfiguration) throws Exception {
-
-        final String   SERVICE_NAME = "HIVE";
-        final String[] HOSTNAMES    = {"host3", "host2", "host4"};
-        final String   HTTP_PATH    = "cliservice";
-        final String   HTTP_PORT    = "10001";
-        final String   BINARY_PORT  = "10000";
-
-        String expectedScheme = "http";
-
-        final List<String> hiveServerHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent hiveServer = EasyMock.createNiceMock(AmbariComponent.class);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("HIVE_SERVER")).andReturn(hiveServer).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Configure HTTP Transport
-        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("false").anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn(HTTP_PATH).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("http").anyTimes();
-        EasyMock.replay(hiveServer);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        List<String> urls = builder.create(SERVICE_NAME);
-        assertEquals(HOSTNAMES.length, urls.size());
-        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, HTTP_PATH);
-
-        // Configure BINARY Transport
-        EasyMock.reset(hiveServer);
-        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("false").anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn("").anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.port")).andReturn(BINARY_PORT).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("binary").anyTimes();
-        EasyMock.replay(hiveServer);
-
-        // Run the test
-        urls = builder.create(SERVICE_NAME);
-        assertEquals(HOSTNAMES.length, urls.size());
-        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, "");
-
-        // Configure HTTPS Transport
-        EasyMock.reset(hiveServer);
-        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("true").anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn(HTTP_PATH).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("http").anyTimes();
-        EasyMock.replay(hiveServer);
-
-        // Run the test
-        expectedScheme = "https";
-        urls = builder.create(SERVICE_NAME);
-        assertEquals(HOSTNAMES.length, urls.size());
-        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, HTTP_PATH);
-    }
-
-    @Test
-    public void testResourceManagerURLFromInternalMapping() throws Exception {
-        testResourceManagerURL(null);
-    }
-
-    @Test
-    public void testResourceManagerURLFromExternalMapping() throws Exception {
-        testResourceManagerURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testResourceManagerURL(Object mappingConfiguration) throws Exception {
-
-        final String HTTP_ADDRESS  = "host2:1111";
-        final String HTTPS_ADDRESS = "host2:22222";
-
-        // HTTP
-        AmbariComponent resman = EasyMock.createNiceMock(AmbariComponent.class);
-        setResourceManagerComponentExpectations(resman, HTTP_ADDRESS, HTTPS_ADDRESS, "HTTP");
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("RESOURCEMANAGER")).andReturn(resman).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        String url = builder.create("RESOURCEMANAGER").get(0);
-        assertEquals("http://" + HTTP_ADDRESS + "/ws", url);
-
-        // HTTPS
-        EasyMock.reset(resman);
-        setResourceManagerComponentExpectations(resman, HTTP_ADDRESS, HTTPS_ADDRESS, "HTTPS_ONLY");
-
-        // Run the test
-        url = builder.create("RESOURCEMANAGER").get(0);
-        assertEquals("https://" + HTTPS_ADDRESS + "/ws", url);
-    }
-
-    private void setResourceManagerComponentExpectations(final AmbariComponent resmanMock,
-                                                         final String          httpAddress,
-                                                         final String          httpsAddress,
-                                                         final String          httpPolicy) {
-        EasyMock.expect(resmanMock.getConfigProperty("yarn.resourcemanager.webapp.address")).andReturn(httpAddress).anyTimes();
-        EasyMock.expect(resmanMock.getConfigProperty("yarn.resourcemanager.webapp.https.address")).andReturn(httpsAddress).anyTimes();
-        EasyMock.expect(resmanMock.getConfigProperty("yarn.http.policy")).andReturn(httpPolicy).anyTimes();
-        EasyMock.replay(resmanMock);
-    }
-
-    @Test
-    public void testJobTrackerURLFromInternalMapping() throws Exception {
-        testJobTrackerURL(null);
-    }
-
-    @Test
-    public void testJobTrackerURLFromExternalMapping() throws Exception {
-        testJobTrackerURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testJobTrackerURL(Object mappingConfiguration) throws Exception {
-        final String ADDRESS = "host2:5678";
-
-        AmbariComponent resman = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(resman.getConfigProperty("yarn.resourcemanager.address")).andReturn(ADDRESS).anyTimes();
-        EasyMock.replay(resman);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("RESOURCEMANAGER")).andReturn(resman).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        String url = builder.create("JOBTRACKER").get(0);
-        assertEquals("rpc://" + ADDRESS, url);
-    }
-
-    @Test
-    public void testNameNodeURLFromInternalMapping() throws Exception {
-        testNameNodeURL(null);
-    }
-
-    @Test
-    public void testNameNodeURLFromExternalMapping() throws Exception {
-        testNameNodeURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testNameNodeURL(Object mappingConfiguration) throws Exception {
-        final String ADDRESS = "host1:1234";
-
-        AmbariComponent namenode = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(namenode.getConfigProperty("dfs.namenode.rpc-address")).andReturn(ADDRESS).anyTimes();
-        EasyMock.replay(namenode);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("NAMENODE")).andReturn(namenode).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        String url = builder.create("NAMENODE").get(0);
-        assertEquals("hdfs://" + ADDRESS, url);
-    }
-
-    @Test
-    public void testWebHCatURLFromInternalMapping() throws Exception {
-        testWebHCatURL(null);
-    }
-
-    @Test
-    public void testWebHCatURLFromExternalMapping() throws Exception {
-        testWebHCatURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testWebHCatURL(Object mappingConfiguration) throws Exception {
-
-        final String HOSTNAME = "host3";
-        final String PORT     = "1919";
-
-        AmbariComponent webhcatServer = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(webhcatServer.getConfigProperty("templeton.port")).andReturn(PORT).anyTimes();
-        List<String> webHcatServerHosts = Collections.singletonList(HOSTNAME);
-        EasyMock.expect(webhcatServer.getHostNames()).andReturn(webHcatServerHosts).anyTimes();
-        EasyMock.replay(webhcatServer);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("WEBHCAT_SERVER")).andReturn(webhcatServer).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        String url = builder.create("WEBHCAT").get(0);
-        assertEquals("http://" + HOSTNAME + ":" + PORT + "/templeton", url);
-    }
-
-    @Test
-    public void testOozieURLFromInternalMapping() throws Exception {
-        testOozieURL(null);
-    }
-
-    @Test
-    public void testOozieURLFromExternalMapping() throws Exception {
-        testOozieURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testOozieURL(Object mappingConfiguration) throws Exception {
-        final String URL = "http://host3:2222";
-
-        AmbariComponent oozieServer = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(oozieServer.getConfigProperty("oozie.base.url")).andReturn(URL).anyTimes();
-        EasyMock.replay(oozieServer);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("OOZIE_SERVER")).andReturn(oozieServer).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        String url = builder.create("OOZIE").get(0);
-        assertEquals(URL, url);
-    }
-
-    @Test
-    public void testWebHBaseURLFromInternalMapping() throws Exception {
-        testWebHBaseURL(null);
-    }
-
-    @Test
-    public void testWebHBaseURLFromExternalMapping() throws Exception {
-        testWebHBaseURL(TEST_MAPPING_CONFIG);
-    }
-
-    private void testWebHBaseURL(Object mappingConfiguration) throws Exception {
-        final String[] HOSTNAMES = {"host2", "host4"};
-
-        AmbariComponent hbaseMaster = EasyMock.createNiceMock(AmbariComponent.class);
-        List<String> hbaseMasterHosts = Arrays.asList(HOSTNAMES);
-        EasyMock.expect(hbaseMaster.getHostNames()).andReturn(hbaseMasterHosts).anyTimes();
-        EasyMock.replay(hbaseMaster);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("HBASE_MASTER")).andReturn(hbaseMaster).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
-        List<String> urls = builder.create("WEBHBASE");
-        validateServiceURLs(urls, HOSTNAMES, "http", "60080", null);
-    }
-
-    @Test
-    public void testWebHdfsURLFromInternalMapping() throws Exception {
-        testWebHdfsURL(null);
-    }
-
-    @Test
-    public void testWebHdfsURLFromExternalMapping() throws Exception {
-        testWebHdfsURL(TEST_MAPPING_CONFIG);
-    }
-
-    @Test
-    public void testWebHdfsURLFromSystemPropertyOverride() throws Exception {
-        // Write the test mapping configuration to a temp file
-        File mappingFile = File.createTempFile("mapping-config", "xml");
-        FileUtils.write(mappingFile, OVERRIDE_MAPPING_FILE_CONTENTS, "utf-8");
-
-        // Set the system property to point to the temp file
-        System.setProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY,
-                           mappingFile.getAbsolutePath());
-        try {
-            final String ADDRESS = "host3:1357";
-            // The URL creator should apply the file contents, and create the URL accordingly
-            String url = getTestWebHdfsURL(ADDRESS, null);
-
-            // Verify the URL matches the pattern from the file
-            assertEquals("http://" + ADDRESS + "/webhdfs/OVERRIDE", url);
-        } finally {
-            // Reset the system property, and delete the temp file
-            System.clearProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY);
-            mappingFile.delete();
-        }
-    }
-
-    private void testWebHdfsURL(Object mappingConfiguration) throws Exception {
-        final String ADDRESS = "host3:1357";
-        assertEquals("http://" + ADDRESS + "/webhdfs", getTestWebHdfsURL(ADDRESS, mappingConfiguration));
-    }
-
-
-    private String getTestWebHdfsURL(String address, Object mappingConfiguration) throws Exception {
-        AmbariCluster.ServiceConfiguration hdfsSC = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
-        Map<String, String> hdfsProps = new HashMap<>();
-        hdfsProps.put("dfs.namenode.http-address", address);
-        EasyMock.expect(hdfsSC.getProperties()).andReturn(hdfsProps).anyTimes();
-        EasyMock.replay(hdfsSC);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getServiceConfiguration("HDFS", "hdfs-site")).andReturn(hdfsSC).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Create the URL
-        AmbariDynamicServiceURLCreator creator = newURLCreator(cluster, mappingConfiguration);
-        return creator.create("WEBHDFS").get(0);
-    }
-
-
-    @Test
-    public void testAtlasApiURL() throws Exception {
-        final String ATLAS_REST_ADDRESS = "http://host2:21000";
-
-        AmbariComponent atlasServer = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.rest.address")).andReturn(ATLAS_REST_ADDRESS).anyTimes();
-        EasyMock.replay(atlasServer);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("ATLAS_SERVER")).andReturn(atlasServer).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("ATLAS-API");
-        assertEquals(1, urls.size());
-        assertEquals(ATLAS_REST_ADDRESS, urls.get(0));
-    }
-
-
-    @Test
-    public void testAtlasURL() throws Exception {
-        final String HTTP_PORT = "8787";
-        final String HTTPS_PORT = "8989";
-
-        final String[] HOSTNAMES = {"host1", "host4"};
-        final List<String> atlasServerHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent atlasServer = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(atlasServer.getHostNames()).andReturn(atlasServerHosts).anyTimes();
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.enableTLS")).andReturn("false").anyTimes();
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.http.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.https.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(atlasServer);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("ATLAS_SERVER")).andReturn(atlasServer).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("ATLAS");
-        validateServiceURLs(urls, HOSTNAMES, "http", HTTP_PORT, null);
-
-        EasyMock.reset(atlasServer);
-        EasyMock.expect(atlasServer.getHostNames()).andReturn(atlasServerHosts).anyTimes();
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.enableTLS")).andReturn("true").anyTimes();
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.http.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.https.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(atlasServer);
-
-        // Run the test
-        urls = builder.create("ATLAS");
-        validateServiceURLs(urls, HOSTNAMES, "https", HTTPS_PORT, null);
-    }
-
-
-    @Test
-    public void testZeppelinURL() throws Exception {
-        final String HTTP_PORT = "8787";
-        final String HTTPS_PORT = "8989";
-
-        final String[] HOSTNAMES = {"host1", "host4"};
-        final List<String> zeppelinServerHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(zeppelinMaster);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
-        EasyMock.replay(cluster);
-
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-
-        // Run the test
-        validateServiceURLs(builder.create("ZEPPELIN"), HOSTNAMES, "http", HTTP_PORT, null);
-
-        EasyMock.reset(zeppelinMaster);
-        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(zeppelinMaster);
-
-        // Run the test
-        validateServiceURLs(builder.create("ZEPPELIN"), HOSTNAMES, "https", HTTPS_PORT, null);
-    }
-
-
-    @Test
-    public void testZeppelinUiURL() throws Exception {
-        final String HTTP_PORT = "8787";
-        final String HTTPS_PORT = "8989";
-
-        final String[] HOSTNAMES = {"host1", "host4"};
-        final List<String> zeppelinServerHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(zeppelinMaster);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
-        EasyMock.replay(cluster);
-
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-
-        // Run the test
-        validateServiceURLs(builder.create("ZEPPELINUI"), HOSTNAMES, "http", HTTP_PORT, null);
-
-        EasyMock.reset(zeppelinMaster);
-        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(zeppelinMaster);
-
-        // Run the test
-        validateServiceURLs(builder.create("ZEPPELINUI"), HOSTNAMES, "https", HTTPS_PORT, null);
-    }
-
-
-    @Test
-    public void testZeppelinWsURL() throws Exception {
-        final String HTTP_PORT = "8787";
-        final String HTTPS_PORT = "8989";
-
-        final String[] HOSTNAMES = {"host1", "host4"};
-        final List<String> zeppelinServerHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(zeppelinMaster);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
-        EasyMock.replay(cluster);
-
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-
-        // Run the test
-        validateServiceURLs(builder.create("ZEPPELINWS"), HOSTNAMES, "ws", HTTP_PORT, null);
-
-        EasyMock.reset(zeppelinMaster);
-        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
-        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
-        EasyMock.replay(zeppelinMaster);
-
-        // Run the test
-        validateServiceURLs(builder.create("ZEPPELINWS"), HOSTNAMES, "wss", HTTPS_PORT, null);
-    }
-
-
-    @Test
-    public void testDruidCoordinatorURL() throws Exception {
-        final String PORT = "8787";
-
-        final String[] HOSTNAMES = {"host3", "host2"};
-        final List<String> druidCoordinatorHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent druidCoordinator = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(druidCoordinator.getHostNames()).andReturn(druidCoordinatorHosts).anyTimes();
-        EasyMock.expect(druidCoordinator.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
-        EasyMock.replay(druidCoordinator);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("DRUID_COORDINATOR")).andReturn(druidCoordinator).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("DRUID-COORDINATOR");
-        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
-    }
-
-
-    @Test
-    public void testDruidBrokerURL() throws Exception {
-        final String PORT = "8181";
-
-        final String[] HOSTNAMES = {"host4", "host3"};
-        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent druidBroker = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(druidBroker.getHostNames()).andReturn(druidHosts).anyTimes();
-        EasyMock.expect(druidBroker.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
-        EasyMock.replay(druidBroker);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("DRUID_BROKER")).andReturn(druidBroker).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("DRUID-BROKER");
-        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
-    }
-
-
-    @Test
-    public void testDruidRouterURL() throws Exception {
-        final String PORT = "8282";
-
-        final String[] HOSTNAMES = {"host5", "host7"};
-        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent druidRouter = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(druidRouter.getHostNames()).andReturn(druidHosts).anyTimes();
-        EasyMock.expect(druidRouter.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
-        EasyMock.replay(druidRouter);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("DRUID_ROUTER")).andReturn(druidRouter).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("DRUID-ROUTER");
-        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
-    }
-
-
-    @Test
-    public void testDruidOverlordURL() throws Exception {
-        final String PORT = "8383";
-
-        final String[] HOSTNAMES = {"host4", "host1"};
-        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent druidOverlord = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(druidOverlord.getHostNames()).andReturn(druidHosts).anyTimes();
-        EasyMock.expect(druidOverlord.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
-        EasyMock.replay(druidOverlord);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("DRUID_OVERLORD")).andReturn(druidOverlord).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("DRUID-OVERLORD");
-        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
-    }
-
-
-    @Test
-    public void testDruidSupersetURL() throws Exception {
-        final String PORT = "8484";
-
-        final String[] HOSTNAMES = {"host4", "host1"};
-        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
-
-        AmbariComponent druidSuperset = EasyMock.createNiceMock(AmbariComponent.class);
-        EasyMock.expect(druidSuperset.getHostNames()).andReturn(druidHosts).anyTimes();
-        EasyMock.expect(druidSuperset.getConfigProperty("SUPERSET_WEBSERVER_PORT")).andReturn(PORT).anyTimes();
-        EasyMock.replay(druidSuperset);
-
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("DRUID_SUPERSET")).andReturn(druidSuperset).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("SUPERSET");
-        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
-    }
-
-
-    @Test
-    public void testMissingServiceComponentURL() throws Exception {
-        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
-        EasyMock.expect(cluster.getComponent("DRUID_BROKER")).andReturn(null).anyTimes();
-        EasyMock.expect(cluster.getComponent("HIVE_SERVER")).andReturn(null).anyTimes();
-        EasyMock.replay(cluster);
-
-        // Run the test
-        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
-        List<String> urls = builder.create("DRUID-BROKER");
-        assertNotNull(urls);
-        assertEquals(1, urls.size());
-        assertEquals("http://{HOST}:{PORT}", urls.get(0));
-
-        urls = builder.create("HIVE");
-        assertNotNull(urls);
-        assertEquals(1, urls.size());
-        assertEquals("http://{HOST}:{PORT}/{PATH}", urls.get(0));
-    }
-
-
-    /**
-     * Convenience method for creating AmbariDynamicServiceURLCreator instances from different mapping configuration
-     * input sources.
-     *
-     * @param cluster       The Ambari ServiceDiscovery Cluster model
-     * @param mappingConfig The mapping configuration, or null if the internal config should be used.
-     *
-     * @return An AmbariDynamicServiceURLCreator instance, capable of creating service URLs based on the specified
-     *         cluster's configuration details.
-     */
-    private static AmbariDynamicServiceURLCreator newURLCreator(AmbariCluster cluster, Object mappingConfig) throws Exception {
-        AmbariDynamicServiceURLCreator result = null;
-
-        if (mappingConfig == null) {
-            result = new AmbariDynamicServiceURLCreator(cluster);
-        } else {
-            if (mappingConfig instanceof String) {
-                result = new AmbariDynamicServiceURLCreator(cluster, (String) mappingConfig);
-            } else if (mappingConfig instanceof File) {
-                result = new AmbariDynamicServiceURLCreator(cluster, (File) mappingConfig);
-            }
-        }
-
-        return result;
-    }
-
-
-    /**
-     * Validate the specified service URLs.
-     *
-     * @param urlsToValidate The URLs to validate
-     * @param hostNames      The host names expected in the test URLs
-     * @param scheme         The expected scheme for the URLs
-     * @param port           The expected port for the URLs
-     * @param path           The expected path for the URLs
-     */
-    private static void validateServiceURLs(List<String> urlsToValidate,
-                                            String[]     hostNames,
-                                            String       scheme,
-                                            String       port,
-                                            String       path) throws MalformedURLException {
-
-        List<String> hostNamesToTest = new LinkedList<>(Arrays.asList(hostNames));
-        for (String url : urlsToValidate) {
-            URI test = null;
-            try {
-                // Make sure it's a valid URL
-                test = new URI(url);
-            } catch (URISyntaxException e) {
-                fail(e.getMessage());
-            }
-
-            // Validate the scheme
-            assertEquals(scheme, test.getScheme());
-
-            // Validate the port
-            assertEquals(port, String.valueOf(test.getPort()));
-
-            // If the expected path is not specified, don't validate it
-            if (path != null) {
-                assertEquals("/" + path, test.getPath());
-            }
-
-            // Validate the host name
-            assertTrue(hostNamesToTest.contains(test.getHost()));
-            hostNamesToTest.remove(test.getHost());
-        }
-        assertTrue(hostNamesToTest.isEmpty());
-    }
-
-
-    private static final String TEST_MAPPING_CONFIG =
-            "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
-            "<service-discovery-url-mappings>\n" +
-            "  <service name=\"NAMENODE\">\n" +
-            "    <url-pattern>hdfs://{DFS_NAMENODE_RPC_ADDRESS}</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"DFS_NAMENODE_RPC_ADDRESS\">\n" +
-            "        <component>NAMENODE</component>\n" +
-            "        <config-property>dfs.namenode.rpc-address</config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "\n" +
-            "  <service name=\"JOBTRACKER\">\n" +
-            "    <url-pattern>rpc://{YARN_RM_ADDRESS}</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"YARN_RM_ADDRESS\">\n" +
-            "        <component>RESOURCEMANAGER</component>\n" +
-            "        <config-property>yarn.resourcemanager.address</config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "\n" +
-            "  <service name=\"WEBHDFS\">\n" +
-            "    <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"WEBHDFS_ADDRESS\">\n" +
-            "        <service-config name=\"HDFS\">hdfs-site</service-config>\n" +
-            "        <config-property>dfs.namenode.http-address</config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "\n" +
-            "  <service name=\"WEBHCAT\">\n" +
-            "    <url-pattern>http://{HOST}:{PORT}/templeton</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"HOST\">\n" +
-            "        <component>WEBHCAT_SERVER</component>\n" +
-            "        <hostname/>\n" +
-            "      </property>\n" +
-            "      <property name=\"PORT\">\n" +
-            "        <component>WEBHCAT_SERVER</component>\n" +
-            "        <config-property>templeton.port</config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "\n" +
-            "  <service name=\"OOZIE\">\n" +
-            "    <url-pattern>{OOZIE_ADDRESS}</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"OOZIE_ADDRESS\">\n" +
-            "        <component>OOZIE_SERVER</component>\n" +
-            "        <config-property>oozie.base.url</config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "\n" +
-            "  <service name=\"WEBHBASE\">\n" +
-            "    <url-pattern>http://{HOST}:60080</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"HOST\">\n" +
-            "        <component>HBASE_MASTER</component>\n" +
-            "        <hostname/>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "  <service name=\"RESOURCEMANAGER\">\n" +
-            "    <url-pattern>{SCHEME}://{WEBAPP_ADDRESS}/ws</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"WEBAPP_HTTP_ADDRESS\">\n" +
-            "        <component>RESOURCEMANAGER</component>\n" +
-            "        <config-property>yarn.resourcemanager.webapp.address</config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"WEBAPP_HTTPS_ADDRESS\">\n" +
-            "        <component>RESOURCEMANAGER</component>\n" +
-            "        <config-property>yarn.resourcemanager.webapp.https.address</config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"HTTP_POLICY\">\n" +
-            "        <component>RESOURCEMANAGER</component>\n" +
-            "        <config-property>yarn.http.policy</config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"SCHEME\">\n" +
-            "        <config-property>\n" +
-            "          <if property=\"HTTP_POLICY\" value=\"HTTPS_ONLY\">\n" +
-            "            <then>https</then>\n" +
-            "            <else>http</else>\n" +
-            "          </if>\n" +
-            "        </config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"WEBAPP_ADDRESS\">\n" +
-            "        <component>RESOURCEMANAGER</component>\n" +
-            "        <config-property>\n" +
-            "          <if property=\"HTTP_POLICY\" value=\"HTTPS_ONLY\">\n" +
-            "            <then>WEBAPP_HTTPS_ADDRESS</then>\n" +
-            "            <else>WEBAPP_HTTP_ADDRESS</else>\n" +
-            "          </if>\n" +
-            "        </config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "  <service name=\"HIVE\">\n" +
-            "    <url-pattern>{SCHEME}://{HOST}:{PORT}/{PATH}</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"HOST\">\n" +
-            "        <component>HIVE_SERVER</component>\n" +
-            "        <hostname/>\n" +
-            "      </property>\n" +
-            "      <property name=\"USE_SSL\">\n" +
-            "        <component>HIVE_SERVER</component>\n" +
-            "        <config-property>hive.server2.use.SSL</config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"PATH\">\n" +
-            "        <component>HIVE_SERVER</component>\n" +
-            "        <config-property>hive.server2.thrift.http.path</config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"PORT\">\n" +
-            "        <component>HIVE_SERVER</component>\n" +
-            "        <config-property>hive.server2.thrift.http.port</config-property>\n" +
-            "      </property>\n" +
-            "      <property name=\"SCHEME\">\n" +
-            "        <config-property>\n" +
-            "            <if property=\"USE_SSL\" value=\"true\">\n" +
-            "                <then>https</then>\n" +
-            "                <else>http</else>\n" +
-            "            </if>\n" +
-            "        </config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "</service-discovery-url-mappings>\n";
-
-
-    private static final String OVERRIDE_MAPPING_FILE_CONTENTS =
-            "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
-            "<service-discovery-url-mappings>\n" +
-            "  <service name=\"WEBHDFS\">\n" +
-            "    <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs/OVERRIDE</url-pattern>\n" +
-            "    <properties>\n" +
-            "      <property name=\"WEBHDFS_ADDRESS\">\n" +
-            "        <service-config name=\"HDFS\">hdfs-site</service-config>\n" +
-            "        <config-property>dfs.namenode.http-address</config-property>\n" +
-            "      </property>\n" +
-            "    </properties>\n" +
-            "  </service>\n" +
-            "</service-discovery-url-mappings>\n";
-
-}
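
The TEST_MAPPING_CONFIG above also documents the conditional form of the mapping DSL:
a derived property such as SCHEME or WEBAPP_ADDRESS chooses between two alternatives
via <if property="..." value="..."><then>...</then><else>...</else></if>, and in the
WEBAPP_ADDRESS case the chosen alternative is itself the name of another property.
The following is a minimal sketch of that resolution step, assuming the component's
property values have already been collected into a map; the class and method names are
illustrative and do not reflect the actual Knox resolver classes.

    import java.util.HashMap;
    import java.util.Map;

    public class ConditionalMappingSketch {

        // Mirrors <if property="..." value="..."><then>..</then><else>..</else></if>
        static String resolveIf(Map<String, String> props, String testProperty,
                                String testValue, String thenValue, String elseValue) {
            return testValue.equals(props.get(testProperty)) ? thenValue : elseValue;
        }

        public static void main(String[] args) {
            Map<String, String> props = new HashMap<>();
            props.put("HTTP_POLICY", "HTTPS_ONLY");
            props.put("WEBAPP_HTTP_ADDRESS", "host2:1111");
            props.put("WEBAPP_HTTPS_ADDRESS", "host2:22222");

            // SCHEME resolves directly to a literal value...
            String scheme = resolveIf(props, "HTTP_POLICY", "HTTPS_ONLY", "https", "http");
            // ...while WEBAPP_ADDRESS resolves to the name of another property,
            // which is then looked up to fill the {WEBAPP_ADDRESS} token.
            String addressKey = resolveIf(props, "HTTP_POLICY", "HTTPS_ONLY",
                                          "WEBAPP_HTTPS_ADDRESS", "WEBAPP_HTTP_ADDRESS");
            String url = scheme + "://" + props.get(addressKey) + "/ws";
            System.out.println(url); // https://host2:22222/ws, as asserted in the RM test
        }
    }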

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
new file mode 100644
index 0000000..f015dd5
--- /dev/null
+++ b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreatorTest.java
@@ -0,0 +1,876 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.commons.io.FileUtils;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static junit.framework.TestCase.assertTrue;
+import static junit.framework.TestCase.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+
+public class AmbariDynamicServiceURLCreatorTest {
+
+    @Test
+    public void testHiveURLFromInternalMapping() throws Exception {
+        testHiveURL(null);
+    }
+
+    @Test
+    public void testHiveURLFromExternalMapping() throws Exception {
+        testHiveURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testHiveURL(Object mappingConfiguration) throws Exception {
+
+        final String   SERVICE_NAME = "HIVE";
+        final String[] HOSTNAMES    = {"host3", "host2", "host4"};
+        final String   HTTP_PATH    = "cliservice";
+        final String   HTTP_PORT    = "10001";
+        final String   BINARY_PORT  = "10000";
+
+        String expectedScheme = "http";
+
+        final List<String> hiveServerHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent hiveServer = EasyMock.createNiceMock(AmbariComponent.class);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("HIVE_SERVER")).andReturn(hiveServer).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Configure HTTP Transport
+        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("false").anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn(HTTP_PATH).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("http").anyTimes();
+        EasyMock.replay(hiveServer);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        List<String> urls = builder.create(SERVICE_NAME);
+        assertEquals(HOSTNAMES.length, urls.size());
+        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, HTTP_PATH);
+
+        // Configure BINARY Transport
+        EasyMock.reset(hiveServer);
+        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("false").anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn("").anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.port")).andReturn(BINARY_PORT).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("binary").anyTimes();
+        EasyMock.replay(hiveServer);
+
+        // Run the test
+        urls = builder.create(SERVICE_NAME);
+        assertEquals(HOSTNAMES.length, urls.size());
+        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, "");
+
+        // Configure HTTPS Transport
+        EasyMock.reset(hiveServer);
+        EasyMock.expect(hiveServer.getHostNames()).andReturn(hiveServerHosts).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.use.SSL")).andReturn("true").anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.path")).andReturn(HTTP_PATH).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.thrift.http.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(hiveServer.getConfigProperty("hive.server2.transport.mode")).andReturn("http").anyTimes();
+        EasyMock.replay(hiveServer);
+
+        // Run the test
+        expectedScheme = "https";
+        urls = builder.create(SERVICE_NAME);
+        assertEquals(HOSTNAMES.length, urls.size());
+        validateServiceURLs(urls, HOSTNAMES, expectedScheme, HTTP_PORT, HTTP_PATH);
+    }
+
+    @Test
+    public void testResourceManagerURLFromInternalMapping() throws Exception {
+        testResourceManagerURL(null);
+    }
+
+    @Test
+    public void testResourceManagerURLFromExternalMapping() throws Exception {
+        testResourceManagerURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testResourceManagerURL(Object mappingConfiguration) throws Exception {
+
+        final String HTTP_ADDRESS  = "host2:1111";
+        final String HTTPS_ADDRESS = "host2:22222";
+
+        // HTTP
+        AmbariComponent resman = EasyMock.createNiceMock(AmbariComponent.class);
+        setResourceManagerComponentExpectations(resman, HTTP_ADDRESS, HTTPS_ADDRESS, "HTTP");
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("RESOURCEMANAGER")).andReturn(resman).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        String url = builder.create("RESOURCEMANAGER").get(0);
+        assertEquals("http://" + HTTP_ADDRESS + "/ws", url);
+
+        // HTTPS
+        EasyMock.reset(resman);
+        setResourceManagerComponentExpectations(resman, HTTP_ADDRESS, HTTPS_ADDRESS, "HTTPS_ONLY");
+
+        // Run the test
+        url = builder.create("RESOURCEMANAGER").get(0);
+        assertEquals("https://" + HTTPS_ADDRESS + "/ws", url);
+    }
+
+    private void setResourceManagerComponentExpectations(final AmbariComponent resmanMock,
+                                                         final String          httpAddress,
+                                                         final String          httpsAddress,
+                                                         final String          httpPolicy) {
+        EasyMock.expect(resmanMock.getConfigProperty("yarn.resourcemanager.webapp.address")).andReturn(httpAddress).anyTimes();
+        EasyMock.expect(resmanMock.getConfigProperty("yarn.resourcemanager.webapp.https.address")).andReturn(httpsAddress).anyTimes();
+        EasyMock.expect(resmanMock.getConfigProperty("yarn.http.policy")).andReturn(httpPolicy).anyTimes();
+        EasyMock.replay(resmanMock);
+    }
+
+    @Test
+    public void testJobTrackerURLFromInternalMapping() throws Exception {
+        testJobTrackerURL(null);
+    }
+
+    @Test
+    public void testJobTrackerURLFromExternalMapping() throws Exception {
+        testJobTrackerURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testJobTrackerURL(Object mappingConfiguration) throws Exception {
+        final String ADDRESS = "host2:5678";
+
+        AmbariComponent resman = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(resman.getConfigProperty("yarn.resourcemanager.address")).andReturn(ADDRESS).anyTimes();
+        EasyMock.replay(resman);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("RESOURCEMANAGER")).andReturn(resman).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        String url = builder.create("JOBTRACKER").get(0);
+        assertEquals("rpc://" + ADDRESS, url);
+    }
+
+    @Test
+    public void testNameNodeURLFromInternalMapping() throws Exception {
+        testNameNodeURL(null);
+    }
+
+    @Test
+    public void testNameNodeURLFromExternalMapping() throws Exception {
+        testNameNodeURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testNameNodeURL(Object mappingConfiguration) throws Exception {
+        final String ADDRESS = "host1:1234";
+
+        AmbariComponent namenode = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(namenode.getConfigProperty("dfs.namenode.rpc-address")).andReturn(ADDRESS).anyTimes();
+        EasyMock.replay(namenode);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("NAMENODE")).andReturn(namenode).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        String url = builder.create("NAMENODE").get(0);
+        assertEquals("hdfs://" + ADDRESS, url);
+    }
+
+    @Test
+    public void testWebHCatURLFromInternalMapping() throws Exception {
+        testWebHCatURL(null);
+    }
+
+    @Test
+    public void testWebHCatURLFromExternalMapping() throws Exception {
+        testWebHCatURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testWebHCatURL(Object mappingConfiguration) throws Exception {
+
+        final String HOSTNAME = "host3";
+        final String PORT     = "1919";
+
+        AmbariComponent webhcatServer = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(webhcatServer.getConfigProperty("templeton.port")).andReturn(PORT).anyTimes();
+        List<String> webHcatServerHosts = Collections.singletonList(HOSTNAME);
+        EasyMock.expect(webhcatServer.getHostNames()).andReturn(webHcatServerHosts).anyTimes();
+        EasyMock.replay(webhcatServer);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("WEBHCAT_SERVER")).andReturn(webhcatServer).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        String url = builder.create("WEBHCAT").get(0);
+        assertEquals("http://" + HOSTNAME + ":" + PORT + "/templeton", url);
+    }
+
+    @Test
+    public void testOozieURLFromInternalMapping() throws Exception {
+        testOozieURL(null);
+    }
+
+    @Test
+    public void testOozieURLFromExternalMapping() throws Exception {
+        testOozieURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testOozieURL(Object mappingConfiguration) throws Exception {
+        final String URL = "http://host3:2222";
+
+        AmbariComponent oozieServer = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(oozieServer.getConfigProperty("oozie.base.url")).andReturn(URL).anyTimes();
+        EasyMock.replay(oozieServer);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("OOZIE_SERVER")).andReturn(oozieServer).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        String url = builder.create("OOZIE").get(0);
+        assertEquals(URL, url);
+    }
+
+    @Test
+    public void testWebHBaseURLFromInternalMapping() throws Exception {
+        testWebHBaseURL(null);
+    }
+
+    @Test
+    public void testWebHBaseURLFromExternalMapping() throws Exception {
+        testWebHBaseURL(TEST_MAPPING_CONFIG);
+    }
+
+    private void testWebHBaseURL(Object mappingConfiguration) throws Exception {
+        final String[] HOSTNAMES = {"host2", "host4"};
+
+        AmbariComponent hbaseMaster = EasyMock.createNiceMock(AmbariComponent.class);
+        List<String> hbaseMasterHosts = Arrays.asList(HOSTNAMES);
+        EasyMock.expect(hbaseMaster.getHostNames()).andReturn(hbaseMasterHosts).anyTimes();
+        EasyMock.replay(hbaseMaster);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("HBASE_MASTER")).andReturn(hbaseMaster).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, mappingConfiguration);
+        List<String> urls = builder.create("WEBHBASE");
+        validateServiceURLs(urls, HOSTNAMES, "http", "60080", null);
+    }
+
+    @Test
+    public void testWebHdfsURLFromInternalMapping() throws Exception {
+        testWebHdfsURL(null);
+    }
+
+    @Test
+    public void testWebHdfsURLFromExternalMapping() throws Exception {
+        testWebHdfsURL(TEST_MAPPING_CONFIG);
+    }
+
+    @Test
+    public void testWebHdfsURLFromSystemPropertyOverride() throws Exception {
+        // Write the test mapping configuration to a temp file
+        File mappingFile = File.createTempFile("mapping-config", ".xml");
+        FileUtils.write(mappingFile, OVERRIDE_MAPPING_FILE_CONTENTS, "utf-8");
+
+        // Set the system property to point to the temp file
+        System.setProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY,
+                           mappingFile.getAbsolutePath());
+        try {
+            final String ADDRESS = "host3:1357";
+            // The URL creator should apply the file contents, and create the URL accordingly
+            String url = getTestWebHdfsURL(ADDRESS, null);
+
+            // Verify the URL matches the pattern from the file
+            assertEquals("http://" + ADDRESS + "/webhdfs/OVERRIDE", url);
+        } finally {
+            // Reset the system property, and delete the temp file
+            System.clearProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY);
+            mappingFile.delete();
+        }
+    }
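+
+    // A minimal sketch (not executed by this test) of applying the same override
+    // elsewhere, assuming a hypothetical file path; the property name is the
+    // constant already used above:
+    //   System.setProperty(AmbariDynamicServiceURLCreator.MAPPING_CONFIG_OVERRIDE_PROPERTY,
+    //                      "/etc/knox/conf/custom-url-mappings.xml");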
+
+    private void testWebHdfsURL(Object mappingConfiguration) throws Exception {
+        final String ADDRESS = "host3:1357";
+        assertEquals("http://" + ADDRESS + "/webhdfs", getTestWebHdfsURL(ADDRESS, mappingConfiguration));
+    }
+
+
+    private String getTestWebHdfsURL(String address, Object mappingConfiguration) throws Exception {
+        AmbariCluster.ServiceConfiguration hdfsSC = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
+        Map<String, String> hdfsProps = new HashMap<>();
+        hdfsProps.put("dfs.namenode.http-address", address);
+        EasyMock.expect(hdfsSC.getProperties()).andReturn(hdfsProps).anyTimes();
+        EasyMock.replay(hdfsSC);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getServiceConfiguration("HDFS", "hdfs-site")).andReturn(hdfsSC).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Create the URL
+        AmbariDynamicServiceURLCreator creator = newURLCreator(cluster, mappingConfiguration);
+        return creator.create("WEBHDFS").get(0);
+    }
+
+
+    @Test
+    public void testAtlasApiURL() throws Exception {
+        final String ATLAS_REST_ADDRESS = "http://host2:21000";
+
+        AmbariComponent atlasServer = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.rest.address")).andReturn(ATLAS_REST_ADDRESS).anyTimes();
+        EasyMock.replay(atlasServer);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("ATLAS_SERVER")).andReturn(atlasServer).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("ATLAS-API");
+        assertEquals(1, urls.size());
+        assertEquals(ATLAS_REST_ADDRESS, urls.get(0));
+    }
+
+
+    @Test
+    public void testAtlasURL() throws Exception {
+        final String HTTP_PORT = "8787";
+        final String HTTPS_PORT = "8989";
+
+        final String[] HOSTNAMES = {"host1", "host4"};
+        final List<String> atlasServerHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent atlasServer = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(atlasServer.getHostNames()).andReturn(atlasServerHosts).anyTimes();
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.enableTLS")).andReturn("false").anyTimes();
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.http.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.https.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(atlasServer);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("ATLAS_SERVER")).andReturn(atlasServer).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("ATLAS");
+        validateServiceURLs(urls, HOSTNAMES, "http", HTTP_PORT, null);
+
+        EasyMock.reset(atlasServer);
+        EasyMock.expect(atlasServer.getHostNames()).andReturn(atlasServerHosts).anyTimes();
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.enableTLS")).andReturn("true").anyTimes();
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.http.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(atlasServer.getConfigProperty("atlas.server.https.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(atlasServer);
+
+        // Run the test
+        urls = builder.create("ATLAS");
+        validateServiceURLs(urls, HOSTNAMES, "https", HTTPS_PORT, null);
+    }
+
+
+    @Test
+    public void testZeppelinURL() throws Exception {
+        final String HTTP_PORT = "8787";
+        final String HTTPS_PORT = "8989";
+
+        final String[] HOSTNAMES = {"host1", "host4"};
+        final List<String> zeppelinServerHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(zeppelinMaster);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
+        EasyMock.replay(cluster);
+
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+
+        // Run the test
+        validateServiceURLs(builder.create("ZEPPELIN"), HOSTNAMES, "http", HTTP_PORT, null);
+
+        EasyMock.reset(zeppelinMaster);
+        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(zeppelinMaster);
+
+        // Run the test
+        validateServiceURLs(builder.create("ZEPPELIN"), HOSTNAMES, "https", HTTPS_PORT, null);
+    }
+
+
+    @Test
+    public void testZeppelinUiURL() throws Exception {
+        final String HTTP_PORT = "8787";
+        final String HTTPS_PORT = "8989";
+
+        final String[] HOSTNAMES = {"host1", "host4"};
+        final List<String> zeppelinServerHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(zeppelinMaster);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
+        EasyMock.replay(cluster);
+
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+
+        // Run the test
+        validateServiceURLs(builder.create("ZEPPELINUI"), HOSTNAMES, "http", HTTP_PORT, null);
+
+        EasyMock.reset(zeppelinMaster);
+        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(zeppelinMaster);
+
+        // Run the test
+        validateServiceURLs(builder.create("ZEPPELINUI"), HOSTNAMES, "https", HTTPS_PORT, null);
+    }
+
+
+    @Test
+    public void testZeppelinWsURL() throws Exception {
+        final String HTTP_PORT = "8787";
+        final String HTTPS_PORT = "8989";
+
+        final String[] HOSTNAMES = {"host1", "host4"};
+        final List<String> zeppelinServerHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent zeppelinMaster = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("false").anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(zeppelinMaster);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("ZEPPELIN_MASTER")).andReturn(zeppelinMaster).anyTimes();
+        EasyMock.replay(cluster);
+
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+
+        // Run the test
+        validateServiceURLs(builder.create("ZEPPELINWS"), HOSTNAMES, "ws", HTTP_PORT, null);
+
+        EasyMock.reset(zeppelinMaster);
+        EasyMock.expect(zeppelinMaster.getHostNames()).andReturn(zeppelinServerHosts).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.ssl")).andReturn("true").anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.port")).andReturn(HTTP_PORT).anyTimes();
+        EasyMock.expect(zeppelinMaster.getConfigProperty("zeppelin.server.ssl.port")).andReturn(HTTPS_PORT).anyTimes();
+        EasyMock.replay(zeppelinMaster);
+
+        // Run the test
+        validateServiceURLs(builder.create("ZEPPELINWS"), HOSTNAMES, "wss", HTTPS_PORT, null);
+    }
+
+
+    @Test
+    public void testDruidCoordinatorURL() throws Exception {
+        final String PORT = "8787";
+
+        final String[] HOSTNAMES = {"host3", "host2"};
+        final List<String> druidCoordinatorHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent druidCoordinator = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(druidCoordinator.getHostNames()).andReturn(druidCoordinatorHosts).anyTimes();
+        EasyMock.expect(druidCoordinator.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
+        EasyMock.replay(druidCoordinator);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("DRUID_COORDINATOR")).andReturn(druidCoordinator).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("DRUID-COORDINATOR");
+        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
+    }
+
+
+    @Test
+    public void testDruidBrokerURL() throws Exception {
+        final String PORT = "8181";
+
+        final String[] HOSTNAMES = {"host4", "host3"};
+        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent druidBroker = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(druidBroker.getHostNames()).andReturn(druidHosts).anyTimes();
+        EasyMock.expect(druidBroker.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
+        EasyMock.replay(druidBroker);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("DRUID_BROKER")).andReturn(druidBroker).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("DRUID-BROKER");
+        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
+    }
+
+
+    @Test
+    public void testDruidRouterURL() throws Exception {
+        final String PORT = "8282";
+
+        final String[] HOSTNAMES = {"host5", "host7"};
+        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent druidRouter = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(druidRouter.getHostNames()).andReturn(druidHosts).anyTimes();
+        EasyMock.expect(druidRouter.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
+        EasyMock.replay(druidRouter);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("DRUID_ROUTER")).andReturn(druidRouter).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("DRUID-ROUTER");
+        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
+    }
+
+
+    @Test
+    public void testDruidOverlordURL() throws Exception {
+        final String PORT = "8383";
+
+        final String[] HOSTNAMES = {"host4", "host1"};
+        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent druidOverlord = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(druidOverlord.getHostNames()).andReturn(druidHosts).anyTimes();
+        EasyMock.expect(druidOverlord.getConfigProperty("druid.port")).andReturn(PORT).anyTimes();
+        EasyMock.replay(druidOverlord);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("DRUID_OVERLORD")).andReturn(druidOverlord).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("DRUID-OVERLORD");
+        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
+    }
+
+
+    @Test
+    public void testDruidSupersetURL() throws Exception {
+        final String PORT = "8484";
+
+        final String[] HOSTNAMES = {"host4", "host1"};
+        final List<String> druidHosts = Arrays.asList(HOSTNAMES);
+
+        AmbariComponent druidSuperset = EasyMock.createNiceMock(AmbariComponent.class);
+        EasyMock.expect(druidSuperset.getHostNames()).andReturn(druidHosts).anyTimes();
+        EasyMock.expect(druidSuperset.getConfigProperty("SUPERSET_WEBSERVER_PORT")).andReturn(PORT).anyTimes();
+        EasyMock.replay(druidSuperset);
+
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("DRUID_SUPERSET")).andReturn(druidSuperset).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("SUPERSET");
+        validateServiceURLs(urls, HOSTNAMES, "http", PORT, null);
+    }
+
+
+    @Test
+    public void testMissingServiceComponentURL() throws Exception {
+        AmbariCluster cluster = EasyMock.createNiceMock(AmbariCluster.class);
+        EasyMock.expect(cluster.getComponent("DRUID_BROKER")).andReturn(null).anyTimes();
+        EasyMock.expect(cluster.getComponent("HIVE_SERVER")).andReturn(null).anyTimes();
+        EasyMock.replay(cluster);
+
+        // Run the test
+        AmbariDynamicServiceURLCreator builder = newURLCreator(cluster, null);
+        List<String> urls = builder.create("DRUID-BROKER");
+        assertNotNull(urls);
+        assertEquals(1, urls.size());
+        assertEquals("http://{HOST}:{PORT}", urls.get(0));
+
+        urls = builder.create("HIVE");
+        assertNotNull(urls);
+        assertEquals(1, urls.size());
+        assertEquals("http://{HOST}:{PORT}/{PATH}", urls.get(0));
+    }
+
+
+    /**
+     * Convenience method for creating AmbariDynamicServiceURLCreator instances from different mapping configuration
+     * input sources.
+     *
+     * @param cluster       The Ambari ServiceDiscovery Cluster model
+     * @param mappingConfig The mapping configuration, or null if the internal config should be used.
+     *
+     * @return An AmbariDynamicServiceURLCreator instance, capable of creating service URLs based on the specified
+     *         cluster's configuration details.
+     */
+    private static AmbariDynamicServiceURLCreator newURLCreator(AmbariCluster cluster, Object mappingConfig) throws Exception {
+        AmbariDynamicServiceURLCreator result = null;
+
+        if (mappingConfig == null) {
+            result = new AmbariDynamicServiceURLCreator(cluster);
+        } else {
+            if (mappingConfig instanceof String) {
+                result = new AmbariDynamicServiceURLCreator(cluster, (String) mappingConfig);
+            } else if (mappingConfig instanceof File) {
+                result = new AmbariDynamicServiceURLCreator(cluster, (File) mappingConfig);
+            }
+        }
+
+        return result;
+    }
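+
+    // Hedged usage sketch for the helper above ("cluster" is any mocked AmbariCluster
+    // as in these tests; the file path is hypothetical):
+    //   AmbariDynamicServiceURLCreator fromInternal = newURLCreator(cluster, null);
+    //   AmbariDynamicServiceURLCreator fromXml      = newURLCreator(cluster, TEST_MAPPING_CONFIG);
+    //   AmbariDynamicServiceURLCreator fromFile     = newURLCreator(cluster, new File("mapping.xml"));
+    // Any other mappingConfig type yields null by design.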
+
+
+    /**
+     * Validate the specified service URLs.
+     *
+     * @param urlsToValidate The URLs to validate
+     * @param hostNames      The host names expected in the test URLs
+     * @param scheme         The expected scheme for the URLs
+     * @param port           The expected port for the URLs
+     * @param path           The expected path for the URLs
+     */
+    private static void validateServiceURLs(List<String> urlsToValidate,
+                                            String[]     hostNames,
+                                            String       scheme,
+                                            String       port,
+                                            String       path) throws MalformedURLException {
+
+        List<String> hostNamesToTest = new LinkedList<>(Arrays.asList(hostNames));
+        for (String url : urlsToValidate) {
+            URI test = null;
+            try {
+                // Make sure it's a valid URL
+                test = new URI(url);
+            } catch (URISyntaxException e) {
+                fail(e.getMessage());
+            }
+
+            // Validate the scheme
+            assertEquals(scheme, test.getScheme());
+
+            // Validate the port
+            assertEquals(port, String.valueOf(test.getPort()));
+
+            // If the expected path is not specified, don't validate it
+            if (path != null) {
+                assertEquals("/" + path, test.getPath());
+            }
+
+            // Validate the host name
+            assertTrue(hostNamesToTest.contains(test.getHost()));
+            hostNamesToTest.remove(test.getHost());
+        }
+        assertTrue(hostNamesToTest.isEmpty());
+    }
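+
+    // Example arguments that satisfy this validation (a sketch mirroring the
+    // WEBHBASE test above):
+    //   validateServiceURLs(Arrays.asList("http://host2:60080", "http://host4:60080"),
+    //                       new String[]{"host2", "host4"}, "http", "60080", null);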
+
+
+    private static final String TEST_MAPPING_CONFIG =
+            "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
+            "<service-discovery-url-mappings>\n" +
+            "  <service name=\"NAMENODE\">\n" +
+            "    <url-pattern>hdfs://{DFS_NAMENODE_RPC_ADDRESS}</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"DFS_NAMENODE_RPC_ADDRESS\">\n" +
+            "        <component>NAMENODE</component>\n" +
+            "        <config-property>dfs.namenode.rpc-address</config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "\n" +
+            "  <service name=\"JOBTRACKER\">\n" +
+            "    <url-pattern>rpc://{YARN_RM_ADDRESS}</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"YARN_RM_ADDRESS\">\n" +
+            "        <component>RESOURCEMANAGER</component>\n" +
+            "        <config-property>yarn.resourcemanager.address</config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "\n" +
+            "  <service name=\"WEBHDFS\">\n" +
+            "    <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"WEBHDFS_ADDRESS\">\n" +
+            "        <service-config name=\"HDFS\">hdfs-site</service-config>\n" +
+            "        <config-property>dfs.namenode.http-address</config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "\n" +
+            "  <service name=\"WEBHCAT\">\n" +
+            "    <url-pattern>http://{HOST}:{PORT}/templeton</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"HOST\">\n" +
+            "        <component>WEBHCAT_SERVER</component>\n" +
+            "        <hostname/>\n" +
+            "      </property>\n" +
+            "      <property name=\"PORT\">\n" +
+            "        <component>WEBHCAT_SERVER</component>\n" +
+            "        <config-property>templeton.port</config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "\n" +
+            "  <service name=\"OOZIE\">\n" +
+            "    <url-pattern>{OOZIE_ADDRESS}</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"OOZIE_ADDRESS\">\n" +
+            "        <component>OOZIE_SERVER</component>\n" +
+            "        <config-property>oozie.base.url</config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "\n" +
+            "  <service name=\"WEBHBASE\">\n" +
+            "    <url-pattern>http://{HOST}:60080</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"HOST\">\n" +
+            "        <component>HBASE_MASTER</component>\n" +
+            "        <hostname/>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "  <service name=\"RESOURCEMANAGER\">\n" +
+            "    <url-pattern>{SCHEME}://{WEBAPP_ADDRESS}/ws</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"WEBAPP_HTTP_ADDRESS\">\n" +
+            "        <component>RESOURCEMANAGER</component>\n" +
+            "        <config-property>yarn.resourcemanager.webapp.address</config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"WEBAPP_HTTPS_ADDRESS\">\n" +
+            "        <component>RESOURCEMANAGER</component>\n" +
+            "        <config-property>yarn.resourcemanager.webapp.https.address</config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"HTTP_POLICY\">\n" +
+            "        <component>RESOURCEMANAGER</component>\n" +
+            "        <config-property>yarn.http.policy</config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"SCHEME\">\n" +
+            "        <config-property>\n" +
+            "          <if property=\"HTTP_POLICY\" value=\"HTTPS_ONLY\">\n" +
+            "            <then>https</then>\n" +
+            "            <else>http</else>\n" +
+            "          </if>\n" +
+            "        </config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"WEBAPP_ADDRESS\">\n" +
+            "        <component>RESOURCEMANAGER</component>\n" +
+            "        <config-property>\n" +
+            "          <if property=\"HTTP_POLICY\" value=\"HTTPS_ONLY\">\n" +
+            "            <then>WEBAPP_HTTPS_ADDRESS</then>\n" +
+            "            <else>WEBAPP_HTTP_ADDRESS</else>\n" +
+            "          </if>\n" +
+            "        </config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "  <service name=\"HIVE\">\n" +
+            "    <url-pattern>{SCHEME}://{HOST}:{PORT}/{PATH}</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"HOST\">\n" +
+            "        <component>HIVE_SERVER</component>\n" +
+            "        <hostname/>\n" +
+            "      </property>\n" +
+            "      <property name=\"USE_SSL\">\n" +
+            "        <component>HIVE_SERVER</component>\n" +
+            "        <config-property>hive.server2.use.SSL</config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"PATH\">\n" +
+            "        <component>HIVE_SERVER</component>\n" +
+            "        <config-property>hive.server2.thrift.http.path</config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"PORT\">\n" +
+            "        <component>HIVE_SERVER</component>\n" +
+            "        <config-property>hive.server2.thrift.http.port</config-property>\n" +
+            "      </property>\n" +
+            "      <property name=\"SCHEME\">\n" +
+            "        <config-property>\n" +
+            "            <if property=\"USE_SSL\" value=\"true\">\n" +
+            "                <then>https</then>\n" +
+            "                <else>http</else>\n" +
+            "            </if>\n" +
+            "        </config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "</service-discovery-url-mappings>\n";
+
+
+    private static final String OVERRIDE_MAPPING_FILE_CONTENTS =
+            "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
+            "<service-discovery-url-mappings>\n" +
+            "  <service name=\"WEBHDFS\">\n" +
+            "    <url-pattern>http://{WEBHDFS_ADDRESS}/webhdfs/OVERRIDE</url-pattern>\n" +
+            "    <properties>\n" +
+            "      <property name=\"WEBHDFS_ADDRESS\">\n" +
+            "        <service-config name=\"HDFS\">hdfs-site</service-config>\n" +
+            "        <config-property>dfs.namenode.http-address</config-property>\n" +
+            "      </property>\n" +
+            "    </properties>\n" +
+            "  </service>\n" +
+            "</service-discovery-url-mappings>\n";
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-server/src/main/java/org/apache/hadoop/gateway/websockets/ProxyInboundClient.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/websockets/ProxyInboundClient.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/websockets/ProxyInboundClient.java
deleted file mode 100644
index 4e938d2..0000000
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/websockets/ProxyInboundClient.java
+++ /dev/null
@@ -1,107 +0,0 @@
-package org.apache.hadoop.gateway.websockets;
-
-import javax.websocket.CloseReason;
-import javax.websocket.Endpoint;
-import javax.websocket.EndpointConfig;
-import javax.websocket.MessageHandler;
-import javax.websocket.Session;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-/**
- * A Websocket client with callback which is not annotation based.
- * This handler accepts String and binary messages.
- * @since 0.14.0
- */
-public class ProxyInboundClient extends Endpoint {
-
-  /**
-   * Callback to be called once we have events on our socket.
-   */
-  private MessageEventCallback callback;
-
-  protected Session session;
-  protected EndpointConfig config;
-
-
-  public ProxyInboundClient(final MessageEventCallback callback) {
-    super();
-    this.callback = callback;
-  }
-
-  /**
-   * Developers must implement this method to be notified when a new
-   * conversation has just begun.
-   *
-   * @param backendSession the session that has just been activated.
-   * @param config  the configuration used to configure this endpoint.
-   */
-  @Override
-  public void onOpen(final javax.websocket.Session backendSession, final EndpointConfig config) {
-    this.session = backendSession;
-    this.config = config;
-
-    /* Set the max message size */
-    session.setMaxBinaryMessageBufferSize(Integer.MAX_VALUE);
-    session.setMaxTextMessageBufferSize(Integer.MAX_VALUE);
-
-    /* Add message handler for binary data */
-    session.addMessageHandler(new MessageHandler.Whole<byte[]>() {
-
-      /**
-       * Called when the message has been fully received.
-       *
-       * @param message the message data.
-       */
-      @Override
-      public void onMessage(final byte[] message) {
-        callback.onMessageBinary(message, true, session);
-      }
-
-    });
-
-    /* Add message handler for text data */
-    session.addMessageHandler(new MessageHandler.Whole<String>() {
-
-      /**
-       * Called when the message has been fully received.
-       *
-       * @param message the message data.
-       */
-      @Override
-      public void onMessage(final String message) {
-        callback.onMessageText(message, session);
-      }
-
-    });
-
-    callback.onConnectionOpen(backendSession);
-  }
-
-  @Override
-  public void onClose(final javax.websocket.Session backendSession, final CloseReason closeReason) {
-    callback.onConnectionClose(closeReason);
-    this.session = null;
-  }
-
-  @Override
-  public void onError(final javax.websocket.Session backendSession, final Throwable cause) {
-    callback.onError(cause);
-    this.session = null;
-  }
-
-}


[33/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
index 4c4d419,0000000..902327c
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/util/KnoxCLITest.java
@@@ -1,649 -1,0 +1,1032 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.util;
 +
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +import org.apache.commons.io.FileUtils;
++import org.apache.knox.conf.Configuration;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.services.GatewayServices;
++import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClient;
++import org.apache.knox.gateway.services.config.client.RemoteConfigurationRegistryClientService;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.services.security.MasterService;
++import org.apache.knox.test.TestUtils;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.io.PrintStream;
 +import java.net.URL;
 +import java.util.UUID;
 +
 +import static org.hamcrest.CoreMatchers.containsString;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.not;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
++import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +
 +/**
 + * @author larry
 + *
 + */
 +public class KnoxCLITest {
 +  private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
 +  private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
 +
 +  @Before
 +  public void setup() throws Exception {
 +    System.setOut(new PrintStream(outContent));
 +    System.setErr(new PrintStream(errContent));
 +  }
 +
 +  @Test
-   public void testSuccessfulAlaisLifecycle() throws Exception {
++  public void testRemoteConfigurationRegistryClientService() throws Exception {
++    outContent.reset();
++
++    KnoxCLI cli = new KnoxCLI();
++    Configuration config = new GatewayConfigImpl();
++    // Configure a client for the test local filesystem registry implementation
++    config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=/test");
++    cli.setConf(config);
++
++    // This is only to get the gateway services initialized
++    cli.run(new String[]{"version"});
++
++    RemoteConfigurationRegistryClientService service =
++                                   cli.getGatewayServices().getService(GatewayServices.REMOTE_REGISTRY_CLIENT_SERVICE);
++    assertNotNull(service);
++    RemoteConfigurationRegistryClient client = service.get("test_client");
++    assertNotNull(client);
++
++    assertNull(service.get("bogus"));
++  }
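++
++  // Registry client configuration convention exercised by these tests (inferred
++  // from the values above, not an exhaustive specification):
++  //   gateway.remote.config.registry.<client-name> = type=<RegistryType>;address=<address>
++  // e.g. "test_client" maps to a LocalFileSystem registry rooted at /test.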
++
++  @Test
++  public void testListRemoteConfigurationRegistryClients() throws Exception {
++    outContent.reset();
++
++    KnoxCLI cli = new KnoxCLI();
++    String[] args = { "list-registry-clients", "--master","master" };
++
++    Configuration config = new GatewayConfigImpl();
++    cli.setConf(config);
++
++    // Test with no registry clients configured
++    int rc = cli.run(args);
++    assertEquals(0, rc);
++    assertTrue(outContent.toString(), outContent.toString().isEmpty());
++
++    // Test with a single client configured
++    // Configure a client for the test local filesystem registry implementation
++    config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=/test1");
++    cli.setConf(config);
++    outContent.reset();
++    rc = cli.run(args);
++    assertEquals(0, rc);
++    assertTrue(outContent.toString(), outContent.toString().contains("test_client"));
++
++    // Configure another client for the test local filesystem registry implementation
++    config.set("gateway.remote.config.registry.another_client", "type=LocalFileSystem;address=/test2");
++    cli.setConf(config);
++    outContent.reset();
++    rc = cli.run(args);
++    assertEquals(0, rc);
++    assertTrue(outContent.toString(), outContent.toString().contains("test_client"));
++    assertTrue(outContent.toString(), outContent.toString().contains("another_client"));
++  }
++
++  @Test
++  public void testRemoteConfigurationRegistryGetACLs() throws Exception {
++    outContent.reset();
++
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++
++      final String providerConfigName = "my-provider-config.xml";
++      final String providerConfigContent = "<gateway/>\n";
++      final File testProviderConfig = new File(testRoot, providerConfigName);
++      final String[] uploadArgs = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
++                                   "--registry-client", "test_client",
++                                   "--master", "master"};
++      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
++
++
++      final String[] args = {"get-registry-acl", "/knox/config/shared-providers",
++                             "--registry-client", "test_client",
++                             "--master", "master"};
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      int rc = cli.run(uploadArgs);
++      assertEquals(0, rc);
++
++      // Run the test command
++      rc = cli.run(args);
++
++      // Validate the result
++      assertEquals(0, rc);
++      String result = outContent.toString();
++      assertEquals(result, 3, result.split("\n").length);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++
++  @Test
++  public void testRemoteConfigurationRegistryUploadProviderConfig() throws Exception {
++    outContent.reset();
++
++    final String providerConfigName = "my-provider-config.xml";
++    final String providerConfigContent = "<gateway/>\n";
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++      final File testProviderConfig = new File(testRoot, providerConfigName);
++
++      final String[] args = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
++                             "--registry-client", "test_client",
++                             "--master", "master"};
++
++      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      // Run the test command
++      int rc = cli.run(args);
++
++      // Validate the result
++      assertEquals(0, rc);
++      File registryFile = new File(testRegistry, "knox/config/shared-providers/" + providerConfigName);
++      assertTrue(registryFile.exists());
++      assertEquals(FileUtils.readFileToString(registryFile), providerConfigContent);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++
++  @Test
++  public void testRemoteConfigurationRegistryUploadProviderConfigWithDestinationOverride() throws Exception {
++    outContent.reset();
++
++    final String providerConfigName = "my-provider-config.xml";
++    final String entryName = "my-providers.xml";
++    final String providerConfigContent = "<gateway/>\n";
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++      final File testProviderConfig = new File(testRoot, providerConfigName);
++
++      final String[] args = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
++                             "--entry-name", entryName,
++                             "--registry-client", "test_client",
++                             "--master", "master"};
++
++      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      // Run the test command
++      int rc = cli.run(args);
++
++      // Validate the result
++      assertEquals(0, rc);
++      assertFalse((new File(testRegistry, "knox/config/shared-providers/" + providerConfigName)).exists());
++      File registryFile = new File(testRegistry, "knox/config/shared-providers/" + entryName);
++      assertTrue(registryFile.exists());
++      assertEquals(FileUtils.readFileToString(registryFile), providerConfigContent);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++
++  @Test
++  public void testRemoteConfigurationRegistryUploadDescriptor() throws Exception {
++    outContent.reset();
++
++    final String descriptorName = "my-topology.json";
++    final String descriptorContent = testDescriptorContentJSON;
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++      final File testDescriptor = new File(testRoot, descriptorName);
++
++      final String[] args = {"upload-descriptor", testDescriptor.getAbsolutePath(),
++                             "--registry-client", "test_client",
++                             "--master", "master"};
++
++      FileUtils.writeStringToFile(testDescriptor, descriptorContent);
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      // Run the test command
++      int rc = cli.run(args);
++
++      // Validate the result
++      assertEquals(0, rc);
++      File registryFile = new File(testRegistry, "knox/config/descriptors/" + descriptorName);
++      assertTrue(registryFile.exists());
++      assertEquals(FileUtils.readFileToString(registryFile), descriptorContent);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++  @Test
++  public void testRemoteConfigurationRegistryUploadDescriptorWithDestinationOverride() throws Exception {
++    outContent.reset();
++
++    final String descriptorName = "my-topology.json";
++    final String entryName = "different-topology.json";
++    final String descriptorContent = testDescriptorContentJSON;
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++      final File testDescriptor = new File(testRoot, descriptorName);
++
++      final String[] args = {"upload-descriptor", testDescriptor.getAbsolutePath(),
++                             "--entry-name", entryName,
++                             "--registry-client", "test_client",
++                             "--master", "master"};
++
++      FileUtils.writeStringToFile(testDescriptor, descriptorContent);
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      // Run the test command
++      int rc = cli.run(args);
++
++      // Validate the result
++      assertEquals(0, rc);
++      assertFalse((new File(testRegistry, "knox/config/descriptors/" + descriptorName)).exists());
++      File registryFile = new File(testRegistry, "knox/config/descriptors/" + entryName);
++      assertTrue(registryFile.exists());
++      assertEquals(FileUtils.readFileToString(registryFile), descriptorContent);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++  @Test
++  public void testRemoteConfigurationRegistryDeleteProviderConfig() throws Exception {
++    outContent.reset();
++
++    // Create a provider config
++    final String providerConfigName = "my-provider-config.xml";
++    final String providerConfigContent = "<gateway/>\n";
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++      final File testProviderConfig = new File(testRoot, providerConfigName);
++
++      final String[] createArgs = {"upload-provider-config", testProviderConfig.getAbsolutePath(),
++                                   "--registry-client", "test_client",
++                                   "--master", "master"};
++
++      FileUtils.writeStringToFile(testProviderConfig, providerConfigContent);
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      // Run the test command
++      int rc = cli.run(createArgs);
++
++      // Validate the result
++      assertEquals(0, rc);
++      File registryFile = new File(testRegistry, "knox/config/shared-providers/" + providerConfigName);
++      assertTrue(registryFile.exists());
++
++      outContent.reset();
++
++      // Delete the created provider config
++      final String[] deleteArgs = {"delete-provider-config", providerConfigName,
++                                   "--registry-client", "test_client",
++                                   "--master", "master"};
++      rc = cli.run(deleteArgs);
++      assertEquals(0, rc);
++      assertFalse(registryFile.exists());
++
++      // Try to delete a provider config that does not exist
++      rc = cli.run(new String[]{"delete-provider-config", "imaginary-providers.xml",
++                                "--registry-client", "test_client",
++                                "--master", "master"});
++      assertEquals(0, rc);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++  @Test
++  public void testRemoteConfigurationRegistryDeleteDescriptor() throws Exception {
++    outContent.reset();
++
++    final String descriptorName = "my-topology.json";
++    final String descriptorContent = testDescriptorContentJSON;
++
++    final File testRoot = TestUtils.createTempDir(this.getClass().getName());
++    try {
++      final File testRegistry = new File(testRoot, "registryRoot");
++      final File testDescriptor = new File(testRoot, descriptorName);
++
++      final String[] createArgs = {"upload-descriptor", testDescriptor.getAbsolutePath(),
++                             "--registry-client", "test_client",
++                             "--master", "master"};
++
++      FileUtils.writeStringToFile(testDescriptor, descriptorContent);
++
++      KnoxCLI cli = new KnoxCLI();
++      Configuration config = new GatewayConfigImpl();
++      // Configure a client for the test local filesystem registry implementation
++      config.set("gateway.remote.config.registry.test_client", "type=LocalFileSystem;address=" + testRegistry);
++      cli.setConf(config);
++
++      // Run the test command
++      int rc = cli.run(createArgs);
++
++      // Validate the result
++      assertEquals(0, rc);
++      File registryFile = new File(testRegistry, "knox/config/descriptors/" + descriptorName);
++      assertTrue(registryFile.exists());
++
++      outContent.reset();
++
++      // Delete the created descriptor
++      final String[] deleteArgs = {"delete-descriptor", descriptorName,
++                                   "--registry-client", "test_client",
++                                   "--master", "master"};
++      rc = cli.run(deleteArgs);
++      assertEquals(0, rc);
++      assertFalse(registryFile.exists());
++
++      // Try to delete a descriptor that does not exist
++      rc = cli.run(new String[]{"delete-descriptor", "bogus.json",
++                                "--registry-client", "test_client",
++                                "--master", "master"});
++      assertEquals(0, rc);
++    } finally {
++      FileUtils.forceDelete(testRoot);
++    }
++  }
++
++  @Test
++  public void testSuccessfulAliasLifecycle() throws Exception {
 +    outContent.reset();
 +    String[] args1 = {"create-alias", "alias1", "--value", "testvalue1", "--master", "master"};
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "created."));
 +
 +    outContent.reset();
 +    String[] args2 = {"list-alias", "--master", 
 +        "master"};
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +    outContent.reset();
 +    String[] args4 = {"delete-alias", "alias1", "--master", 
 +      "master"};
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "deleted."));
 +
 +    outContent.reset();
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias1"));
 +  }
 +  
 +  @Test
 +  public void testListAndDeleteOfAliasForInvalidClusterName() throws Exception {
 +    outContent.reset();
 +    String[] args1 =
 +        { "create-alias", "alias1", "--cluster", "cluster1", "--value", "testvalue1", "--master",
 +            "master" };
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains(
 +      "alias1 has been successfully " + "created."));
 +
 +    outContent.reset();
 +    String[] args2 = { "list-alias", "--cluster", "Invalidcluster1", "--master", "master" };
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    System.out.println(outContent.toString());
 +    assertTrue(outContent.toString(),
 +      outContent.toString().contains("Invalid cluster name provided: Invalidcluster1"));
 +
 +    outContent.reset();
 +    String[] args4 =
 +        { "delete-alias", "alias1", "--cluster", "Invalidcluster1", "--master", "master" };
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(),
 +      outContent.toString().contains("Invalid cluster name provided: Invalidcluster1"));
 +
 +  }
 +
 +  @Test
 +  public void testDeleteOfNonExistAliasFromUserDefinedCluster() throws Exception {
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    try {
 +      int rc = 0;
 +      outContent.reset();
 +      String[] args1 =
 +          { "create-alias", "alias1", "--cluster", "cluster1", "--value", "testvalue1", "--master",
 +              "master" };
 +      cli.run(args1);
 +
 +      // Delete a non-existent alias from the cluster
 +      outContent.reset();
 +      String[] args2 = { "delete-alias", "alias2", "--cluster", "cluster1", "--master", "master" };
 +      rc = cli.run(args2);
 +      assertEquals(0, rc);
 +      assertTrue(outContent.toString().contains("No such alias exists in the cluster."));
 +    } finally {
 +      outContent.reset();
 +      String[] args1 = { "delete-alias", "alias1", "--cluster", "cluster1", "--master", "master" };
 +      cli.run(args1);
 +    }
 +  }
 +
 +  @Test
 +  public void testDeleteOfNonExistAliasFromDefaultCluster() throws Exception {
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    try {
 +      int rc = 0;
 +      outContent.reset();
 +      String[] args1 = { "create-alias", "alias1", "--value", "testvalue1", "--master", "master" };
 +      cli.run(args1);
 +
 +      // Delete a non-existent alias from the cluster
 +      outContent.reset();
 +      String[] args2 = { "delete-alias", "alias2", "--master", "master" };
 +      rc = cli.run(args2);
 +      assertEquals(0, rc);
 +      assertTrue(outContent.toString().contains("No such alias exists in the cluster."));
 +    } finally {
 +      outContent.reset();
 +      String[] args1 = { "delete-alias", "alias1", "--master", "master" };
 +      cli.run(args1);
 +    }
 +  }
 +
 +  @Test
 +  public void testForInvalidArgument() throws Exception {
 +    outContent.reset();
 +    String[] args1 = { "--value", "testvalue1", "--master", "master" };
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    int rc = cli.run(args1);
 +    assertEquals(-2, rc);
 +    assertTrue(outContent.toString().contains("ERROR: Invalid Command"));
 +  }
 +
 +  @Test
 +  public void testListAndDeleteOfAliasForValidClusterName() throws Exception {
 +    outContent.reset();
 +    String[] args1 =
 +        { "create-alias", "alias1", "--cluster", "cluster1", "--value", "testvalue1", "--master",
 +            "master" };
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(new GatewayConfigImpl());
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains(
 +      "alias1 has been successfully created."));
 +
 +    outContent.reset();
 +    String[] args2 = { "list-alias", "--cluster", "cluster1", "--master", "master" };
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    System.out.println(outContent.toString());
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +    outContent.reset();
 +    String[] args4 =
 +        { "delete-alias", "alias1", "--cluster", "cluster1", "--master", "master" };
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains(
 +      "alias1 has been successfully deleted."));
 +
 +    outContent.reset();
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +  }
 +
 +  @Test
 +  public void testGatewayAndClusterStores() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +
 +    outContent.reset();
 +    String[] gwCreateArgs = {"create-alias", "alias1", "--value", "testvalue1", "--master", "master"};
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    rc = cli.run(gwCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "created."));
 +
 +    AliasService as = cli.getGatewayServices().getService(GatewayServices.ALIAS_SERVICE);
 +
 +    outContent.reset();
 +    String[] clusterCreateArgs = {"create-alias", "alias2", "--value", "testvalue1", "--cluster", "test", 
 +        "--master", "master"};
 +    cli = new KnoxCLI();
 +    cli.setConf( config );
 +    rc = cli.run(clusterCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias2 has been successfully " +
 +        "created."));
 +
 +    outContent.reset();
 +    String[] args2 = {"list-alias", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args2);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias2"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1"));
 +
 +    char[] passwordChars = as.getPasswordFromAliasForCluster("test", "alias2");
 +    assertNotNull(passwordChars);
 +    assertTrue(new String(passwordChars), "testvalue1".equals(new String(passwordChars)));
 +
 +    outContent.reset();
 +    String[] args1 = {"list-alias", "--cluster", "test", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args1);
 +    assertEquals(0, rc);
 +    assertFalse(outContent.toString(), outContent.toString().contains("alias1"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias2"));
 +
 +    outContent.reset();
 +    String[] args4 = {"delete-alias", "alias1", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias1 has been successfully " +
 +        "deleted."));
 +    
 +    outContent.reset();
 +    String[] args5 = {"delete-alias", "alias2", "--cluster", "test", "--master", "master"};
 +    cli = new KnoxCLI();
 +    rc = cli.run(args5);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("alias2 has been successfully " +
 +        "deleted."));
 +  }
 +
 +  private void createTestMaster() throws Exception {
 +    outContent.reset();
 +    String[] args = new String[]{ "create-master", "--master", "master", "--force" };
 +    KnoxCLI cli = new KnoxCLI();
 +    int rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    MasterService ms = cli.getGatewayServices().getService("MasterService");
 +    String master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master, is( "master" ) );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +  }
 +
 +  @Test
 +  public void testCreateSelfSignedCert() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +    createTestMaster();
 +    outContent.reset();
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    String[] gwCreateArgs = {"create-cert", "--hostname", "hostname1", "--master", "master"};
 +    int rc = 0;
 +    rc = cli.run(gwCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity has been successfully " +
 +        "created."));
 +  }
 +
 +  @Test
 +  public void testExportCert() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +    createTestMaster();
 +    outContent.reset();
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    String[] gwCreateArgs = {"create-cert", "--hostname", "hostname1", "--master", "master"};
 +    int rc = 0;
 +    rc = cli.run(gwCreateArgs);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity has been successfully " +
 +        "created."));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs2 = {"export-cert", "--type", "PEM"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs2);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Certificate gateway-identity has been successfully exported to"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity.pem"));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs2_5 = {"export-cert"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs2_5);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Certificate gateway-identity has been successfully exported to"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-identity.pem"));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs3 = {"export-cert", "--type", "JKS"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs3);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Certificate gateway-identity has been successfully exported to"));
 +    assertTrue(outContent.toString(), outContent.toString().contains("gateway-client-trust.jks"));
 +
 +    outContent.reset();
 +    String[] gwCreateArgs4 = {"export-cert", "--type", "invalid"};
 +    rc = 0;
 +    rc = cli.run(gwCreateArgs4);
 +    assertEquals(0, rc);
 +    assertTrue(outContent.toString(), outContent.toString().contains("Invalid type for export file provided."));
 +  }
 +
 +  @Test
 +  public void testCreateMaster() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    FileUtils.deleteQuietly( new File( config.getGatewaySecurityDir() ) );
 +    outContent.reset();
 +    String[] args = {"create-master", "--master", "master"};
 +    int rc = 0;
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    rc = cli.run(args);
 +    assertEquals(0, rc);
 +    MasterService ms = cli.getGatewayServices().getService("MasterService");
 +    assertTrue( new String( ms.getMasterSecret() ), "master".equals( new String( ms.getMasterSecret() ) ) );
 +    assertTrue(outContent.toString(), outContent.toString().contains("Master secret has been persisted to disk."));
 +  }
 +
 +  @Test
 +  public void testCreateMasterGenerate() throws Exception {
 +    String[] args = {"create-master", "--generate" };
 +    int rc = 0;
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    File masterFile = new File( config.getGatewaySecurityDir(), "master" );
 +
 +    // Need to delete the master file so that the change isn't ignored.
 +    if( masterFile.exists() ) {
 +      assertThat( "Failed to delete existing master file.", masterFile.delete(), is( true ) );
 +    }
 +    outContent.reset();
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(config);
 +    rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    MasterService ms = cli.getGatewayServices().getService("MasterService");
 +    String master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master.length(), is( 36 ) );
 +    assertThat( master.indexOf( '-' ), is( 8 ) );
 +    assertThat( master.indexOf( '-', 9 ), is( 13 ) );
 +    assertThat( master.indexOf( '-', 14 ), is( 18 ) );
 +    assertThat( master.indexOf( '-', 19 ), is( 23 ) );
 +    assertThat( UUID.fromString( master ), notNullValue() );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +
 +    // Need to delete the master file so that the change isn't ignored.
 +    if( masterFile.exists() ) {
 +      assertThat( "Failed to delete existing master file.", masterFile.delete(), is( true ) );
 +    }
 +    outContent.reset();
 +    cli = new KnoxCLI();
 +    rc = cli.run(args);
 +    ms = cli.getGatewayServices().getService("MasterService");
 +    String master2 = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master2.length(), is( 36 ) );
 +    assertThat( UUID.fromString( master2 ), notNullValue() );
 +    assertThat( master2, not( is( master ) ) );
 +    assertThat( rc, is( 0 ) );
 +    assertThat(outContent.toString(), containsString("Master secret has been persisted to disk."));
 +  }
 +
 +  @Test
 +  public void testCreateMasterForce() throws Exception {
 +    GatewayConfigImpl config = new GatewayConfigImpl();
 +    File masterFile = new File( config.getGatewaySecurityDir(), "master" );
 +
 +    // Need to delete the master file so that the change isn't ignored.
 +    if( masterFile.exists() ) {
 +      assertThat( "Failed to delete existing master file.", masterFile.delete(), is( true ) );
 +    }
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf(config);
 +    MasterService ms;
 +    int rc = 0;
 +    outContent.reset();
 +
 +    String[] args = { "create-master", "--master", "test-master-1" };
 +
 +    rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    ms = cli.getGatewayServices().getService("MasterService");
 +    String master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master, is( "test-master-1" ) );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +
 +    outContent.reset();
 +    rc = cli.run(args);
 +    assertThat( rc, is(0 ) );
 +    assertThat( outContent.toString(), containsString( "Master secret is already present on disk." ) );
 +
 +    outContent.reset();
 +    args = new String[]{ "create-master", "--master", "test-master-2", "--force" };
 +    rc = cli.run(args);
 +    assertThat( rc, is( 0 ) );
 +    ms = cli.getGatewayServices().getService("MasterService");
 +    master = String.copyValueOf( ms.getMasterSecret() );
 +    assertThat( master, is( "test-master-2" ) );
 +    assertThat( outContent.toString(), containsString( "Master secret has been persisted to disk." ) );
 +  }
 +
 +  @Test
 +  public void testListTopology() throws Exception {
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +    String args[] = {"list-topologies", "--master", "knox"};
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +
 +    cli.run( args );
 +    assertThat(outContent.toString(), containsString("sandbox"));
 +    assertThat(outContent.toString(), containsString("admin"));
 +  }
 +
 +  private class GatewayConfigMock extends GatewayConfigImpl {
 +    private String confDir;
 +    public void setConfDir(String location) {
 +      confDir = location;
 +    }
 +
 +    @Override
 +    public String getGatewayConfDir(){
 +      return confDir;
 +    }
 +  }
 +
 +  private static XMLTag createBadTopology() {
 +    XMLTag xml = XMLDoc.newDocument(true)
 +        .addRoot( "topology" )
 +        .addTag( "gateway" )
 +
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authentication" )
 +        .addTag( "name" ).addText( "ShiroProvider" )
 +        .addTag( "enabled" ).addText( "123" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "" )
 +        .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +        .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +        .addTag( "value" ).addText( "ldap://localhost:8443" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +        .addTag( "value" ).addText( "simple" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "urls./**" )
 +        .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "identity-assertion" )
 +        .addTag( "enabled" ).addText( "vvv" )
 +        .addTag( "name" ).addText( "Default" ).gotoParent()
 +        .addTag( "provider" )
 +        .gotoRoot()
 +        .addTag( "service" )
 +        .addTag( "role" ).addText( "test-service-role" )
 +        .gotoRoot();
 +    return xml;
 +  }
 +
 +  private static XMLTag createGoodTopology() {
 +    XMLTag xml = XMLDoc.newDocument( true )
 +        .addRoot( "topology" )
 +        .addTag( "gateway" )
 +
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "authentication" )
 +        .addTag( "name" ).addText( "ShiroProvider" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm" )
 +        .addTag( "value" ).addText( "org.apache.knox.gateway.shirorealm.KnoxLdapRealm" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +        .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +        .addTag( "value" ).addText( "ldap://localhost:8443").gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +        .addTag( "value" ).addText( "simple" ).gotoParent()
 +        .addTag( "param" )
 +        .addTag( "name" ).addText( "urls./**" )
 +        .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +        .addTag( "provider" )
 +        .addTag( "role" ).addText( "identity-assertion" )
 +        .addTag( "enabled" ).addText( "true" )
 +        .addTag( "name" ).addText( "Default" ).gotoParent()
 +        .addTag( "provider" )
 +        .gotoRoot()
 +        .addTag( "service" )
 +        .addTag( "role" ).addText( "test-service-role" )
 +        .gotoRoot();
 +    return xml;
 +  }
 +
 +  private File writeTestTopology( String name, XMLTag xml ) throws IOException {
 +    // Create the test topology.
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +
 +    File tempFile = new File( config.getGatewayTopologyDir(), name + ".xml." + UUID.randomUUID() );
 +    FileOutputStream stream = new FileOutputStream( tempFile );
 +    xml.toStream( stream );
 +    stream.close();
 +    File descriptor = new File( config.getGatewayTopologyDir(), name + ".xml" );
 +    tempFile.renameTo( descriptor );
 +    return descriptor;
 +  }
 +
 +  @Test
 +  public void testValidateTopology() throws Exception {
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +    String args[] = {"validate-topology", "--master", "knox", "--cluster", "sandbox"};
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    cli.run( args );
 +
 +    assertThat(outContent.toString(), containsString(config.getGatewayTopologyDir()));
 +    assertThat(outContent.toString(), containsString("sandbox"));
 +    assertThat(outContent.toString(), containsString("success"));
 +    outContent.reset();
 +
 +    String[] args2 = {"validate-topology", "--master", "knox", "--cluster", "NotATopology"};
 +    cli.run(args2);
 +
 +    assertThat(outContent.toString(), containsString("NotATopology"));
 +    assertThat(outContent.toString(), containsString("does not exist"));
 +    outContent.reset();
 +
 +    String[] args3 = {"validate-topology", "--master", "knox", "--path", config.getGatewayTopologyDir() + "/admin.xml"};
 +    cli.run(args3);
 +
 +    assertThat(outContent.toString(), containsString("admin"));
 +    assertThat(outContent.toString(), containsString("success"));
 +    outContent.reset();
 +
 +    String[] args4 = {"validate-topology", "--master", "knox", "--path", "not/a/path"};
 +    cli.run(args4);
 +    assertThat(outContent.toString(), containsString("does not exist"));
 +    assertThat(outContent.toString(), containsString("not/a/path"));
 +  }
 +
 +  @Test
 +  public void testValidateTopologyOutput() throws Exception {
 +
 +    File bad = writeTestTopology( "test-cluster-bad", createBadTopology() );
 +    File good = writeTestTopology( "test-cluster-good", createGoodTopology() );
 +
 +    GatewayConfigMock config = new GatewayConfigMock();
 +    URL topoURL = ClassLoader.getSystemResource("conf-demo/conf/topologies/admin.xml");
 +    config.setConfDir( new File(topoURL.getFile()).getParentFile().getParent() );
 +    String args[] = {"validate-topology", "--master", "knox", "--cluster", "test-cluster-bad"};
 +
 +    KnoxCLI cli = new KnoxCLI();
 +    cli.setConf( config );
 +    cli.run( args );
 +
 +    assertThat(outContent.toString(), containsString(config.getGatewayTopologyDir()));
 +    assertThat(outContent.toString(), containsString("test-cluster-bad"));
 +    assertThat(outContent.toString(), containsString("unsuccessful"));
 +    assertThat(outContent.toString(), containsString("Invalid content"));
 +    assertThat(outContent.toString(), containsString("Line"));
 +
 +    outContent.reset();
 +
 +    String[] args2 = {"validate-topology", "--master", "knox", "--cluster", "test-cluster-good"};
 +
 +    cli.run(args2);
 +
 +    assertThat(outContent.toString(), containsString(config.getGatewayTopologyDir()));
 +    assertThat(outContent.toString(), containsString("success"));
 +    assertThat(outContent.toString(), containsString("test-cluster-good"));
 +  }
 +
++  private static final String testDescriptorContentJSON = "{\n" +
++                                                          "  \"discovery-address\":\"http://localhost:8080\",\n" +
++                                                          "  \"discovery-user\":\"maria_dev\",\n" +
++                                                          "  \"discovery-pwd-alias\":\"sandbox.discovery.password\",\n" +
++                                                          "  \"provider-config-ref\":\"my-provider-config\",\n" +
++                                                          "  \"cluster\":\"Sandbox\",\n" +
++                                                          "  \"services\":[\n" +
++                                                          "    {\"name\":\"NAMENODE\"},\n" +
++                                                          "    {\"name\":\"JOBTRACKER\"},\n" +
++                                                          "    {\"name\":\"WEBHDFS\"},\n" +
++                                                          "    {\"name\":\"WEBHCAT\"},\n" +
++                                                          "    {\"name\":\"OOZIE\"},\n" +
++                                                          "    {\"name\":\"WEBHBASE\"},\n" +
++                                                          "    {\"name\":\"HIVE\"},\n" +
++                                                          "    {\"name\":\"RESOURCEMANAGER\"}\n" +
++                                                          "  ]\n" +
++                                                          "}";
 +}
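
The testDescriptorContentJSON constant above shows the shape of a simple descriptor. As a minimal sketch of how such a descriptor could be consumed, assuming Jackson's ObjectMapper is available on the classpath (this diff does not establish that), the fields can be read into a generic map:

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.util.List;
    import java.util.Map;

    public class DescriptorSketch {
      public static void main(String[] args) throws Exception {
        // A trimmed copy of the descriptor fields used in the constant above.
        String json = "{ \"provider-config-ref\":\"my-provider-config\","
                    + "  \"cluster\":\"Sandbox\","
                    + "  \"services\":[ {\"name\":\"WEBHDFS\"}, {\"name\":\"HIVE\"} ] }";
        ObjectMapper mapper = new ObjectMapper();
        // Bind to a generic Map; a real consumer would more likely bind a POJO.
        Map<?, ?> descriptor = mapper.readValue(json, Map.class);
        System.out.println(descriptor.get("provider-config-ref")); // my-provider-config
        for (Object service : (List<?>) descriptor.get("services")) {
          System.out.println(((Map<?, ?>) service).get("name"));   // WEBHDFS, HIVE
        }
      }
    }
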

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
index 3aceadd,0000000..2ca5ede
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/BadUrlTest.java
@@@ -1,309 -1,0 +1,320 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.websockets;
 +
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URL;
 +import java.util.ArrayList;
++import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.TimeUnit;
 +
 +import javax.websocket.CloseReason;
 +import javax.websocket.ContainerProvider;
 +import javax.websocket.WebSocketContainer;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentFactory;
 +import org.apache.knox.gateway.services.DefaultGatewayServices;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.test.TestUtils;
 +import org.easymock.EasyMock;
 +import org.eclipse.jetty.server.Server;
 +import org.eclipse.jetty.server.ServerConnector;
 +import org.eclipse.jetty.server.handler.ContextHandler;
 +import org.eclipse.jetty.server.handler.HandlerCollection;
 +import org.hamcrest.CoreMatchers;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +
 +/**
 + * Test for bad URLs.
 + * <p>
 + * This test sets up a bad URL through the topology, so it exercises both the
 + * bad-URL case itself and the plumbing around it.
 + * @since 0.10
 + */
 +public class BadUrlTest {
 +
 +  /**
 +   * Non-existent backend websocket server
 +   */
 +  private static final String BACKEND = "http://localhost:9999";
 +
 +  /**
 +   * Mock Gateway server
 +   */
 +  private static Server gatewayServer;
 +
 +  /**
 +   * Mock gateway config
 +   */
 +  private static GatewayConfig gatewayConfig;
 +
 +  private static GatewayServices services;
 +
 +  /**
 +   * URI for gateway server
 +   */
 +  private static URI serverUri;
 +
 +  private static File topoDir;
 +
 +  public BadUrlTest() {
 +    super();
 +  }
 +
 +  @BeforeClass
 +  public static void startServers() throws Exception {
 +
 +    startGatewayServer();
 +
 +  }
 +
 +  @AfterClass
 +  public static void stopServers() {
 +    try {
 +      gatewayServer.stop();
 +    } catch (final Exception e) {
 +      e.printStackTrace(System.err);
 +    }
 +
 +    /* Cleanup the created files */
 +    FileUtils.deleteQuietly(topoDir);
 +
 +  }
 +
 +  /**
 +   * Test websocket proxying through the gateway.
 +   *
 +   * @throws Exception
 +   */
 +  @Test
 +  public void testBadUrl() throws Exception {
 +    WebSocketContainer container = ContainerProvider.getWebSocketContainer();
 +
 +    WebsocketClient client = new WebsocketClient();
 +
 +    container.connectToServer(client,
 +        new URI(serverUri.toString() + "gateway/websocket/ws"));
 +
 +    client.awaitClose(CloseReason.CloseCodes.UNEXPECTED_CONDITION.getCode(),
 +        1000, TimeUnit.MILLISECONDS);
 +
 +    Assert.assertThat(client.close.getCloseCode().getCode(),
 +        CoreMatchers.is(CloseReason.CloseCodes.UNEXPECTED_CONDITION.getCode()));
 +
 +  }
 +
 +
 +  /**
 +   * Start Gateway Server.
 +   *
 +   * @throws Exception
 +   */
 +  private static void startGatewayServer() throws Exception {
 +    gatewayServer = new Server();
 +    final ServerConnector connector = new ServerConnector(gatewayServer);
 +    gatewayServer.addConnector(connector);
 +
 +    /* workaround so we can add our handler later at runtime */
 +    HandlerCollection handlers = new HandlerCollection(true);
 +
 +    /* add some initial handlers */
 +    ContextHandler context = new ContextHandler();
 +    context.setContextPath("/");
 +    handlers.addHandler(context);
 +
 +    gatewayServer.setHandler(handlers);
 +
 +    // Start Server
 +    gatewayServer.start();
 +
 +    String host = connector.getHost();
 +    if (host == null) {
 +      host = "localhost";
 +    }
 +    int port = connector.getLocalPort();
 +    serverUri = new URI(String.format("ws://%s:%d/", host, port));
 +
 +    /* Setup websocket handler */
 +    setupGatewayConfig(BACKEND);
 +
 +    final GatewayWebsocketHandler gatewayWebsocketHandler = new GatewayWebsocketHandler(
 +        gatewayConfig, services);
 +    handlers.addHandler(gatewayWebsocketHandler);
 +    gatewayWebsocketHandler.start();
 +  }
 +
 +  /**
 +   * Initialize the configs and components required for this test.
 +   *
 +   * @param backend address of the (intentionally unreachable) backend
 +   * @throws IOException
 +   */
 +  private static void setupGatewayConfig(final String backend)
 +      throws IOException {
 +    services = new DefaultGatewayServices();
 +
 +    topoDir = createDir();
 +    URL serviceUrl = ClassLoader.getSystemResource("websocket-services");
 +
 +    final File descriptor = new File(topoDir, "websocket.xml");
 +    final FileOutputStream stream = new FileOutputStream(descriptor);
 +    createKnoxTopology(backend).toStream(stream);
 +    stream.close();
 +
 +    final TestTopologyListener topoListener = new TestTopologyListener();
 +
 +    final Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +
 +    gatewayConfig = EasyMock.createNiceMock(GatewayConfig.class);
 +    EasyMock.expect(gatewayConfig.getGatewayTopologyDir())
 +        .andReturn(topoDir.toString()).anyTimes();
 +
++    EasyMock.expect(gatewayConfig.getGatewayProvidersConfigDir())
++            .andReturn(topoDir.getAbsolutePath() + "/shared-providers").anyTimes();
++
++    EasyMock.expect(gatewayConfig.getGatewayDescriptorsDir())
++            .andReturn(topoDir.getAbsolutePath() + "/descriptors").anyTimes();
++
 +    EasyMock.expect(gatewayConfig.getGatewayServicesDir())
 +        .andReturn(serviceUrl.getFile()).anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getEphemeralDHKeySize()).andReturn("2048")
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getGatewaySecurityDir())
 +        .andReturn(topoDir.toString()).anyTimes();
 +
 +    /* Websocket configs */
 +    EasyMock.expect(gatewayConfig.isWebsocketEnabled()).andReturn(true)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxTextMessageSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxBinaryMessageSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxTextMessageBufferSize())
 +        .andReturn(
 +            GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxBinaryMessageBufferSize())
 +        .andReturn(
 +            GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketInputBufferSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketAsyncWriteTimeout())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketIdleTimeout())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_IDLE_TIMEOUT).anyTimes();
 +
++    EasyMock.expect(gatewayConfig.getRemoteRegistryConfigurationNames())
++            .andReturn(Collections.emptyList())
++            .anyTimes();
++
 +    EasyMock.replay(gatewayConfig);
 +
 +    try {
 +      services.init(gatewayConfig, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace();
 +    }
 +
 +    DeploymentFactory.setGatewayServices(services);
 +    final TopologyService monitor = services
 +        .getService(GatewayServices.TOPOLOGY_SERVICE);
 +    monitor.addTopologyChangeListener(topoListener);
 +    monitor.reloadTopologies();
 +
 +  }
 +
 +  private static File createDir() throws IOException {
 +    return TestUtils
 +        .createTempDir(WebsocketEchoTest.class.getSimpleName() + "-");
 +  }
 +
 +  /**
 +   * Intentionally add bad URL
 +   *
 +   * @param backend backend URL to embed in the topology
 +   * @return topology XML pointing at the (intentionally bad) backend URL
 +   */
 +  private static XMLTag createKnoxTopology(final String backend) {
 +    XMLTag xml = XMLDoc.newDocument(true).addRoot("topology").addTag("service")
 +        .addTag("role").addText("WEBSOCKET").addTag("url").addText(backend)
 +        .gotoParent().gotoRoot();
 +    return xml;
 +  }
 +
 +  private static class TestTopologyListener implements TopologyListener {
 +
 +    public ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      this.events.add(events);
 +
 +      synchronized (this) {
 +        for (TopologyEvent event : events) {
 +          if (!event.getType().equals(TopologyEvent.Type.DELETED)) {
 +
 +            /* for this test we only care about this part */
 +            DeploymentFactory.createDeployment(gatewayConfig,
 +                event.getTopology());
 +
 +          }
 +        }
 +
 +      }
 +
 +    }
 +
 +  }
 +}
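
For reference, the close-code handshake that BadUrlTest exercises can be reproduced with a bare javax.websocket client. This is a minimal sketch under stated assumptions: it is not the WebsocketClient helper used in the test (that class is not part of this diff), and the URI below is a placeholder.

    import java.net.URI;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    import javax.websocket.ClientEndpoint;
    import javax.websocket.CloseReason;
    import javax.websocket.ContainerProvider;
    import javax.websocket.OnClose;
    import javax.websocket.Session;
    import javax.websocket.WebSocketContainer;

    @ClientEndpoint
    public class CloseCodeProbe {
      private final CountDownLatch closed = new CountDownLatch(1);
      private volatile CloseReason reason;

      @OnClose
      public void onClose(Session session, CloseReason reason) {
        // With an unreachable backend the proxy is expected to close with
        // UNEXPECTED_CONDITION (1011), which is what the test asserts.
        this.reason = reason;
        closed.countDown();
      }

      public CloseReason awaitClose(long millis) throws Exception {
        closed.await(millis, TimeUnit.MILLISECONDS);
        return reason;
      }

      public static void main(String[] args) throws Exception {
        WebSocketContainer container = ContainerProvider.getWebSocketContainer();
        CloseCodeProbe probe = new CloseCodeProbe();
        // Placeholder address; the test derives the real gateway URI at runtime.
        container.connectToServer(probe, new URI("ws://localhost:8888/gateway/websocket/ws"));
        System.out.println(probe.awaitClose(1000));
      }
    }
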

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
index 268e14b,0000000..64ad87c
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketEchoTest.java
@@@ -1,388 -1,0 +1,399 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.websockets;
 +
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.MatcherAssert.assertThat;
 +
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URL;
 +import java.util.ArrayList;
++import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.TimeUnit;
 +
 +import javax.websocket.ContainerProvider;
 +import javax.websocket.Session;
 +import javax.websocket.WebSocketContainer;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentFactory;
 +import org.apache.knox.gateway.services.DefaultGatewayServices;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.test.TestUtils;
 +import org.easymock.EasyMock;
 +import org.eclipse.jetty.server.Server;
 +import org.eclipse.jetty.server.ServerConnector;
 +import org.eclipse.jetty.server.handler.ContextHandler;
 +import org.eclipse.jetty.server.handler.HandlerCollection;
 +import org.junit.AfterClass;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +
 +/**
 + * A basic test that attempts to proxy websocket connections through Knox
 + * gateway.
 + * <p>
 + * The way the test is set up is as follows: <br/>
 + * <ul>
 + * <li>A Mock Websocket server is setup which simply echos the responses sent by
 + * client.
 + * <li>Knox Gateway is set up with websocket handler
 + * {@link GatewayWebsocketHandler} that can proxy the requests.
 + * <li>Appropriate Topology and service definition files are set up with the
 + * address of the Websocket server.
 + * <li>A mock client is setup to connect to gateway.
 + * </ul>
 + * 
 + * The test confirms that a message is sent all the way to the backend
 + * websocket server through Knox and echoed back.
 + *
 + * @since 0.10
 + */
 +public class WebsocketEchoTest {
 +
 +  /**
 +   * Simulate backend websocket
 +   */
 +  private static Server backendServer;
 +  /**
 +   * URI for backend websocket server
 +   */
 +  private static URI backendServerUri;
 +
 +  /**
 +   * Mock Gateway server
 +   */
 +  private static Server gatewayServer;
 +
 +  /**
 +   * Mock gateway config
 +   */
 +  private static GatewayConfig gatewayConfig;
 +
 +  private static GatewayServices services;
 +
 +  /**
 +   * URI for gateway server
 +   */
 +  private static URI serverUri;
 +
 +  private static File topoDir;
 +
 +  public WebsocketEchoTest() {
 +    super();
 +  }
 +
 +  @BeforeClass
 +  public static void startServers() throws Exception {
 +
 +    startWebsocketServer();
 +    startGatewayServer();
 +
 +  }
 +
 +  @AfterClass
 +  public static void stopServers() {
 +    try {
 +      gatewayServer.stop();
 +      backendServer.stop();
 +    } catch (final Exception e) {
 +      e.printStackTrace(System.err);
 +    }
 +
 +    /* Cleanup the created files */
 +    FileUtils.deleteQuietly(topoDir);
 +
 +  }
 +
 +  /**
 +   * Test direct connection to the websocket server without the gateway.
 +   * 
 +   * @throws Exception
 +   */
 +  @Test
 +  public void testDirectEcho() throws Exception {
 +
 +    WebSocketContainer container = ContainerProvider.getWebSocketContainer();
 +    WebsocketClient client = new WebsocketClient();
 +
 +    Session session = container.connectToServer(client, backendServerUri);
 +
 +    session.getBasicRemote().sendText("Echo");
 +    client.messageQueue.awaitMessages(1, 1000, TimeUnit.MILLISECONDS);
 +
 +  }
 +
 +  /**
 +   * Test websocket proxying through the gateway.
 +   * 
 +   * @throws Exception
 +   */
 +  @Test
 +  public void testGatewayEcho() throws Exception {
 +    WebSocketContainer container = ContainerProvider.getWebSocketContainer();
 +
 +    WebsocketClient client = new WebsocketClient();
 +    Session session = container.connectToServer(client,
 +        new URI(serverUri.toString() + "gateway/websocket/ws"));
 +
 +    session.getBasicRemote().sendText("Echo");
 +    client.messageQueue.awaitMessages(1, 1000, TimeUnit.MILLISECONDS);
 +
 +    assertThat(client.messageQueue.get(0), is("Echo"));
 +
 +  }
 +
 +  /**
 +   * Test websocket rewrite rules proxying through gateway.
 +   *
 +   * @throws Exception
 +   */
 +  @Test
 +  public void testGatewayRewriteEcho() throws Exception {
 +    WebSocketContainer container = ContainerProvider.getWebSocketContainer();
 +
 +    WebsocketClient client = new WebsocketClient();
 +    Session session = container.connectToServer(client,
 +            new URI(serverUri.toString() + "gateway/websocket/123foo456bar/channels"));
 +
 +    session.getBasicRemote().sendText("Echo");
 +    client.messageQueue.awaitMessages(1, 1000, TimeUnit.MILLISECONDS);
 +
 +    assertThat(client.messageQueue.get(0), is("Echo"));
 +
 +  }
 +
 +  /**
 +   * Start Mock Websocket server that acts as backend.
 +   * 
 +   * @throws Exception
 +   */
 +  private static void startWebsocketServer() throws Exception {
 +
 +    backendServer = new Server();
 +    ServerConnector connector = new ServerConnector(backendServer);
 +    backendServer.addConnector(connector);
 +
 +    final WebsocketEchoHandler handler = new WebsocketEchoHandler();
 +
 +    ContextHandler context = new ContextHandler();
 +    context.setContextPath("/");
 +    context.setHandler(handler);
 +    backendServer.setHandler(context);
 +
 +    // Start Server
 +    backendServer.start();
 +
 +    String host = connector.getHost();
 +    if (host == null) {
 +      host = "localhost";
 +    }
 +    int port = connector.getLocalPort();
 +    backendServerUri = new URI(String.format("ws://%s:%d/ws", host, port));
 +
 +  }
 +
 +  /**
 +   * Start Gateway Server.
 +   * 
 +   * @throws Exception
 +   */
 +  private static void startGatewayServer() throws Exception {
 +    gatewayServer = new Server();
 +    final ServerConnector connector = new ServerConnector(gatewayServer);
 +    gatewayServer.addConnector(connector);
 +
 +    /* workaround so we can add our handler later at runtime */
 +    HandlerCollection handlers = new HandlerCollection(true);
 +
 +    /* add some initial handlers */
 +    ContextHandler context = new ContextHandler();
 +    context.setContextPath("/");
 +    handlers.addHandler(context);
 +
 +    gatewayServer.setHandler(handlers);
 +
 +    // Start Server
 +    gatewayServer.start();
 +
 +    String host = connector.getHost();
 +    if (host == null) {
 +      host = "localhost";
 +    }
 +    int port = connector.getLocalPort();
 +    serverUri = new URI(String.format("ws://%s:%d/", host, port));
 +
 +    /* Setup websocket handler */
 +    setupGatewayConfig(backendServerUri.toString());
 +
 +    final GatewayWebsocketHandler gatewayWebsocketHandler = new GatewayWebsocketHandler(
 +        gatewayConfig, services);
 +    handlers.addHandler(gatewayWebsocketHandler);
 +    gatewayWebsocketHandler.start();
 +  }
 +
 +  /**
 +   * Initialize the configs and components required for this test.
 +   * 
 +   * @param backend address of the echo backend to proxy to
 +   * @throws IOException
 +   */
 +  private static void setupGatewayConfig(final String backend)
 +      throws IOException {
 +    services = new DefaultGatewayServices();
 +
 +    topoDir = createDir();
 +    URL serviceUrl = ClassLoader.getSystemResource("websocket-services");
 +
 +    final File descriptor = new File(topoDir, "websocket.xml");
 +    final FileOutputStream stream = new FileOutputStream(descriptor);
 +    createKnoxTopology(backend).toStream(stream);
 +    stream.close();
 +
 +    final TestTopologyListener topoListener = new TestTopologyListener();
 +
 +    final Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +
 +    gatewayConfig = EasyMock.createNiceMock(GatewayConfig.class);
 +    EasyMock.expect(gatewayConfig.getGatewayTopologyDir())
 +        .andReturn(topoDir.toString()).anyTimes();
 +
++    EasyMock.expect(gatewayConfig.getGatewayProvidersConfigDir())
++            .andReturn(topoDir.getAbsolutePath() + "/shared-providers").anyTimes();
++
++    EasyMock.expect(gatewayConfig.getGatewayDescriptorsDir())
++            .andReturn(topoDir.getAbsolutePath() + "/descriptors").anyTimes();
++
 +    EasyMock.expect(gatewayConfig.getGatewayServicesDir())
 +        .andReturn(serviceUrl.getFile()).anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getEphemeralDHKeySize()).andReturn("2048")
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getGatewaySecurityDir())
 +        .andReturn(topoDir.toString()).anyTimes();
 +
 +    /* Websocket configs */
 +    EasyMock.expect(gatewayConfig.isWebsocketEnabled()).andReturn(true)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxTextMessageSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxBinaryMessageSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxTextMessageBufferSize())
 +        .andReturn(
 +            GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxBinaryMessageBufferSize())
 +        .andReturn(
 +            GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketInputBufferSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketAsyncWriteTimeout())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketIdleTimeout())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_IDLE_TIMEOUT).anyTimes();
 +
++    EasyMock.expect(gatewayConfig.getRemoteRegistryConfigurationNames())
++            .andReturn(Collections.emptyList())
++            .anyTimes();
++
 +    EasyMock.replay(gatewayConfig);
 +
 +    try {
 +      services.init(gatewayConfig, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace();
 +    }
 +
 +    DeploymentFactory.setGatewayServices(services);
 +    final TopologyService monitor = services
 +        .getService(GatewayServices.TOPOLOGY_SERVICE);
 +    monitor.addTopologyChangeListener(topoListener);
 +    monitor.reloadTopologies();
 +
 +  }
 +
 +  private static File createDir() throws IOException {
 +    return TestUtils
 +        .createTempDir(WebsocketEchoTest.class.getSimpleName() + "-");
 +  }
 +
 +  private static XMLTag createKnoxTopology(final String backend) {
 +    XMLTag xml = XMLDoc.newDocument(true).addRoot("topology").addTag("service")
 +        .addTag("role").addText("WEBSOCKET").addTag("url").addText(backend)
 +        .gotoParent().gotoRoot();
 +    return xml;
 +  }
 +
 +  private static class TestTopologyListener implements TopologyListener {
 +
 +    public ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      this.events.add(events);
 +
 +      synchronized (this) {
 +        for (TopologyEvent event : events) {
 +          if (!event.getType().equals(TopologyEvent.Type.DELETED)) {
 +
 +            /* for this test we only care about this part */
 +            DeploymentFactory.createDeployment(gatewayConfig,
 +                event.getTopology());
 +
 +          }
 +        }
 +
 +      }
 +
 +    }
 +
 +  }
 +}
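
The WebsocketEchoHandler referenced above is not included in this diff. As a rough sketch of an equivalent echo endpoint, here is one written against the javax.websocket server API; the test itself wires a Jetty handler, so this illustrates the contract rather than the actual class:

    import java.io.IOException;

    import javax.websocket.OnMessage;
    import javax.websocket.Session;
    import javax.websocket.server.ServerEndpoint;

    // Hypothetical stand-in for the echo backend: whatever text arrives is
    // written straight back on the same session, which is the behavior
    // testGatewayEcho relies on when it asserts the "Echo" round trip.
    @ServerEndpoint("/ws")
    public class EchoEndpointSketch {
      @OnMessage
      public void onMessage(String message, Session session) throws IOException {
        session.getBasicRemote().sendText(message);
      }
    }
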

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
index 42bc9c3,0000000..5e5006c
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/WebsocketMultipleConnectionTest.java
@@@ -1,389 -1,0 +1,400 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.websockets;
 +
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.lang.management.ManagementFactory;
 +import java.lang.management.MemoryMXBean;
 +import java.net.URI;
 +import java.net.URL;
 +import java.util.ArrayList;
++import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.CountDownLatch;
 +import java.util.concurrent.TimeUnit;
 +
 +import javax.websocket.ContainerProvider;
 +import javax.websocket.Endpoint;
 +import javax.websocket.EndpointConfig;
 +import javax.websocket.MessageHandler;
 +import javax.websocket.Session;
 +import javax.websocket.WebSocketContainer;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentFactory;
 +import org.apache.knox.gateway.services.DefaultGatewayServices;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.test.TestUtils;
 +import org.easymock.EasyMock;
 +import org.eclipse.jetty.server.Server;
 +import org.eclipse.jetty.server.ServerConnector;
 +import org.eclipse.jetty.server.handler.ContextHandler;
 +import org.eclipse.jetty.server.handler.HandlerCollection;
 +import org.eclipse.jetty.util.thread.QueuedThreadPool;
 +import org.junit.AfterClass;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +
 +/**
 + * Test how Knox holds up under multiple concurrent connections.
 + *
 + */
 +public class WebsocketMultipleConnectionTest {
 +  /**
 +   * Simulate backend websocket
 +   */
 +  private static Server backendServer;
 +  /**
 +   * URI for backend websocket server
 +   */
 +  private static URI backendServerUri;
 +
 +  /**
 +   * Mock Gateway server
 +   */
 +  private static Server gatewayServer;
 +
 +  /**
 +   * Mock gateway config
 +   */
 +  private static GatewayConfig gatewayConfig;
 +
 +  private static GatewayServices services;
 +
 +  /**
 +   * URI for gateway server
 +   */
 +  private static URI serverUri;
 +
 +  private static File topoDir;
 +
 +  /**
 +   * Maximum number of open connections to test.
 +   */
 +  private static final int MAX_CONNECTIONS = 100;
 +
 +  public WebsocketMultipleConnectionTest() {
 +    super();
 +  }
 +
 +  @BeforeClass
 +  public static void startServers() throws Exception {
 +
 +    startWebsocketServer();
 +    startGatewayServer();
 +
 +  }
 +
 +  @AfterClass
 +  public static void stopServers() {
 +    try {
 +      gatewayServer.stop();
 +      backendServer.stop();
 +    } catch (final Exception e) {
 +      e.printStackTrace(System.err);
 +    }
 +
 +    /* Cleanup the created files */
 +    FileUtils.deleteQuietly(topoDir);
 +
 +  }
 +
 +  /**
 +   * Test websocket proxying through the gateway under many concurrent connections.
 +   * 
 +   * @throws Exception
 +   */
 +  @Test
 +  public void testMultipleConnections() throws Exception {
 +    WebSocketContainer container = ContainerProvider.getWebSocketContainer();
 +
 +    final CountDownLatch latch = new CountDownLatch(MAX_CONNECTIONS);
 +
 +    Session[] sessions = new Session[MAX_CONNECTIONS];
 +
 +    MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
 +
 +    System.gc();
 +    final long heapt1 = memoryMXBean.getHeapMemoryUsage().getUsed();
 +    final long nonHeapt1 = memoryMXBean.getNonHeapMemoryUsage().getUsed();
 +
 +    for (int i = 0; i < MAX_CONNECTIONS; i++) {
 +
 +      sessions[i] = container.connectToServer(new WebsocketClient() {
 +
 +        @Override
 +        public void onMessage(String message) {
 +          latch.countDown();
 +
 +        }
 +
 +      }, new URI(serverUri.toString() + "gateway/websocket/ws"));
 +
 +    }
 +
 +    for (int i = 0; i < MAX_CONNECTIONS; i++) {
 +      /* make sure the session is open and valid before trying to send */
 +      if(sessions[i].isOpen() && sessions[i].getBasicRemote() != null) {
 +        sessions[i].getBasicRemote().sendText("OK");
 +      }
 +    }
 +
 +    latch.await(5 * MAX_CONNECTIONS, TimeUnit.MILLISECONDS);
 +
 +    System.gc();
 +
 +    final long heapUsed = memoryMXBean.getHeapMemoryUsage().getUsed() - heapt1;
 +    final long nonHeapUsed = memoryMXBean.getNonHeapMemoryUsage().getUsed()
 +        - nonHeapt1;
 +
 +    System.out.println("heapUsed = " + heapUsed);
 +    System.out.println("nonHeapUsed = " + nonHeapUsed);
 +
 +    /* 90 KB per connection */
 +    /*
 +    long expected = 90 * 1024 * MAX_CONNECTIONS;
 +    assertThat("heap used", heapUsed, lessThan(expected));
 +    */
 +  }
 +
 +  /**
 +   * Start Mock Websocket server that acts as backend.
 +   * 
 +   * @throws Exception
 +   */
 +  private static void startWebsocketServer() throws Exception {
 +
 +    backendServer = new Server(new QueuedThreadPool(254));
 +    ServerConnector connector = new ServerConnector(backendServer);
 +    backendServer.addConnector(connector);
 +
 +    final WebsocketEchoHandler handler = new WebsocketEchoHandler();
 +
 +    ContextHandler context = new ContextHandler();
 +    context.setContextPath("/");
 +    context.setHandler(handler);
 +    backendServer.setHandler(context);
 +
 +    // Start Server
 +    backendServer.start();
 +
 +    String host = connector.getHost();
 +    if (host == null) {
 +      host = "localhost";
 +    }
 +    int port = connector.getLocalPort();
 +    backendServerUri = new URI(String.format("ws://%s:%d/ws", host, port));
 +
 +  }
 +
 +  /**
 +   * Start Gateway Server.
 +   * 
 +   * @throws Exception
 +   */
 +  private static void startGatewayServer() throws Exception {
 +    /* use default Max threads */
 +    gatewayServer = new Server(new QueuedThreadPool(254));
 +    final ServerConnector connector = new ServerConnector(gatewayServer);
 +    gatewayServer.addConnector(connector);
 +
 +    /* workaround so we can add our handler later at runtime */
 +    HandlerCollection handlers = new HandlerCollection(true);
 +
 +    /* add some initial handlers */
 +    ContextHandler context = new ContextHandler();
 +    context.setContextPath("/");
 +    handlers.addHandler(context);
 +
 +    gatewayServer.setHandler(handlers);
 +
 +    // Start Server
 +    gatewayServer.start();
 +
 +    String host = connector.getHost();
 +    if (host == null) {
 +      host = "localhost";
 +    }
 +    int port = connector.getLocalPort();
 +    serverUri = new URI(String.format("ws://%s:%d/", host, port));
 +
 +    /* Setup websocket handler */
 +    setupGatewayConfig(backendServerUri.toString());
 +
 +    final GatewayWebsocketHandler gatewayWebsocketHandler = new GatewayWebsocketHandler(
 +        gatewayConfig, services);
 +    handlers.addHandler(gatewayWebsocketHandler);
 +    gatewayWebsocketHandler.start();
 +  }
 +
 +  /**
 +   * Initialize the configs and components required for this test.
 +   * 
 +   * @param backend address of the echo backend to proxy to
 +   * @throws IOException
 +   */
 +  private static void setupGatewayConfig(final String backend)
 +      throws IOException {
 +    services = new DefaultGatewayServices();
 +
 +    topoDir = createDir();
 +    URL serviceUrl = ClassLoader.getSystemResource("websocket-services");
 +
 +    final File descriptor = new File(topoDir, "websocket.xml");
 +    final FileOutputStream stream = new FileOutputStream(descriptor);
 +    createKnoxTopology(backend).toStream(stream);
 +    stream.close();
 +
 +    final TestTopologyListener topoListener = new TestTopologyListener();
 +
 +    final Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +
 +    gatewayConfig = EasyMock.createNiceMock(GatewayConfig.class);
 +    EasyMock.expect(gatewayConfig.getGatewayTopologyDir())
 +        .andReturn(topoDir.toString()).anyTimes();
 +
++    EasyMock.expect(gatewayConfig.getGatewayProvidersConfigDir())
++            .andReturn(topoDir.getAbsolutePath() + "/shared-providers").anyTimes();
++
++    EasyMock.expect(gatewayConfig.getGatewayDescriptorsDir())
++            .andReturn(topoDir.getAbsolutePath() + "/descriptors").anyTimes();
++
 +    EasyMock.expect(gatewayConfig.getGatewayServicesDir())
 +        .andReturn(serviceUrl.getFile()).anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getEphemeralDHKeySize()).andReturn("2048")
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getGatewaySecurityDir())
 +        .andReturn(topoDir.toString()).anyTimes();
 +
 +    /* Websocket configs */
 +    EasyMock.expect(gatewayConfig.isWebsocketEnabled()).andReturn(true)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxTextMessageSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxBinaryMessageSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxTextMessageBufferSize())
 +        .andReturn(
 +            GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketMaxBinaryMessageBufferSize())
 +        .andReturn(
 +            GatewayConfigImpl.DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketInputBufferSize())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketAsyncWriteTimeout())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT)
 +        .anyTimes();
 +
 +    EasyMock.expect(gatewayConfig.getWebsocketIdleTimeout())
 +        .andReturn(GatewayConfigImpl.DEFAULT_WEBSOCKET_IDLE_TIMEOUT).anyTimes();
 +
++    EasyMock.expect(gatewayConfig.getRemoteRegistryConfigurationNames())
++            .andReturn(Collections.emptyList())
++            .anyTimes();
++
 +    EasyMock.replay(gatewayConfig);
 +
 +    try {
 +      services.init(gatewayConfig, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace();
 +    }
 +
 +    DeploymentFactory.setGatewayServices(services);
 +    final TopologyService monitor = services
 +        .getService(GatewayServices.TOPOLOGY_SERVICE);
 +    monitor.addTopologyChangeListener(topoListener);
 +    monitor.reloadTopologies();
 +
 +  }
 +
 +  private static File createDir() throws IOException {
 +    return TestUtils
 +        .createTempDir(WebsocketEchoTest.class.getSimpleName() + "-");
 +  }
 +
 +  private static XMLTag createKnoxTopology(final String backend) {
 +    XMLTag xml = XMLDoc.newDocument(true).addRoot("topology").addTag("service")
 +        .addTag("role").addText("WEBSOCKET").addTag("url").addText(backend)
 +        .gotoParent().gotoRoot();
 +    // System.out.println( "GATEWAY=" + xml.toString() );
 +    return xml;
 +  }
 +
 +  private static class TestTopologyListener implements TopologyListener {
 +
 +    public List<List<TopologyEvent>> events = new ArrayList<>();
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      this.events.add(events);
 +
 +      synchronized (this) {
 +        for (TopologyEvent event : events) {
 +          if (!event.getType().equals(TopologyEvent.Type.DELETED)) {
 +
 +            /* for this test we only care about this part */
 +            DeploymentFactory.createDeployment(gatewayConfig,
 +                event.getTopology());
 +
 +          }
 +        }
 +
 +      }
 +
 +    }
 +
 +  }
 +
 +  private static abstract class WebsocketClient extends Endpoint
 +      implements MessageHandler.Whole<String> {
 +    @Override
 +    public void onOpen(Session session, EndpointConfig config) {
 +      session.addMessageHandler(this);
 +    }
 +  }
 +
 +}
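
A note on the server wiring in startGatewayServer above: HandlerCollection(true) creates a handler collection that stays mutable while the server is running, which is what allows GatewayWebsocketHandler to be registered after gatewayServer.start(); a handler added that way is not started automatically, hence the explicit gatewayWebsocketHandler.start() call. A minimal, self-contained sketch of the same pattern, assuming the Jetty 9 API used throughout this patch (the EchoHandler body and port handling are illustrative only):

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.HandlerCollection;

public class MutableHandlerExample {
  public static void main(String[] args) throws Exception {
    Server server = new Server();
    ServerConnector connector = new ServerConnector(server);
    server.addConnector(connector);

    /* 'true' keeps the collection mutable after the server starts */
    HandlerCollection handlers = new HandlerCollection(true);
    server.setHandler(handlers);
    server.start();

    /* register a handler at runtime; it must be started by hand */
    AbstractHandler echo = new AbstractHandler() {
      @Override
      public void handle(String target, Request baseRequest,
          HttpServletRequest request, HttpServletResponse response)
          throws java.io.IOException {
        response.getWriter().write("echo");
        baseRequest.setHandled(true);
      }
    };
    handlers.addHandler(echo);
    echo.start();

    System.out.println("Listening on port " + connector.getLocalPort());
    server.stop();
  }
}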

http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
----------------------------------------------------------------------
diff --cc gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
index c6135ae,0000000..e69de29
mode 100644,000000..100644
--- a/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml
+++ b/gateway-service-definitions/src/main/resources/services/ambariui/2.2.1/service.xml


[03/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyWebSocketAdapter.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyWebSocketAdapter.java
index 850157e,0000000..a678a72
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyWebSocketAdapter.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyWebSocketAdapter.java
@@@ -1,276 -1,0 +1,289 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.websockets;
 +
 +import java.io.IOException;
 +import java.net.URI;
 +import java.util.concurrent.ExecutorService;
 +
++import javax.websocket.ClientEndpointConfig;
 +import javax.websocket.CloseReason;
 +import javax.websocket.ContainerProvider;
 +import javax.websocket.DeploymentException;
 +import javax.websocket.WebSocketContainer;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.eclipse.jetty.io.RuntimeIOException;
 +import org.eclipse.jetty.util.component.LifeCycle;
 +import org.eclipse.jetty.websocket.api.BatchMode;
 +import org.eclipse.jetty.websocket.api.RemoteEndpoint;
 +import org.eclipse.jetty.websocket.api.Session;
 +import org.eclipse.jetty.websocket.api.StatusCode;
 +import org.eclipse.jetty.websocket.api.WebSocketAdapter;
 +
 +/**
 + * Handles outbound/inbound Websocket connections and sessions.
 + *
 + * @since 0.10
 + */
 +public class ProxyWebSocketAdapter extends WebSocketAdapter {
 +
 +  private static final WebsocketLogMessages LOG = MessagesFactory
 +      .get(WebsocketLogMessages.class);
 +
 +  /* URI for the backend */
 +  private final URI backend;
 +
 +  /* Session between the frontend (browser) and Knox */
 +  private Session frontendSession;
 +
 +  /* Session between the backend (outbound) and Knox */
 +  private javax.websocket.Session backendSession;
 +
 +  private WebSocketContainer container;
 +
 +  private ExecutorService pool;
 +
 +  /**
++   * Used to transmit headers from browser to backend server.
++   * @since 0.14
++   */
++  private ClientEndpointConfig clientConfig;
++
++  /**
 +   * Create an instance
 +   */
 +  public ProxyWebSocketAdapter(final URI backend, final ExecutorService pool) {
++    this(backend, pool, null);
++  }
++
++  public ProxyWebSocketAdapter(final URI backend, final ExecutorService pool, final ClientEndpointConfig clientConfig) {
 +    super();
 +    this.backend = backend;
 +    this.pool = pool;
++    this.clientConfig = clientConfig;
 +  }
 +
 +  @Override
 +  public void onWebSocketConnect(final Session frontEndSession) {
 +
 +    /*
 +     * Let's connect to the backend; this is where the backend-to-frontend
 +     * plumbing takes place.
 +     */
 +    container = ContainerProvider.getWebSocketContainer();
-     final ProxyInboundSocket backendSocket = new ProxyInboundSocket(
-         getMessageCallback());
++
++    final ProxyInboundClient backendSocket = new ProxyInboundClient(getMessageCallback());
 +
 +    /* Attempt to connect to the backend with the supplied client configuration */
 +    try {
-       backendSession = container.connectToServer(backendSocket, backend);
++      backendSession = container.connectToServer(backendSocket, clientConfig, backend);
++
 +      LOG.onConnectionOpen(backend.toString());
 +
 +    } catch (DeploymentException e) {
 +      LOG.connectionFailed(e);
 +      throw new RuntimeException(e);
 +    } catch (IOException e) {
 +      LOG.connectionFailed(e);
 +      throw new RuntimeIOException(e);
 +    }
 +
 +    super.onWebSocketConnect(frontEndSession);
 +    this.frontendSession = frontEndSession;
 +
 +  }
 +
 +  @Override
 +  public void onWebSocketBinary(final byte[] payload, final int offset,
 +      final int length) {
 +
 +    if (isNotConnected()) {
 +      return;
 +    }
 +
 +    throw new UnsupportedOperationException(
 +        "Websocket support for binary messages is not supported at this time.");
 +  }
 +
 +  @Override
 +  public void onWebSocketText(final String message) {
 +
 +    if (isNotConnected()) {
 +      return;
 +    }
 +
 +    LOG.logMessage("[From Frontend --->]" + message);
 +
 +    /* Proxy message to backend */
 +    try {
 +      backendSession.getBasicRemote().sendText(message);
 +
 +    } catch (IOException e) {
 +      LOG.connectionFailed(e);
 +    }
 +
 +  }
 +
 +  @Override
 +  public void onWebSocketClose(int statusCode, String reason) {
 +    super.onWebSocketClose(statusCode, reason);
 +
 +    /* do the cleanup in a separate thread so we don't block */
 +    pool.execute(new Runnable() {
 +      @Override
 +      public void run() {
 +        closeQuietly();
 +      }
 +    });
 +
 +    LOG.onConnectionClose(backend.toString());
 +
 +  }
 +
 +  @Override
 +  public void onWebSocketError(final Throwable t) {
 +    cleanupOnError(t);
 +  }
 +
 +  /**
 +   * Cleanup sessions
 +   */
 +  private void cleanupOnError(final Throwable t) {
 +
 +    LOG.onError(t.toString());
 +    if (t.toString().contains("exceeds maximum size")) {
 +      if(frontendSession != null && frontendSession.isOpen()) {
 +        frontendSession.close(StatusCode.MESSAGE_TOO_LARGE, t.getMessage());
 +      }
 +    }
 +
 +    else {
 +      if(frontendSession != null && frontendSession.isOpen()) {
 +        frontendSession.close(StatusCode.SERVER_ERROR, t.getMessage());
 +      }
 +
 +      /* do the cleanup in a separate thread so we don't block */
 +      pool.execute(new Runnable() {
 +        @Override
 +        public void run() {
 +          closeQuietly();
 +        }
 +      });
 +
 +    }
 +  }
 +
 +  private MessageEventCallback getMessageCallback() {
 +
 +    return new MessageEventCallback() {
 +
 +      @Override
 +      public void doCallback(String message) {
 +        /* do nothing */
 +
 +      }
 +
 +      @Override
 +      public void onConnectionOpen(Object session) {
 +        /* do nothing */
 +
 +      }
 +
 +      @Override
 +      public void onConnectionClose(final CloseReason reason) {
 +        try {
 +          frontendSession.close(reason.getCloseCode().getCode(),
 +              reason.getReasonPhrase());
 +        } finally {
 +
 +          /* do the cleanup in a separate thread so we don't block */
 +          pool.execute(new Runnable() {
 +            @Override
 +            public void run() {
 +              closeQuietly();
 +            }
 +          });
 +
 +        }
 +
 +      }
 +
 +      @Override
 +      public void onError(Throwable cause) {
 +        cleanupOnError(cause);
 +      }
 +
 +      @Override
 +      public void onMessageText(String message, Object session) {
 +        final RemoteEndpoint remote = getRemote();
 +
 +        LOG.logMessage("[From Backend <---]" + message);
 +
 +        /* Proxy message to frontend */
 +        try {
 +          remote.sendString(message);
 +          if (remote.getBatchMode() == BatchMode.ON) {
 +            remote.flush();
 +          }
 +        } catch (IOException e) {
 +          LOG.connectionFailed(e);
 +          throw new RuntimeIOException(e);
 +        }
 +
 +      }
 +
 +      @Override
 +      public void onMessageBinary(byte[] message, boolean last,
 +          Object session) {
 +        throw new UnsupportedOperationException(
 +            "Websocket support for binary messages is not supported at this time.");
 +
 +      }
 +
 +    };
 +
 +  }
 +
 +  private void closeQuietly() {
 +
 +    try {
 +      if(backendSession != null && backendSession.isOpen()) {
 +        backendSession.close();
 +      }
 +    } catch (IOException e) {
 +      LOG.connectionFailed(e);
 +    }
 +
 +    if (container instanceof LifeCycle) {
 +      try {
 +        ((LifeCycle) container).stop();
 +      } catch (Exception e) {
 +        LOG.connectionFailed(e);
 +      }
 +    }
 +
 +    if(frontendSession != null && frontendSession.isOpen()) {
 +      frontendSession.close();
 +    }
 +
 +  }
 +
 +}
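
The clientConfig passed through the new constructor above is what carries frontend handshake headers to the backend: a javax.websocket ClientEndpointConfig holds a Configurator whose beforeRequest callback can populate the outbound upgrade request. A minimal sketch of building such a config, assuming nothing beyond the standard javax.websocket API (the header name and value are illustrative, not taken from this patch):

import java.util.Collections;
import java.util.List;
import java.util.Map;

import javax.websocket.ClientEndpointConfig;

public final class HeaderForwardingConfig {

  private HeaderForwardingConfig() {
  }

  /* Build a client config that injects one header into the handshake. */
  public static ClientEndpointConfig create(final String name, final String value) {
    ClientEndpointConfig.Configurator configurator =
        new ClientEndpointConfig.Configurator() {
          @Override
          public void beforeRequest(Map<String, List<String>> headers) {
            /* invoked just before the outbound handshake request is sent */
            headers.put(name, Collections.singletonList(value));
          }
        };
    return ClientEndpointConfig.Builder.create()
        .configurator(configurator)
        .build();
  }
}

A config built this way would be handed to the three-argument constructor and flow straight into container.connectToServer(backendSocket, clientConfig, backend) in onWebSocketConnect.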

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
index b713491,0000000..b5558fd
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
@@@ -1,239 -1,0 +1,392 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.util.XmlUtils;
++import java.io.ByteArrayInputStream;
++import java.io.File;
++import java.io.FileOutputStream;
++import java.io.IOException;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++
++import javax.xml.xpath.XPath;
++import javax.xml.xpath.XPathConstants;
++import javax.xml.xpath.XPathFactory;
++
++import org.apache.commons.io.FileUtils;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.Node;
 +import org.w3c.dom.NodeList;
 +import org.xml.sax.SAXException;
 +
- import javax.xml.xpath.XPath;
- import javax.xml.xpath.XPathConstants;
- import javax.xml.xpath.XPathFactory;
- import java.io.*;
- import java.util.*;
- 
- import static org.junit.Assert.*;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertNotNull;
++import static org.junit.Assert.assertTrue;
++import static org.junit.Assert.fail;
 +
 +
 +public class SimpleDescriptorHandlerTest {
 +
 +    private static final String TEST_PROVIDER_CONFIG =
 +            "    <gateway>\n" +
 +                    "        <provider>\n" +
 +                    "            <role>authentication</role>\n" +
 +                    "            <name>ShiroProvider</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param>\n" +
 +                    "                <!-- \n" +
 +                    "                session timeout in minutes,  this is really idle timeout,\n" +
 +                    "                defaults to 30mins, if the property value is not defined,, \n" +
 +                    "                current client authentication would expire if client idles contiuosly for more than this value\n" +
 +                    "                -->\n" +
 +                    "                <name>sessionTimeout</name>\n" +
 +                    "                <value>30</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapContextFactory</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory</name>\n" +
 +                    "                <value>$ldapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.userDnTemplate</name>\n" +
 +                    "                <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.url</name>\n" +
 +                    "                <value>ldap://localhost:33389</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
 +                    "                <value>simple</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>urls./**</name>\n" +
 +                    "                <value>authcBasic</value>\n" +
 +                    "            </param>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <provider>\n" +
 +                    "            <role>identity-assertion</role>\n" +
 +                    "            <name>Default</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <!--\n" +
 +                    "        Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.\n" +
 +                    "        For example, a hadoop service running in AWS may return a response that includes URLs containing the\n" +
 +                    "        some AWS internal host name.  If the client needs to make a subsequent request to the host identified\n" +
 +                    "        in those URLs they need to be mapped to external host names that the client Knox can use to connect.\n" +
 +                    "\n" +
 +                    "        If the external hostname and internal host names are same turn of this provider by setting the value of\n" +
 +                    "        enabled parameter as false.\n" +
 +                    "\n" +
 +                    "        The name parameter specifies the external host names in a comma separated list.\n" +
 +                    "        The value parameter specifies corresponding internal host names in a comma separated list.\n" +
 +                    "\n" +
 +                    "        Note that when you are using Sandbox, the external hostname needs to be localhost, as seen in out\n" +
 +                    "        of box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the\n" +
 +                    "        Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.\n" +
 +                    "        -->\n" +
 +                    "        <provider>\n" +
 +                    "            <role>hostmap</role>\n" +
 +                    "            <name>static</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
 +                    "        </provider>\n" +
 +                    "    </gateway>\n";
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
 +     * N.B. This test depends on the DummyServiceDiscovery extension being configured:
 +     *             org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
 +     */
 +    @Test
 +    public void testSimpleDescriptorHandler() throws Exception {
 +
 +        final String type = "DUMMY";
 +        final String address = "http://c6401.ambari.apache.org:8080";
 +        final String clusterName = "dummy";
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
-         serviceURLs.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
++        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
 +        // Write the externalized provider config to a temp file
 +        File providerConfig = writeProviderConfig("ambari-cluster-policy.xml", TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
 +            File destDir = (new File(".")).getCanonicalFile();
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(clusterName).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                           SimpleDescriptorHandler.handle(testDescriptor,
 +                                                          providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                          destDir);
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                       extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                        (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
 +                    assertNotNull("Every declared service should have a URL.", url);
 +                    if (!topologyServiceURLs.containsKey(role)) {
 +                        topologyServiceURLs.put(role, new ArrayList<String>());
 +                    }
 +                    topologyServiceURLs.get(role).add(url);
 +                }
 +            }
 +            assertEquals("Unexpected number of service declarations.", serviceURLs.size(), topologyServiceURLs.size());
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            providerConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
-     private File writeProviderConfig(String path, String content) throws IOException {
-         File f = new File(path);
++    /**
++     * KNOX-1006
++     *
++     * Verify the behavior of the SimpleDescriptorHandler when service discovery fails to produce a valid URL for
++     * a service.
++     *
++     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
++     *             org.apache.hadoop.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
++     */
++    @Test
++    public void testInvalidServiceURLFromDiscovery() throws Exception {
++        final String CLUSTER_NAME = "myproperties";
++
++        // Configure the PropertiesFile Service Discovery implementation for this test
++        final String DEFAULT_VALID_SERVICE_URL = "http://localhost:9999/thiswillwork";
++        Properties serviceDiscoverySourceProps = new Properties();
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".NAMENODE",
++                                                DEFAULT_VALID_SERVICE_URL.replace("http", "hdfs"));
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".JOBTRACKER",
++                                                DEFAULT_VALID_SERVICE_URL.replace("http", "rpc"));
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHDFS",         DEFAULT_VALID_SERVICE_URL);
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHCAT",         DEFAULT_VALID_SERVICE_URL);
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".OOZIE",           DEFAULT_VALID_SERVICE_URL);
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHBASE",        DEFAULT_VALID_SERVICE_URL);
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".HIVE",            "{SCHEME}://localhost:10000/");
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".RESOURCEMANAGER", DEFAULT_VALID_SERVICE_URL);
++        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".AMBARIUI",        DEFAULT_VALID_SERVICE_URL);
++        File serviceDiscoverySource = File.createTempFile("service-discovery", ".properties");
++        try (FileOutputStream out = new FileOutputStream(serviceDiscoverySource)) {
++            serviceDiscoverySourceProps.store(out, "Test Service Discovery Source");
++        }
++
++        // Prepare a mock SimpleDescriptor
++        final String type = "PROPERTIES_FILE";
++        final String address = serviceDiscoverySource.getAbsolutePath();
++        final Map<String, List<String>> serviceURLs = new HashMap<>();
++        serviceURLs.put("NAMENODE", null);
++        serviceURLs.put("JOBTRACKER", null);
++        serviceURLs.put("WEBHDFS", null);
++        serviceURLs.put("WEBHCAT", null);
++        serviceURLs.put("OOZIE", null);
++        serviceURLs.put("WEBHBASE", null);
++        serviceURLs.put("HIVE", null);
++        serviceURLs.put("RESOURCEMANAGER", null);
++        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
-         Writer fw = new FileWriter(f);
-         fw.write(content);
-         fw.flush();
-         fw.close();
++        // Write the externalized provider config to a temp file
++        File providerConfig = writeProviderConfig("ambari-cluster-policy.xml", TEST_PROVIDER_CONFIG);
++
++        File topologyFile = null;
++        try {
++            File destDir = (new File(".")).getCanonicalFile();
++
++            // Mock out the simple descriptor
++            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
++            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
++            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
++            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
++            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
++            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
++            EasyMock.expect(testDescriptor.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
++            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
++            for (String serviceName : serviceURLs.keySet()) {
++                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
++                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
++                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
++                EasyMock.replay(svc);
++                serviceMocks.add(svc);
++            }
++            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
++            EasyMock.replay(testDescriptor);
++
++            // Invoke the simple descriptor handler
++            Map<String, File> files =
++                    SimpleDescriptorHandler.handle(testDescriptor,
++                                                   providerConfig.getParentFile(), // simple desc co-located with provider config
++                                                   destDir);
++
++            topologyFile = files.get("topology");
 +
++            // Validate the resulting topology descriptor
++            assertTrue(topologyFile.exists());
++
++            // Validate the topology descriptor's correctness
++            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
++            if( !validator.validateTopology() ){
++                throw new SAXException( validator.getErrorString() );
++            }
++
++            XPathFactory xPathfactory = XPathFactory.newInstance();
++            XPath xpath = xPathfactory.newXPath();
++
++            // Parse the topology descriptor
++            Document topologyXml = XmlUtils.readXml(topologyFile);
++
++            // Validate the provider configuration
++            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
++            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
++            assertTrue("Resulting provider config should be identical to the referenced content.",
++                    extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
++
++            // Validate the service declarations
++            List<String> topologyServices = new ArrayList<>();
++            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
++            NodeList serviceNodes =
++                    (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
++            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
++                Node serviceNode = serviceNodes.item(serviceNodeIndex);
++                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
++                assertNotNull(roleNode);
++                String role = roleNode.getNodeValue();
++                topologyServices.add(role);
++                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
++                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
++                    Node urlNode = urlNodes.item(urlNodeIndex);
++                    assertNotNull(urlNode);
++                    String url = urlNode.getNodeValue();
++                    assertNotNull("Every declared service should have a URL.", url);
++                    if (!topologyServiceURLs.containsKey(role)) {
++                        topologyServiceURLs.put(role, new ArrayList<String>());
++                    }
++                    topologyServiceURLs.get(role).add(url);
++                }
++            }
++
++            // There should not be a service element for HIVE, since it had no valid URLs
++            assertEquals("Unexpected number of service declarations.", serviceURLs.size() - 1, topologyServices.size());
++            assertFalse("The HIVE service should have been omitted from the generated topology.", topologyServices.contains("HIVE"));
++
++            assertEquals("Unexpected number of service URLs.", serviceURLs.size() - 1, topologyServiceURLs.size());
++
++        } catch (Exception e) {
++            e.printStackTrace();
++            fail(e.getMessage());
++        } finally {
++            serviceDiscoverySource.delete();
++            providerConfig.delete();
++            if (topologyFile != null) {
++                topologyFile.delete();
++            }
++        }
++    }
++
++
++    private File writeProviderConfig(String path, String content) throws IOException {
++        File f = new File(path);
++        FileUtils.write(f, content);
 +        return f;
 +    }
 +
 +}
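
For context on the HIVE assertions above: the discovery source deliberately maps HIVE to {SCHEME}://localhost:10000/, and an unresolved {SCHEME} placeholder cannot parse as a URL, so the handler is expected to drop the service. A sketch of the kind of check that would reject it, using plain java.net.URI (a plausible validation, not necessarily the exact logic inside SimpleDescriptorHandler):

import java.net.URI;
import java.net.URISyntaxException;

public final class ServiceUrlCheck {

  /* true only if the candidate parses as an absolute URI */
  static boolean isValidServiceUrl(String candidate) {
    try {
      return new URI(candidate).isAbsolute();
    } catch (URISyntaxException e) {
      /* '{' and '}' are illegal URI characters, so an unresolved
         {SCHEME} placeholder always lands here */
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println(isValidServiceUrl("http://localhost:9999/thiswillwork")); // true
    System.out.println(isValidServiceUrl("{SCHEME}://localhost:10000/"));        // false
  }
}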

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-service-definitions/src/main/resources/services/ambariui/2.2.0/service.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
index 8a9d028,0000000..a97cee2
mode 100644,000000..100644
--- a/gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
+++ b/gateway-service-knoxsso/src/main/java/org/apache/knox/gateway/service/knoxsso/WebSSOResource.java
@@@ -1,322 -1,0 +1,322 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxsso;
 +
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.security.Principal;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +
 +import javax.annotation.PostConstruct;
 +import javax.servlet.ServletContext;
 +import javax.servlet.http.Cookie;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.servlet.http.HttpSession;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.POST;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import javax.ws.rs.WebApplicationException;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.util.RegExUtils;
 +import org.apache.knox.gateway.util.Urls;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
 +
 +@Path( WebSSOResource.RESOURCE_PATH )
 +public class WebSSOResource {
 +  private static final String SSO_COOKIE_NAME = "knoxsso.cookie.name";
 +  private static final String SSO_COOKIE_SECURE_ONLY_INIT_PARAM = "knoxsso.cookie.secure.only";
 +  private static final String SSO_COOKIE_MAX_AGE_INIT_PARAM = "knoxsso.cookie.max.age";
 +  private static final String SSO_COOKIE_DOMAIN_SUFFIX_PARAM = "knoxsso.cookie.domain.suffix";
 +  private static final String SSO_COOKIE_TOKEN_TTL_PARAM = "knoxsso.token.ttl";
 +  private static final String SSO_COOKIE_TOKEN_AUDIENCES_PARAM = "knoxsso.token.audiences";
 +  private static final String SSO_COOKIE_TOKEN_WHITELIST_PARAM = "knoxsso.redirect.whitelist.regex";
 +  private static final String SSO_ENABLE_SESSION_PARAM = "knoxsso.enable.session";
 +  private static final String ORIGINAL_URL_REQUEST_PARAM = "originalUrl";
 +  private static final String ORIGINAL_URL_COOKIE_NAME = "original-url";
 +  private static final String DEFAULT_SSO_COOKIE_NAME = "hadoop-jwt";
 +  // default for the whitelist - open up for development - relative paths and localhost only
 +  private static final String DEFAULT_WHITELIST = "^/.*$;^https?://(localhost|127.0.0.1|0:0:0:0:0:0:0:1|::1):\\d{0,9}/.*$";
 +  static final String RESOURCE_PATH = "/api/v1/websso";
 +  private static KnoxSSOMessages log = MessagesFactory.get( KnoxSSOMessages.class );
 +  private String cookieName = null;
 +  private boolean secureOnly = true;
 +  private int maxAge = -1;
 +  private long tokenTTL = 30000L;
 +  private String whitelist = null;
 +  private String domainSuffix = null;
 +  private List<String> targetAudiences = new ArrayList<>();
 +  private boolean enableSession = false;
 +
 +  @Context
 +  HttpServletRequest request;
 +
 +  @Context
 +  HttpServletResponse response;
 +
 +  @Context
 +  ServletContext context;
 +
 +  @PostConstruct
 +  public void init() {
 +
 +    // configured cookieName
 +    cookieName = context.getInitParameter(SSO_COOKIE_NAME);
 +    if (cookieName == null) {
 +      cookieName = DEFAULT_SSO_COOKIE_NAME;
 +    }
 +
 +    String secure = context.getInitParameter(SSO_COOKIE_SECURE_ONLY_INIT_PARAM);
 +    if (secure != null) {
 +      secureOnly = !"false".equals(secure);
 +      if (!secureOnly) {
 +        log.cookieSecureOnly(secureOnly);
 +      }
 +    }
 +
 +    String age = context.getInitParameter(SSO_COOKIE_MAX_AGE_INIT_PARAM);
 +    if (age != null) {
 +      try {
 +        log.setMaxAge(age);
 +        maxAge = Integer.parseInt(age);
 +      }
 +      catch (NumberFormatException nfe) {
 +        log.invalidMaxAgeEncountered(age);
 +      }
 +    }
 +
 +    domainSuffix = context.getInitParameter(SSO_COOKIE_DOMAIN_SUFFIX_PARAM);
 +
 +    whitelist = context.getInitParameter(SSO_COOKIE_TOKEN_WHITELIST_PARAM);
 +    if (whitelist == null) {
 +      // default to local/relative targets
 +      whitelist = DEFAULT_WHITELIST;
 +    }
 +
 +    String audiences = context.getInitParameter(SSO_COOKIE_TOKEN_AUDIENCES_PARAM);
 +    if (audiences != null) {
 +      String[] auds = audiences.split(",");
 +      for (int i = 0; i < auds.length; i++) {
-         targetAudiences.add(auds[i]);
++        targetAudiences.add(auds[i].trim());
 +      }
 +    }
 +
 +    String ttl = context.getInitParameter(SSO_COOKIE_TOKEN_TTL_PARAM);
 +    if (ttl != null) {
 +      try {
 +        tokenTTL = Long.parseLong(ttl);
 +      }
 +      catch (NumberFormatException nfe) {
 +        log.invalidTokenTTLEncountered(ttl);
 +      }
 +    }
 +
 +    String enableSession = context.getInitParameter(SSO_ENABLE_SESSION_PARAM);
 +    this.enableSession = ("true".equals(enableSession));
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doGet() {
 +    return getAuthenticationToken(HttpServletResponse.SC_TEMPORARY_REDIRECT);
 +  }
 +
 +  @POST
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doPost() {
 +    return getAuthenticationToken(HttpServletResponse.SC_SEE_OTHER);
 +  }
 +
 +  private Response getAuthenticationToken(int statusCode) {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +            .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    boolean removeOriginalUrlCookie = true;
 +    String original = getCookieValue(request, ORIGINAL_URL_COOKIE_NAME);
 +    if (original == null) {
 +      // in the case where there are no SAML redirects done before here
 +      // we need to get it from the request parameters
 +      removeOriginalUrlCookie = false;
 +      original = getOriginalUrlFromQueryParams();
 +      if (original.isEmpty()) {
 +        log.originalURLNotFound();
 +        throw new WebApplicationException("Original URL not found in the request.", Response.Status.BAD_REQUEST);
 +      }
 +      boolean validRedirect = RegExUtils.checkWhitelist(whitelist, original);
 +      if (!validRedirect) {
 +        log.whiteListMatchFail(original, whitelist);
 +        throw new WebApplicationException("Original URL not valid according to the configured whitelist.",
 +                Response.Status.BAD_REQUEST);
 +      }
 +    }
 +
 +    JWTokenAuthority ts = services.getService(GatewayServices.TOKEN_SERVICE);
 +    Principal p = request.getUserPrincipal();
 +
 +    try {
 +      JWT token = null;
 +      if (targetAudiences.isEmpty()) {
 +        token = ts.issueToken(p, "RS256", getExpiry());
 +      } else {
 +        token = ts.issueToken(p, targetAudiences, "RS256", getExpiry());
 +      }
 +
 +      // Coverity CID 1327959
 +      if( token != null ) {
 +        addJWTHadoopCookie( original, token );
 +      }
 +
 +      if (removeOriginalUrlCookie) {
 +        removeOriginalUrlCookie(response);
 +      }
 +
 +      log.aboutToRedirectToOriginal(original);
 +      response.setStatus(statusCode);
 +      response.setHeader("Location", original);
 +      try {
 +        response.getOutputStream().close();
 +      } catch (IOException e) {
 +        log.unableToCloseOutputStream(e.getMessage(), Arrays.toString(e.getStackTrace()));
 +      }
 +    }
 +    catch (TokenServiceException e) {
 +      log.unableToIssueToken(e);
 +    }
 +    URI location = null;
 +    try {
 +      location = new URI(original);
 +    }
 +    catch(URISyntaxException urise) {
 +      // TODO: log and return an error response
 +    }
 +
 +    if (!enableSession) {
 +      // invalidate the session to avoid autologin
 +      // Coverity CID 1352857
 +      HttpSession session = request.getSession(false);
 +      if( session != null ) {
 +        session.invalidate();
 +      }
 +    }
 +
 +    return Response.seeOther(location).entity("{ \"redirectTo\" : \"" + original + "\" }").build();
 +  }
 +
 +  private String getOriginalUrlFromQueryParams() {
 +    String original = request.getParameter(ORIGINAL_URL_REQUEST_PARAM);
 +    StringBuffer buf = new StringBuffer(original);
 +
 +    // Add any other query params.
 +    // Not ideal, but this avoids breaking existing integrations by
 +    // requiring some encoding.
 +    Map<String, String[]> params = request.getParameterMap();
 +    for (Entry<String, String[]> entry : params.entrySet()) {
 +      if (!ORIGINAL_URL_REQUEST_PARAM.equals(entry.getKey())
 +          && !original.contains(entry.getKey() + "=")) {
 +        buf.append("&").append(entry.getKey());
 +        String[] values = entry.getValue();
 +        if (values.length > 0 && values[0] != null) {
 +          buf.append("=");
 +        }
 +        for (int i = 0; i < values.length; i++) {
 +          if (values[0] != null) {
 +            buf.append(values[i]);
 +            if (i < values.length-1) {
 +              buf.append("&").append(entry.getKey()).append("=");
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    return buf.toString();
 +  }
 +
 +  private long getExpiry() {
 +    long expiry = 0L;
 +    if (tokenTTL == -1) {
 +      expiry = -1;
 +    }
 +    else {
 +      expiry = System.currentTimeMillis() + tokenTTL;
 +    }
 +    return expiry;
 +  }
 +
 +  private void addJWTHadoopCookie(String original, JWT token) {
 +    log.addingJWTCookie(token.toString());
 +    Cookie c = new Cookie(cookieName,  token.toString());
 +    c.setPath("/");
 +    try {
 +      String domain = Urls.getDomainName(original, domainSuffix);
 +      if (domain != null) {
 +        c.setDomain(domain);
 +      }
 +      c.setHttpOnly(true);
 +      if (secureOnly) {
 +        c.setSecure(true);
 +      }
 +      if (maxAge != -1) {
 +        c.setMaxAge(maxAge);
 +      }
 +      response.addCookie(c);
 +      log.addedJWTCookie();
 +    }
 +    catch(Exception e) {
 +      log.unableAddCookieToResponse(e.getMessage(), Arrays.toString(e.getStackTrace()));
 +      throw new WebApplicationException("Unable to add JWT cookie to response.");
 +    }
 +  }
 +
 +  private void removeOriginalUrlCookie(HttpServletResponse response) {
 +    Cookie c = new Cookie(ORIGINAL_URL_COOKIE_NAME, null);
 +    c.setMaxAge(0);
 +    c.setPath(RESOURCE_PATH);
 +    response.addCookie(c);
 +  }
 +
 +  private String getCookieValue(HttpServletRequest request, String name) {
 +    Cookie[] cookies = request.getCookies();
 +    String value = null;
 +    if (cookies != null) {
 +      for(Cookie cookie : cookies){
 +        if(name.equals(cookie.getName())){
 +          value = cookie.getValue();
 +        }
 +      }
 +    }
 +    if (value == null) {
 +      log.cookieNotFound(name);
 +    }
 +    return value;
 +  }
 +}
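
On the consuming side, the hadoop-jwt cookie issued above is only useful to a party that can verify its RS256 signature against the gateway's public key. A sketch of that verification with the Nimbus JOSE+JWT library (the same one the test below signs with); the method name and null-on-failure contract are illustrative assumptions:

import java.security.interfaces.RSAPublicKey;

import com.nimbusds.jose.JWSVerifier;
import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jwt.SignedJWT;

public final class JwtCookieCheck {

  /* returns the token subject if it verifies and is unexpired, else null */
  static String verifiedSubject(String cookieValue, RSAPublicKey publicKey) {
    try {
      SignedJWT jwt = SignedJWT.parse(cookieValue);
      JWSVerifier verifier = new RSASSAVerifier(publicKey);
      if (!jwt.verify(verifier)) {
        return null; /* signature mismatch */
      }
      java.util.Date exp = jwt.getJWTClaimsSet().getExpirationTime();
      if (exp != null && exp.getTime() < System.currentTimeMillis()) {
        return null; /* expired */
      }
      return jwt.getJWTClaimsSet().getSubject();
    } catch (Exception e) {
      return null; /* unparseable or malformed token */
    }
  }
}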

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
index 6f0a805,0000000..6b8411e
mode 100644,000000..100644
--- a/gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
+++ b/gateway-service-knoxsso/src/test/java/org/apache/knox/gateway/service/knoxsso/WebSSOResourceTest.java
@@@ -1,352 -1,0 +1,410 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxsso;
 +
 +import org.apache.knox.gateway.util.RegExUtils;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.security.KeyPair;
 +import java.security.KeyPairGenerator;
 +import java.security.NoSuchAlgorithmException;
 +import java.security.Principal;
 +import java.security.interfaces.RSAPrivateKey;
 +import java.security.interfaces.RSAPublicKey;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.security.auth.Subject;
 +import javax.servlet.ServletContext;
 +import javax.servlet.ServletOutputStream;
 +import javax.servlet.http.Cookie;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.servlet.http.HttpServletResponseWrapper;
 +
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.services.security.token.impl.JWTToken;
 +import org.easymock.EasyMock;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.nimbusds.jose.JWSSigner;
 +import com.nimbusds.jose.JWSVerifier;
 +import com.nimbusds.jose.crypto.RSASSASigner;
 +import com.nimbusds.jose.crypto.RSASSAVerifier;
 +
 +/**
 + * Some tests for the Knox SSO service.
 + */
 +public class WebSSOResourceTest {
 +
 +  protected static RSAPublicKey publicKey;
 +  protected static RSAPrivateKey privateKey;
 +
 +  @BeforeClass
 +  public static void setup() throws Exception {
 +    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
 +    kpg.initialize(1024);
 +    KeyPair keyPair = kpg.generateKeyPair();
 +
 +    publicKey = (RSAPublicKey) keyPair.getPublic();
 +    privateKey = (RSAPrivateKey) keyPair.getPrivate();
 +  }
 +
 +  @Test
 +  public void testWhitelistMatching() throws Exception {
 +    String whitelist = "^https?://.*example.com:8080/.*$;" +
 +        "^https?://.*example.com/.*$;" +
 +        "^https?://.*example2.com:\\d{0,9}/.*$;" +
 +        "^https://.*example3.com:\\d{0,9}/.*$;" +
 +        "^https?://localhost:\\d{0,9}/.*$;^/.*$";
 +
 +    // match on explicit hostname/domain and port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com:8080/"));
 +    // match on non-required port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com/"));
 +    // match on required but any port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example2.com:1234/"));
 +    // fail on missing port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example2.com/"));
 +    // fail on invalid port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com:8081/"));
 +    // fail on alphanumeric port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.com:A080/"));
 +    // fail on invalid hostname/domain
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example.net:8080/"));
 +    // fail on required port
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example2.com/"));
 +    // fail on required https
 +    Assert.assertFalse("Matched whitelist inappropriately", RegExUtils.checkWhitelist(whitelist,
 +        "http://host.example3.com/"));
 +    // match on localhost and port
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "http://localhost:8080/"));
 +    // match on local/relative path
 +    Assert.assertTrue("Failed to match whitelist", RegExUtils.checkWhitelist(whitelist,
 +        "/local/resource/"));
 +  }
 +
 +  @Test
 +  public void testGetToken() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
 +    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
 +    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
 +
 +    EasyMock.replay(principal, services, context, request);
 +
 +    WebSSOResource webSSOResponse = new WebSSOResource();
 +    webSSOResponse.request = request;
 +    webSSOResponse.response = responseWrapper;
 +    webSSOResponse.context = context;
 +    webSSOResponse.init();
 +
 +    // Issue a token
 +    webSSOResponse.doGet();
 +
 +    // Check the cookie
 +    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
 +    assertNotNull(cookie);
 +
 +    JWTToken parsedToken = new JWTToken(cookie.getValue());
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +  }
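
[Editor's note: the cookie value asserted above is a compact JWS. A quick way to eyeball its claims segment, using only the JDK (a sketch for inspection only; real verification is what verifyToken() does via RSASSAVerifier):]

    import java.util.Base64;

    public class JwtPayloadPeek {
      // Decode the middle (claims) segment of a compact JWS to inspect the
      // "sub" claim the test asserts equals "alice".
      public static String payloadJson(String compactJws) {
        String[] parts = compactJws.split("\\.");
        return new String(Base64.getUrlDecoder().decode(parts[1]));
      }

      public static void main(String[] args) throws Exception {
        // Example token with payload {"sub":"alice"} (header/signature elided).
        String payload = Base64.getUrlEncoder().withoutPadding()
            .encodeToString("{\"sub\":\"alice\"}".getBytes("UTF-8"));
        System.out.println(payloadJson("h." + payload + ".s"));
      }
    }
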
 +
 +  @Test
 +  public void testAudiences() throws Exception {
 +
 +    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn("recipient1,recipient2");
 +    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
 +    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
 +
 +    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
 +    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
 +    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
 +    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
 +
 +    Principal principal = EasyMock.createNiceMock(Principal.class);
 +    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
 +    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
++
++    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
++    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
++
++    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
++    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
++
++    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
++    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
++    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
++
++    EasyMock.replay(principal, services, context, request);
++
++    WebSSOResource webSSOResponse = new WebSSOResource();
++    webSSOResponse.request = request;
++    webSSOResponse.response = responseWrapper;
++    webSSOResponse.context = context;
++    webSSOResponse.init();
++
++    // Issue a token
++    webSSOResponse.doGet();
++
++    // Check the cookie
++    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
++    assertNotNull(cookie);
++
++    JWTToken parsedToken = new JWTToken(cookie.getValue());
++    assertEquals("alice", parsedToken.getSubject());
++    assertTrue(authority.verifyToken(parsedToken));
++
++    // Verify the audiences
++    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
++    assertEquals(2, audiences.size());
++    assertTrue(audiences.contains("recipient1"));
++    assertTrue(audiences.contains("recipient2"));
++  }
++
++  @Test
++  public void testAudiencesWhitespace() throws Exception {
++
++    ServletContext context = EasyMock.createNiceMock(ServletContext.class);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.name")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.secure.only")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.max.age")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.cookie.domain.suffix")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.redirect.whitelist.regex")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.token.audiences")).andReturn(" recipient1, recipient2 ");
++    EasyMock.expect(context.getInitParameter("knoxsso.token.ttl")).andReturn(null);
++    EasyMock.expect(context.getInitParameter("knoxsso.enable.session")).andReturn(null);
++
++    HttpServletRequest request = EasyMock.createNiceMock(HttpServletRequest.class);
++    EasyMock.expect(request.getParameter("originalUrl")).andReturn("http://localhost:9080/service");
++    EasyMock.expect(request.getParameterMap()).andReturn(Collections.<String,String[]>emptyMap());
++    EasyMock.expect(request.getServletContext()).andReturn(context).anyTimes();
++
++    Principal principal = EasyMock.createNiceMock(Principal.class);
++    EasyMock.expect(principal.getName()).andReturn("alice").anyTimes();
++    EasyMock.expect(request.getUserPrincipal()).andReturn(principal).anyTimes();
 +
 +    GatewayServices services = EasyMock.createNiceMock(GatewayServices.class);
 +    EasyMock.expect(context.getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE)).andReturn(services);
 +
 +    JWTokenAuthority authority = new TestJWTokenAuthority(publicKey, privateKey);
 +    EasyMock.expect(services.getService(GatewayServices.TOKEN_SERVICE)).andReturn(authority);
 +
 +    HttpServletResponse response = EasyMock.createNiceMock(HttpServletResponse.class);
 +    ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class);
 +    CookieResponseWrapper responseWrapper = new CookieResponseWrapper(response, outputStream);
 +
 +    EasyMock.replay(principal, services, context, request);
 +
 +    WebSSOResource webSSOResponse = new WebSSOResource();
 +    webSSOResponse.request = request;
 +    webSSOResponse.response = responseWrapper;
 +    webSSOResponse.context = context;
 +    webSSOResponse.init();
 +
 +    // Issue a token
 +    webSSOResponse.doGet();
 +
 +    // Check the cookie
 +    Cookie cookie = responseWrapper.getCookie("hadoop-jwt");
 +    assertNotNull(cookie);
 +
 +    JWTToken parsedToken = new JWTToken(cookie.getValue());
 +    assertEquals("alice", parsedToken.getSubject());
 +    assertTrue(authority.verifyToken(parsedToken));
 +
 +    // Verify the audiences
 +    List<String> audiences = Arrays.asList(parsedToken.getAudienceClaims());
 +    assertEquals(2, audiences.size());
 +    assertTrue(audiences.contains("recipient1"));
 +    assertTrue(audiences.contains("recipient2"));
 +  }
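
[Editor's note: this whitespace test pins down how the SSO resource is expected to parse the comma-separated knoxsso.token.audiences parameter. The resource code itself is outside this hunk; a minimal sketch of the split-and-trim behavior the test demands:]

    import java.util.ArrayList;
    import java.util.List;

    public class AudienceParsing {
      // Split a comma-separated audience list and trim surrounding whitespace,
      // matching what testAudiencesWhitespace above expects.
      static List<String> parseAudiences(String param) {
        List<String> audiences = new ArrayList<>();
        for (String aud : param.split(",")) {
          audiences.add(aud.trim());
        }
        return audiences;
      }

      public static void main(String[] args) {
        System.out.println(parseAudiences(" recipient1, recipient2 "));
        // [recipient1, recipient2]
      }
    }
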
 +
 +  /**
 +   * An HttpServletResponseWrapper that captures cookies added to the response so tests can inspect them
 +   */
 +  private static class CookieResponseWrapper extends HttpServletResponseWrapper {
 +
 +    private ServletOutputStream outputStream;
 +    private Map<String, Cookie> cookies = new HashMap<>();
 +
 +    public CookieResponseWrapper(HttpServletResponse response) {
 +        super(response);
 +    }
 +
 +    public CookieResponseWrapper(HttpServletResponse response, ServletOutputStream outputStream) {
 +        super(response);
 +        this.outputStream = outputStream;
 +    }
 +
 +    @Override
 +    public ServletOutputStream getOutputStream() {
 +        return outputStream;
 +    }
 +
 +    @Override
 +    public void addCookie(Cookie cookie) {
 +        super.addCookie(cookie);
 +        cookies.put(cookie.getName(), cookie);
 +    }
 +
 +    public Cookie getCookie(String name) {
 +        return cookies.get(name);
 +    }
 +
 +  }
 +
 +  private static class TestJWTokenAuthority implements JWTokenAuthority {
 +
 +    private RSAPublicKey publicKey;
 +    private RSAPrivateKey privateKey;
 +
 +    public TestJWTokenAuthority(RSAPublicKey publicKey, RSAPrivateKey privateKey) {
 +      this.publicKey = publicKey;
 +      this.privateKey = privateKey;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Subject subject, String algorithm)
 +      throws TokenServiceException {
 +      Principal p = (Principal) subject.getPrincipals().toArray()[0];
 +      return issueToken(p, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, null, algorithm);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm)
 +      throws TokenServiceException {
 +      return issueToken(p, audience, algorithm, -1);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String audience, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      List<String> audiences = null;
 +      if (audience != null) {
 +        audiences = new ArrayList<String>();
 +        audiences.add(audience);
 +      }
 +      return issueToken(p, audiences, algorithm, expires);
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, List<String> audiences, String algorithm,
 +                               long expires) throws TokenServiceException {
 +      String[] claimArray = new String[4];
 +      claimArray[0] = "KNOXSSO";
 +      claimArray[1] = p.getName();
 +      claimArray[2] = null;
 +      if (expires == -1) {
 +        claimArray[3] = null;
 +      } else {
 +        claimArray[3] = String.valueOf(expires);
 +      }
 +
 +      JWTToken token = null;
 +      if ("RS256".equals(algorithm)) {
 +        token = new JWTToken("RS256", claimArray, audiences);
 +        JWSSigner signer = new RSASSASigner(privateKey);
 +        token.sign(signer);
 +      } else {
 +        throw new TokenServiceException("Cannot issue token - Unsupported algorithm");
 +      }
 +
 +      return token;
 +    }
 +
 +    @Override
 +    public JWT issueToken(Principal p, String algorithm, long expiry)
 +        throws TokenServiceException {
 +      return issueToken(p, Collections.<String>emptyList(), algorithm, expiry);
 +    }
 +
 +    @Override
 +    public boolean verifyToken(JWT token, RSAPublicKey publicKey) throws TokenServiceException {
 +      JWSVerifier verifier = new RSASSAVerifier(publicKey);
 +      return token.verify(verifier);
 +    }
 +
 +  }
 +
 +}
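
[Editor's note: the publicKey/privateKey fields consumed by TestJWTokenAuthority are initialized outside this hunk. One plausible setup — a sketch, not the test's actual code — is a generated RSA key pair:]

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.interfaces.RSAPrivateKey;
    import java.security.interfaces.RSAPublicKey;

    public class TestKeys {
      public static void main(String[] args) throws Exception {
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair kp = kpg.generateKeyPair();
        RSAPublicKey publicKey = (RSAPublicKey) kp.getPublic();
        RSAPrivateKey privateKey = (RSAPrivateKey) kp.getPrivate();
        // These are the shapes the test authority signs and verifies with.
        System.out.println(publicKey.getAlgorithm() + "/" + publicKey.getModulus().bitLength());
      }
    }
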

http://git-wip-us.apache.org/repos/asf/knox/blob/8affbc02/gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
----------------------------------------------------------------------
diff --cc gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
index 2c77bdf,0000000..1c16ab3
mode 100644,000000..100644
--- a/gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
+++ b/gateway-service-knoxtoken/src/main/java/org/apache/knox/gateway/service/knoxtoken/TokenResource.java
@@@ -1,183 -1,0 +1,218 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.knoxtoken;
 +
 +import java.io.IOException;
 +import java.security.Principal;
++import java.security.cert.X509Certificate;
 +import java.util.ArrayList;
 +import java.util.Map;
 +import java.util.HashMap;
 +import java.util.List;
 +
 +import javax.annotation.PostConstruct;
 +import javax.servlet.ServletContext;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.POST;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.security.token.JWTokenAuthority;
 +import org.apache.knox.gateway.services.security.token.TokenServiceException;
 +import org.apache.knox.gateway.services.security.token.impl.JWT;
 +import org.apache.knox.gateway.util.JsonUtils;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
 +
 +@Path( TokenResource.RESOURCE_PATH )
 +public class TokenResource {
 +  private static final String EXPIRES_IN = "expires_in";
 +  private static final String TOKEN_TYPE = "token_type";
 +  private static final String ACCESS_TOKEN = "access_token";
 +  private static final String TARGET_URL = "target_url";
 +  private static final String BEARER = "Bearer ";
 +  private static final String TOKEN_TTL_PARAM = "knox.token.ttl";
 +  private static final String TOKEN_AUDIENCES_PARAM = "knox.token.audiences";
 +  private static final String TOKEN_TARGET_URL = "knox.token.target.url";
 +  private static final String TOKEN_CLIENT_DATA = "knox.token.client.data";
++  private static final String TOKEN_CLIENT_CERT_REQUIRED = "knox.token.client.cert.required";
++  private static final String TOKEN_ALLOWED_PRINCIPALS = "knox.token.allowed.principals";
 +  static final String RESOURCE_PATH = "knoxtoken/api/v1/token";
 +  private static TokenServiceMessages log = MessagesFactory.get( TokenServiceMessages.class );
 +  private long tokenTTL = 30000L;
 +  private List<String> targetAudiences = new ArrayList<>();
 +  private String tokenTargetUrl = null;
 +  private Map<String,Object> tokenClientDataMap = null;
++  private ArrayList<String> allowedDNs = new ArrayList<>();
++  private boolean clientCertRequired = false;
 +
 +  @Context
 +  HttpServletRequest request;
 +
 +  @Context
 +  HttpServletResponse response;
 +
 +  @Context
 +  ServletContext context;
 +
 +  @PostConstruct
 +  public void init() {
 +
 +    String audiences = context.getInitParameter(TOKEN_AUDIENCES_PARAM);
 +    if (audiences != null) {
 +      String[] auds = audiences.split(",");
 +      for (int i = 0; i < auds.length; i++) {
-         targetAudiences.add(auds[i]);
++        targetAudiences.add(auds[i].trim());
++      }
++    }
++
++    String clientCert = context.getInitParameter(TOKEN_CLIENT_CERT_REQUIRED);
++    clientCertRequired = "true".equals(clientCert);
++
++    String principals = context.getInitParameter(TOKEN_ALLOWED_PRINCIPALS);
++    if (principals != null) {
++      String[] dns = principals.split(";");
++      for (int i = 0; i < dns.length; i++) {
++        allowedDNs.add(dns[i]);
 +      }
 +    }
 +
 +    String ttl = context.getInitParameter(TOKEN_TTL_PARAM);
 +    if (ttl != null) {
 +      try {
 +        tokenTTL = Long.parseLong(ttl);
 +      }
 +      catch (NumberFormatException nfe) {
 +        log.invalidTokenTTLEncountered(ttl);
 +      }
 +    }
 +
 +    tokenTargetUrl = context.getInitParameter(TOKEN_TARGET_URL);
 +
 +    String clientData = context.getInitParameter(TOKEN_CLIENT_DATA);
 +    if (clientData != null) {
 +      tokenClientDataMap = new HashMap<>();
 +      String[] tokenClientData = clientData.split(",");
 +      addClientDataToMap(tokenClientData, tokenClientDataMap);
 +    }
 +  }
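
[Editor's note: the parameter names read in init() above arrive as service init-params from a topology. A minimal sketch of such a <service> entry, built with the same XMLDoc API the functional tests later in this series use — the KNOXTOKEN role and the values shown are illustrative:]

    import com.mycila.xmltool.XMLDoc;
    import com.mycila.xmltool.XMLTag;

    public class KnoxTokenTopologySketch {
      public static void main(String[] args) {
        XMLTag xml = XMLDoc.newDocument(true)
            .addRoot("topology")
              .addTag("service")
                .addTag("role").addText("KNOXTOKEN")
                .addTag("param")
                  .addTag("name").addText("knox.token.ttl")
                  .addTag("value").addText("36000000").gotoParent()
                .addTag("param")
                  .addTag("name").addText("knox.token.audiences")
                  .addTag("value").addText("tokenbased").gotoParent()
                .addTag("param")
                  .addTag("name").addText("knox.token.client.cert.required")
                  .addTag("value").addText("true").gotoParent()
            .gotoRoot();
        System.out.println(xml.toString());
      }
    }
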
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doGet() {
 +    return getAuthenticationToken();
 +  }
 +
 +  @POST
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  public Response doPost() {
 +    return getAuthenticationToken();
 +  }
 +
++  private X509Certificate extractCertificate(HttpServletRequest req) {
++    X509Certificate[] certs = (X509Certificate[]) req.getAttribute("javax.servlet.request.X509Certificate");
++    if (null != certs && certs.length > 0) {
++        return certs[0];
++    }
++    return null;
++  }
++
 +  private Response getAuthenticationToken() {
++    if (clientCertRequired) {
++      X509Certificate cert = extractCertificate(request);
++      if (cert != null) {
++        if (!allowedDNs.contains(cert.getSubjectDN().getName())) {
++          return Response.status(403).entity("{ \"Unable to get token - untrusted client cert.\" }").build();
++        }
++      }
++      else {
++        return Response.status(403).entity("{ \"Unable to get token - client cert required.\" }").build();
++      }
++    }
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +            .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    JWTokenAuthority ts = services.getService(GatewayServices.TOKEN_SERVICE);
 +    Principal p = ((HttpServletRequest)request).getUserPrincipal();
 +    long expires = getExpiry();
 +
 +    try {
 +      JWT token = null;
 +      if (targetAudiences.isEmpty()) {
 +        token = ts.issueToken(p, "RS256", expires);
 +      } else {
 +        token = ts.issueToken(p, targetAudiences, "RS256", expires);
 +      }
 +
 +      if (token != null) {
 +        String accessToken = token.toString();
 +
 +        HashMap<String, Object> map = new HashMap<>();
 +        map.put(ACCESS_TOKEN, accessToken);
 +        map.put(TOKEN_TYPE, BEARER);
 +        map.put(EXPIRES_IN, expires);
 +        if (tokenTargetUrl != null) {
 +          map.put(TARGET_URL, tokenTargetUrl);
 +        }
 +        if (tokenClientDataMap != null) {
 +          map.putAll(tokenClientDataMap);
 +        }
 +
 +        String jsonResponse = JsonUtils.renderAsJsonString(map);
 +
 +        response.getWriter().write(jsonResponse);
 +        return Response.ok().build();
 +      }
 +      else {
 +        return Response.serverError().build();
 +      }
 +    }
 +    catch (TokenServiceException | IOException e) {
 +      log.unableToIssueToken(e);
 +    }
 +    return Response.ok().entity("{ \"Unable to acquire token.\" }").build();
 +  }
 +
 +  void addClientDataToMap(String[] tokenClientData,
 +      Map<String,Object> map) {
 +    String[] kv = null;
 +    for (int i = 0; i < tokenClientData.length; i++) {
 +      kv = tokenClientData[i].split("=");
 +      if (kv.length == 2) {
 +        map.put(kv[0], kv[1]);
 +      }
 +    }
 +  }
 +
 +  private long getExpiry() {
 +    long expiry = 0L;
 +    if (tokenTTL == -1) {
 +      expiry = -1;
 +    }
 +    else {
 +      expiry = System.currentTimeMillis() + tokenTTL;
 +    }
 +    return expiry;
 +  }
 +}
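
[Editor's note: once deployed, the resource above is exercised over plain HTTP(S). A minimal client sketch using only the JDK — the gateway address, topology name, and credentials are hypothetical, while the resource path matches the RESOURCE_PATH constant above:]

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.Base64;

    public class KnoxTokenClientSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical gateway host and topology name.
        URL url = new URL("https://localhost:8443/gateway/sandbox/knoxtoken/api/v1/token");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        String creds = Base64.getEncoder()
            .encodeToString("guest:guest-password".getBytes("UTF-8"));
        conn.setRequestProperty("Authorization", "Basic " + creds);
        // The body is the JSON map built in getAuthenticationToken():
        // access_token, token_type ("Bearer "), expires_in and, if configured,
        // target_url plus any knox.token.client.data entries.
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
          System.out.println(in.readLine());
        }
      }
    }

Note that when knox.token.client.cert.required is set, the TLS connection must also present a client certificate whose subject DN appears in knox.token.allowed.principals; otherwise the request is rejected with a 403, as in the code above.
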


[31/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
----------------------------------------------------------------------
diff --cc gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
index 02be270,0000000..f6536d9
mode 100644,000000..100644
--- a/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
+++ b/gateway-test/src/test/java/org/apache/knox/gateway/GatewayBasicFuncTest.java
@@@ -1,4508 -1,0 +1,4508 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.PrintStream;
 +import java.io.StringWriter;
 +import java.net.InetAddress;
 +import java.net.InetSocketAddress;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.net.URL;
 +import java.nio.charset.Charset;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import javax.ws.rs.core.MediaType;
 +
 +import io.restassured.RestAssured;
 +import io.restassured.http.ContentType;
 +import io.restassured.http.Cookie;
 +import io.restassured.http.Header;
 +import io.restassured.path.json.JsonPath;
 +import io.restassured.response.Response;
 +import io.restassured.specification.ResponseSpecification;
 +import com.mycila.xmltool.XMLDoc;
 +import com.mycila.xmltool.XMLTag;
 +import org.apache.commons.io.filefilter.WildcardFileFilter;
 +import org.apache.commons.lang3.ArrayUtils;
 +import org.apache.knox.gateway.util.KnoxCLI;
 +import org.apache.knox.test.TestUtils;
 +import org.apache.knox.test.category.MediumTests;
 +import org.apache.knox.test.category.VerifyTest;
 +import org.apache.knox.test.mock.MockRequestMatcher;
 +import org.apache.http.HttpHost;
 +import org.apache.http.HttpResponse;
 +import org.apache.http.HttpStatus;
 +import org.apache.http.auth.AuthScope;
 +import org.apache.http.auth.UsernamePasswordCredentials;
 +import org.apache.http.client.AuthCache;
 +import org.apache.http.client.CredentialsProvider;
 +import org.apache.http.client.methods.HttpGet;
 +import org.apache.http.client.methods.HttpPost;
 +import org.apache.http.client.protocol.HttpClientContext;
 +import org.apache.http.entity.StringEntity;
 +import org.apache.http.impl.auth.BasicScheme;
 +import org.apache.http.impl.client.BasicAuthCache;
 +import org.apache.http.impl.client.BasicCredentialsProvider;
 +import org.apache.http.impl.client.CloseableHttpClient;
 +import org.apache.http.impl.client.HttpClientBuilder;
 +import org.apache.http.util.EntityUtils;
 +import org.apache.velocity.Template;
 +import org.apache.velocity.VelocityContext;
 +import org.apache.velocity.app.VelocityEngine;
 +import org.apache.velocity.runtime.RuntimeConstants;
 +import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
 +import org.hamcrest.CoreMatchers;
 +import org.hamcrest.Matcher;
 +import org.hamcrest.MatcherAssert;
 +import org.hamcrest.Matchers;
 +import org.junit.After;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import static io.restassured.RestAssured.given;
 +import static org.apache.knox.test.TestUtils.LOG_ENTER;
 +import static org.apache.knox.test.TestUtils.LOG_EXIT;
 +import static org.hamcrest.CoreMatchers.*;
 +import static org.hamcrest.Matchers.containsString;
 +import static org.hamcrest.Matchers.greaterThan;
 +import static org.hamcrest.text.IsEmptyString.isEmptyString;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +import static org.xmlmatchers.XmlMatchers.isEquivalentTo;
 +import static org.xmlmatchers.transform.XmlConverters.the;
 +import static uk.co.datumedge.hamcrest.json.SameJSONAs.sameJSONAs;
 +
 +@Category( { VerifyTest.class, MediumTests.class } )
 +public class GatewayBasicFuncTest {
 +
 +  private static final Charset UTF8 = Charset.forName("UTF-8");
 +
 +  // Uncomment to cause the test to hang after the gateway instance is set up.
 +  // This will allow the gateway instance to be hit directly via some external client.
 +//  @Test
 +//  public void hang() throws IOException {
 +//    System.out.println( "Server on port " + driver.gateway.getAddresses()[0].getPort() );
 +//    System.out.println();
 +//    System.in.read();
 +//  }
 +
 +  private static Logger log = LoggerFactory.getLogger( GatewayBasicFuncTest.class );
 +
 +  private static GatewayTestDriver driver = new GatewayTestDriver();
 +
 +  // Controls the host name to which the gateway dispatches requests.  This may be the name of a sandbox VM
 +  // or an EC2 instance.  Currently only a single host is supported.
 +  private static final String TEST_HOST = "vm.local";
 +
 +  // Specifies if the test requests should go through the gateway or directly to the services.
 +  // This is frequently used to verify the behavior of the test both with and without the gateway.
 +  private static final boolean USE_GATEWAY = true;
 +
 +  // Specifies if the test requests should be sent to mock services or the real services.
 +  // This is frequently used to verify the behavior of the test both with and without mock services.
 +  private static final boolean USE_MOCK_SERVICES = true;
 +
 +  // Specifies if the GATEWAY_HOME created for the test should be deleted when the test suite is complete.
 +  // This is frequently used during debugging to keep the GATEWAY_HOME around for inspection.
 +  private static final boolean CLEANUP_TEST = true;
 +
 +//  private static final boolean USE_GATEWAY = false;
 +//  private static final boolean USE_MOCK_SERVICES = false;
 +//  private static final boolean CLEANUP_TEST = false;
 +
 +  /**
 +   * Creates a deployment of a gateway instance that all test methods will share.  This method also creates a
 +   * registry of sorts for all of the services that will be used by the test methods.
 +   * The createTopology method is used to create the topology file that would normally be read from disk.
 +   * The driver.setupGateway invocation is where the creation of GATEWAY_HOME occurs.
 +   * @throws Exception Thrown if any failure occurs.
 +   */
 +  @BeforeClass
 +  public static void setupSuite() throws Exception {
 +    //Log.setLog( new NoOpLogger() );
 +    LOG_ENTER();
 +    GatewayTestConfig config = new GatewayTestConfig();
 +    driver.setResourceBase(GatewayBasicFuncTest.class);
 +    driver.setupLdap(0);
 +    driver.setupService("WEBHDFS", "http://" + TEST_HOST + ":50070/webhdfs", "/cluster/webhdfs", USE_MOCK_SERVICES);
 +    driver.setupService( "DATANODE", "http://" + TEST_HOST + ":50075/webhdfs", "/cluster/webhdfs/data", USE_MOCK_SERVICES );
 +    driver.setupService( "WEBHCAT", "http://" + TEST_HOST + ":50111/templeton", "/cluster/templeton", USE_MOCK_SERVICES );
 +    driver.setupService( "OOZIE", "http://" + TEST_HOST + ":11000/oozie", "/cluster/oozie", USE_MOCK_SERVICES );
 +    driver.setupService( "HIVE", "http://" + TEST_HOST + ":10000", "/cluster/hive", USE_MOCK_SERVICES );
 +    driver.setupService( "WEBHBASE", "http://" + TEST_HOST + ":60080", "/cluster/hbase", USE_MOCK_SERVICES );
 +    driver.setupService( "NAMENODE", "hdfs://" + TEST_HOST + ":8020", null, USE_MOCK_SERVICES );
 +    driver.setupService( "JOBTRACKER", "thrift://" + TEST_HOST + ":8021", null, USE_MOCK_SERVICES );
 +    driver.setupService( "RESOURCEMANAGER", "http://" + TEST_HOST + ":8088/ws", "/cluster/resourcemanager", USE_MOCK_SERVICES );
 +    driver.setupService( "FALCON", "http://" + TEST_HOST + ":15000", "/cluster/falcon", USE_MOCK_SERVICES );
 +    driver.setupService( "STORM", "http://" + TEST_HOST + ":8477", "/cluster/storm", USE_MOCK_SERVICES );
 +    driver.setupService( "STORM-LOGVIEWER", "http://" + TEST_HOST + ":8477", "/cluster/storm", USE_MOCK_SERVICES );
 +    driver.setupService( "SOLR", "http://" + TEST_HOST + ":8983", "/cluster/solr", USE_MOCK_SERVICES );
 +    driver.setupService( "KAFKA", "http://" + TEST_HOST + ":8477", "/cluster/kafka", USE_MOCK_SERVICES );
 +    driver.setupGateway( config, "cluster", createTopology(), USE_GATEWAY );
 +    LOG_EXIT();
 +  }
 +
 +  @AfterClass
 +  public static void cleanupSuite() throws Exception {
 +    LOG_ENTER();
 +    if( CLEANUP_TEST ) {
 +      driver.cleanup();
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  @After
 +  public void cleanupTest() {
 +    driver.reset();
 +  }
 +
 +  /**
 +   * Creates a topology that is deployed to the gateway instance for the test suite.
 +   * Note that this topology is shared by all of the test methods in this suite.
 +   * @return A populated XML structure for a topology file.
 +   */
 +  private static XMLTag createTopology() {
 +    XMLTag xml = XMLDoc.newDocument( true )
 +        .addRoot( "topology" )
 +          .addTag( "gateway" )
 +            .addTag( "provider" )
 +              .addTag( "role" ).addText( "webappsec" )
 +              .addTag("name").addText("WebAppSec")
 +              .addTag("enabled").addText("true")
 +              .addTag( "param" )
 +                .addTag("name").addText("csrf.enabled")
 +                .addTag("value").addText("true").gotoParent().gotoParent()
 +            .addTag("provider")
 +              .addTag("role").addText("authentication")
 +              .addTag("name").addText("ShiroProvider")
 +              .addTag("enabled").addText("true")
 +              .addTag( "param" )
 +                .addTag("name").addText("main.ldapRealm")
 +                .addTag("value").addText("org.apache.knox.gateway.shirorealm.KnoxLdapRealm").gotoParent()
 +              .addTag( "param" )
 +                .addTag( "name" ).addText( "main.ldapRealm.userDnTemplate" )
 +                .addTag( "value" ).addText( "uid={0},ou=people,dc=hadoop,dc=apache,dc=org" ).gotoParent()
 +              .addTag( "param" )
 +                .addTag( "name" ).addText( "main.ldapRealm.contextFactory.url" )
 +                .addTag( "value" ).addText( driver.getLdapUrl() ).gotoParent()
 +              .addTag( "param" )
 +                .addTag( "name" ).addText( "main.ldapRealm.contextFactory.authenticationMechanism" )
 +                .addTag( "value" ).addText( "simple" ).gotoParent()
 +              .addTag( "param" )
 +                .addTag( "name" ).addText( "urls./**" )
 +                .addTag( "value" ).addText( "authcBasic" ).gotoParent().gotoParent()
 +            .addTag("provider")
 +              .addTag("role").addText("identity-assertion")
 +              .addTag("enabled").addText("true")
 +              .addTag("name").addText("Default").gotoParent()
 +            .addTag("provider")
 +              .addTag( "role" ).addText( "authorization" )
 +              .addTag( "enabled" ).addText( "true" )
 +              .addTag("name").addText("AclsAuthz").gotoParent()
 +              .addTag("param")
 +                .addTag("name").addText( "webhdfs-acl" )
 +                .addTag("value").addText( "hdfs;*;*" ).gotoParent()
 +          .gotoRoot()
 +          .addTag("service")
 +            .addTag("role").addText("WEBHDFS")
 +            .addTag("url").addText(driver.getRealUrl("WEBHDFS")).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "NAMENODE" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "NAMENODE" ) ).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "DATANODE" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "DATANODE" ) ).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "JOBTRACKER" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "JOBTRACKER" ) ).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "WEBHCAT" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "WEBHCAT" ) ).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "OOZIE" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "OOZIE" ) ).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "HIVE" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "HIVE" ) ).gotoParent()
 +          .addTag( "service" )
 +            .addTag( "role" ).addText( "WEBHBASE" )
 +            .addTag( "url" ).addText( driver.getRealUrl( "WEBHBASE" ) ).gotoParent()
 +        .addTag("service")
 +            .addTag("role").addText("RESOURCEMANAGER")
 +            .addTag("url").addText(driver.getRealUrl("RESOURCEMANAGER")).gotoParent()
 +        .addTag("service")
 +            .addTag("role").addText("FALCON")
 +            .addTag("url").addText(driver.getRealUrl("FALCON")).gotoParent()
 +        .addTag("service")
 +            .addTag("role").addText("STORM")
 +            .addTag("url").addText(driver.getRealUrl("STORM")).gotoParent()
 +        .addTag("service")
 +            .addTag("role").addText("STORM-LOGVIEWER")
 +            .addTag("url").addText(driver.getRealUrl("STORM-LOGVIEWER")).gotoParent()
 +        .addTag("service")
 +            .addTag("role").addText("SOLR")
 +            .addTag("url").addText(driver.getRealUrl("SOLR")).gotoParent()
 +        .addTag("service")
 +            .addTag("role").addText("KAFKA")
 +            .addTag("url").addText(driver.getRealUrl("KAFKA")).gotoParent()
 +        .addTag("service")
 +        .addTag("role").addText("SERVICE-TEST")
 +        .gotoRoot();
 +//     System.out.println( "GATEWAY=" + xml.toString() );
 +    return xml;
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testBasicJsonUseCase() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testBasicJsonUseCase";
 +    String username = "hdfs";
 +    String password = "hdfs-password";
 +    /* Create a directory.
 +    curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
 +
 +    The client receives a response with a boolean JSON object:
 +    HTTP/1.1 HttpStatus.SC_OK OK
 +    Content-Type: application/json
 +    Transfer-Encoding: chunked
 +
 +    {"boolean": true}
 +    */
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "PUT" )
 +        .pathInfo( "/v1" + root + "/dir" )
 +        .queryParam( "op", "MKDIRS" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "webhdfs-success.json" ) )
 +        .contentType( "application/json" );
 +    Cookie cookie = given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "MKDIRS" )
 +        .then()
 +        //.log().all()
 +        .statusCode( HttpStatus.SC_OK )
 +        .contentType( "application/json" )
 +        .body( "boolean", is( true ) )
 +        .when().put( driver.getUrl( "WEBHDFS" ) + "/v1" + root + "/dir" ).getDetailedCookie( "JSESSIONID" );
 +    assertThat( cookie.isSecured(), is( true ) );
 +    assertThat( cookie.isHttpOnly(), is( true ) );
 +    assertThat( cookie.getPath(), is( "/gateway/cluster" ) );
 +    assertThat( cookie.getValue().length(), greaterThan( 16 ) );
 +    driver.assertComplete();
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testBasicOutboundHeaderUseCase() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testBasicOutboundHeaderUseCase";
 +    String username = "hdfs";
 +    String password = "hdfs-password";
 +    InetSocketAddress gatewayAddress = driver.gateway.getAddresses()[0];
 +    String gatewayHostName = gatewayAddress.getHostName();
 +    String gatewayAddrName = InetAddress.getByName(gatewayHostName).getHostAddress();
 +
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "PUT" )
 +        .pathInfo( "/v1" + root + "/dir/file" )
 +        .header( "Host", driver.getRealAddr( "WEBHDFS" ) )
 +        .queryParam( "op", "CREATE" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .header("Location", driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=CREATE&user.name=hdfs");
 +    Response response = given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "CREATE" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .when().put( driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file" );
 +    String location = response.getHeader( "Location" );
 +    //System.out.println( location );
 +    log.debug( "Redirect location: " + response.getHeader( "Location" ) );
 +    if( driver.isUseGateway() ) {
 +      MatcherAssert.assertThat( location, anyOf(
 +          startsWith( "http://" + gatewayHostName + ":" + gatewayAddress.getPort() + "/" ),
 +          startsWith( "http://" + gatewayAddrName + ":" + gatewayAddress.getPort() + "/" ) ) );
 +      MatcherAssert.assertThat( location, containsString( "?_=" ) );
 +    }
 +    MatcherAssert.assertThat(location, not(containsString("host=")));
 +    MatcherAssert.assertThat(location, not(containsString("port=")));
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testBasicOutboundEncodedHeaderUseCase() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testBasicOutboundHeaderUseCase";
 +    String username = "hdfs";
 +    String password = "hdfs-password";
 +
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "PUT" )
 +        .pathInfo( "/v1" + root + "/dir/fileレポー" )
 +        .header( "Host", driver.getRealAddr( "WEBHDFS" ) )
 +        .queryParam( "op", "CREATE" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .header("Location", driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file%E3%83%AC%E3%83%9D%E3%83%BC?op=CREATE&user.name=hdfs");
 +    Response response = given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "CREATE" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .when().put( driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/fileレポー" );
 +//        .when().put( driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file%E3%83%AC%E3%83%9D%E3%83%BC" );
 +    String location = response.getHeader( "Location" );
 +    //System.out.println( location );
 +    log.debug( "Redirect location: " + response.getHeader( "Location" ) );
 +    if( driver.isUseGateway() ) {
 +      MatcherAssert.assertThat( location, containsString("/dir/file%E3%83%AC%E3%83%9D%E3%83%BC") );
 +    }
 +    LOG_EXIT();
 +  }
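
[Editor's note: the encoded sequence asserted in the Location header above is simply the UTF-8 percent-encoding of レポー. A one-liner to confirm, using only the JDK:]

    import java.net.URLEncoder;

    public class Utf8PathEncoding {
      public static void main(String[] args) throws Exception {
        // レポー percent-encodes to the sequence asserted above.
        System.out.println(URLEncoder.encode("レポー", "UTF-8"));
        // %E3%83%AC%E3%83%9D%E3%83%BC
      }
    }
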
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHdfsTildeUseCase() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testHdfsTildeUseCase";
 +    String username = "hdfs";
 +    String password = "hdfs-password";
 +
 +    // Attempt to delete the test directory in case a previous run failed.
 +    // Ignore any result; this just cleans up anything left over from an earlier failure.
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "DELETE" )
 +        .from( "testHdfsTildeUseCase" )
 +        .pathInfo( "/v1/user/hdfs" + root )
 +        .queryParam( "op", "DELETE" )
 +        .queryParam( "user.name", username )
 +        .queryParam( "recursive", "true" )
 +        .respond()
 +        .status( HttpStatus.SC_OK );
 +
 +    try {
 +      // Need to turn off URL encoding here; otherwise the tilde gets encoded and the rewrite rules fail
 +      RestAssured.urlEncodingEnabled = false;
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( username, password )
 +          .header("X-XSRF-Header", "jksdhfkhdsf")
 +          .queryParam( "op", "DELETE" )
 +          .queryParam( "recursive", "true" )
 +          .then()
 +          //.log().all()
 +          .statusCode( HttpStatus.SC_OK )
 +          .when().delete( driver.getUrl( "WEBHDFS" ) + "/v1/~" + root + ( driver.isUseGateway() ? "" : "?user.name=" + username ) );
 +      driver.assertComplete();
 +
 +      driver.getMock( "WEBHDFS" )
 +          .expect()
 +          .method( "PUT" )
 +          .pathInfo( "/v1/user/hdfs/dir" )
 +          .queryParam( "op", "MKDIRS" )
 +          .queryParam( "user.name", username )
 +          .respond()
 +          .status( HttpStatus.SC_OK )
 +          .content( driver.getResourceBytes( "webhdfs-success.json" ) )
 +          .contentType("application/json");
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( username, password )
 +          .header("X-XSRF-Header", "jksdhfkhdsf")
 +          .queryParam( "op", "MKDIRS" )
 +          .then()
 +          //.log().all();
 +          .statusCode( HttpStatus.SC_OK )
 +          .contentType( "application/json" )
 +          .body( "boolean", is( true ) )
 +          .when().put( driver.getUrl( "WEBHDFS" ) + "/v1/~/dir" );
 +      driver.assertComplete();
 +    } finally {
 +      RestAssured.urlEncodingEnabled = true;
 +    }
 +    LOG_EXIT();
 +  }
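
[Editor's note: the reason the test disables RestAssured's URL encoding is that an encoded tilde no longer matches the /v1/~ rewrite rule. A quick check of what encoding would otherwise do to the path:]

    import java.net.URLEncoder;

    public class TildeEncoding {
      public static void main(String[] args) throws Exception {
        // URLEncoder turns '~' into %7E (and '/' into %2F), which would
        // defeat the /v1/~ rewrite exercised above.
        System.out.println(URLEncoder.encode("/v1/~/dir", "UTF-8"));
        // %2Fv1%2F%7E%2Fdir
      }
    }
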
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testBasicHdfsUseCase() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testBasicHdfsUseCase";
 +    String username = "hdfs";
 +    String password = "hdfs-password";
 +    InetSocketAddress gatewayAddress = driver.gateway.getAddresses()[0];
 +    String gatewayHostName = gatewayAddress.getHostName();
 +    String gatewayAddrName = InetAddress.getByName( gatewayHostName ).getHostAddress();
 +
 +    // Attempt to delete the test directory in case a previous run failed.
 +    // Ignore any result; this just cleans up anything left over from an earlier failure.
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "DELETE" )
 +        .from( "testBasicHdfsUseCase-1" )
 +        .pathInfo( "/v1" + root )
 +        .queryParam( "op", "DELETE" )
 +        .queryParam( "user.name", username )
 +        .queryParam( "recursive", "true" )
 +        .respond()
 +        .status( HttpStatus.SC_OK );
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "DELETE" )
 +        .queryParam( "recursive", "true" )
 +        .then()
 +        //.log().all()
 +        .statusCode( HttpStatus.SC_OK )
 +        .when().delete( driver.getUrl( "WEBHDFS" ) + "/v1" + root + ( driver.isUseGateway() ? "" : "?user.name=" + username ) );
 +    driver.assertComplete();
 +
 +    /* Create a directory.
 +    curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
 +
 +    The client receives a response with a boolean JSON object:
 +    HTTP/1.1 HttpStatus.SC_OK OK
 +    Content-Type: application/json
 +    Transfer-Encoding: chunked
 +
 +    {"boolean": true}
 +    */
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "PUT" )
 +        .pathInfo( "/v1" + root + "/dir" )
 +        .queryParam( "op", "MKDIRS" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "webhdfs-success.json" ) )
 +        .contentType( "application/json" );
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "MKDIRS" )
 +        .then()
 +        //.log().all();
 +        .statusCode( HttpStatus.SC_OK )
 +        .contentType( "application/json" )
 +        .body( "boolean", is( true ) )
 +        .when().put( driver.getUrl( "WEBHDFS" ) + "/v1" + root + "/dir" );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "GET" )
 +        .pathInfo( "/v1" + root )
 +        .queryParam( "op", "LISTSTATUS" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "webhdfs-liststatus-test.json" ) )
 +        .contentType( "application/json" );
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "LISTSTATUS" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_OK )
 +        .body( "FileStatuses.FileStatus[0].pathSuffix", is( "dir" ) )
 +        .when().get( driver.getUrl( "WEBHDFS" ) + "/v1" + root );
 +    driver.assertComplete();
 +
 +    //NEGATIVE: Test a bad password.
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, "invalid-password" )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "LISTSTATUS" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_UNAUTHORIZED )
 +        .when().get( driver.getUrl( "WEBHDFS" ) + "/v1" + root );
 +    driver.assertComplete();
 +
 +    //NEGATIVE: Test a bad user.
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic( "hdfs-user", "hdfs-password" )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "LISTSTATUS" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_UNAUTHORIZED )
 +        .when().get( driver.getUrl( "WEBHDFS" ) + "/v1" + root );
 +    driver.assertComplete();
 +
 +    //NEGATIVE: Test a valid but unauthorized user.
 +    given()
 +      //.log().all()
 +      .auth().preemptive().basic( "mapred-user", "mapred-password" )
 +      .header("X-XSRF-Header", "jksdhfkhdsf")
 +      .queryParam( "op", "LISTSTATUS" )
 +      .then()
 +      //.log().ifError()
 +      .statusCode( HttpStatus.SC_UNAUTHORIZED )
 +      .when().get( driver.getUrl( "WEBHDFS" ) + "/v1" + root );
 +
 +    /* Add a file.
 +    curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATE
 +                       [&overwrite=<true|false>][&blocksize=<LONG>][&replication=<SHORT>]
 +                     [&permission=<OCTAL>][&buffersize=<INT>]"
 +
 +    The request is then redirected to a datanode where the file data is to be written:
 +    HTTP/1.1 307 TEMPORARY_REDIRECT
 +    Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE...
 +    Content-Length: 0
 +
 +    Step 2: Submit another HTTP PUT request to the URL in the Location header, with the file data to be written.
 +    curl -i -X PUT -T <LOCAL_FILE> "http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE..."
 +
 +    The client receives an HttpStatus.SC_CREATED Created response with zero content length and the WebHDFS URI of the file in the Location header:
 +    HTTP/1.1 HttpStatus.SC_CREATED Created
 +    Location: webhdfs://<HOST>:<PORT>/<PATH>
 +    Content-Length: 0
 +    */
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "PUT" )
 +        .pathInfo( "/v1" + root + "/dir/file" )
 +        .queryParam( "op", "CREATE" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .header( "Location", driver.getRealUrl( "DATANODE" ) + "/v1" + root + "/dir/file?op=CREATE&user.name=hdfs" );
 +    driver.getMock( "DATANODE" )
 +        .expect()
 +        .method( "PUT" )
 +        .pathInfo( "/v1" + root + "/dir/file" )
 +        .queryParam( "op", "CREATE" )
 +        .queryParam( "user.name", username )
 +        .contentType( "text/plain" )
 +        .content( driver.getResourceBytes( "test.txt" ) )
 +            //.content( driver.gerResourceBytes( "hadoop-examples.jar" ) )
 +        .respond()
 +        .status( HttpStatus.SC_CREATED )
 +        .header( "Location", "webhdfs://" + driver.getRealAddr( "DATANODE" ) + "/v1" + root + "/dir/file" );
 +    Response response = given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "CREATE" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .when().put( driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file" );
 +    String location = response.getHeader( "Location" );
 +    log.debug( "Redirect location: " + response.getHeader( "Location" ) );
 +    if( driver.isUseGateway() ) {
 +      MatcherAssert.assertThat( location, anyOf(
 +          startsWith( "http://" + gatewayHostName + ":" + gatewayAddress.getPort() + "/" ),
 +          startsWith( "http://" + gatewayAddrName + ":" + gatewayAddress.getPort() + "/" ) ) );
 +      MatcherAssert.assertThat( location, containsString( "?_=" ) );
 +    }
 +    MatcherAssert.assertThat( location, not( containsString( "host=" ) ) );
 +    MatcherAssert.assertThat( location, not( containsString( "port=" ) ) );
 +    response = given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "test.txt" ) )
 +        .contentType( "text/plain" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_CREATED )
 +        .when().put( location );
 +    location = response.getHeader( "Location" );
 +    log.debug( "Created location: " + location );
 +    if( driver.isUseGateway() ) {
 +      MatcherAssert.assertThat( location, anyOf(
 +          startsWith( "http://" + gatewayHostName + ":" + gatewayAddress.getPort() + "/" ),
 +          startsWith( "http://" + gatewayAddrName + ":" + gatewayAddress.getPort() + "/" ) ) );
 +    }
 +    driver.assertComplete();
 +
 +    /* Get the file.
 +    curl -i -L "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=OPEN
 +                       [&offset=<LONG>][&length=<LONG>][&buffersize=<INT>]"
 +
 +    The request is then redirected to a datanode where the file data can be read:
 +    HTTP/1.1 307 TEMPORARY_REDIRECT
 +    Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=OPEN...
 +    Content-Length: 0
 +
 +    The client follows the redirect to the datanode and receives the file data:
 +    HTTP/1.1 HttpStatus.SC_OK OK
 +    Content-Type: application/octet-stream
 +    Content-Length: 22
 +
 +    Hello, webhdfs user!
 +    */
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .method( "GET" )
 +        .pathInfo( "/v1" + root + "/dir/file" )
 +        .queryParam( "op", "OPEN" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_TEMPORARY_REDIRECT )
 +        .header( "Location", driver.getRealUrl( "DATANODE" ) + "/v1" + root + "/dir/file?op=OPEN&user.name=hdfs" );
 +    driver.getMock( "DATANODE" )
 +        .expect()
 +        .method( "GET" )
 +        .pathInfo( "/v1" + root + "/dir/file" )
 +        .queryParam( "op", "OPEN" )
 +        .queryParam( "user.name", username )
 +        .respond()
 +        .status( HttpStatus.SC_OK )
 +        .contentType( "text/plain" )
 +        .content( driver.getResourceBytes( "test.txt" ) );
 +    given()
 +        //.log().all()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "OPEN" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_OK )
 +        .body( is( "TEST" ) )
 +        .when().get( driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file" );
 +    driver.assertComplete();
 +
 +    /* Delete the directory.
 +    curl -i -X DELETE "http://<host>:<port>/webhdfs/v1/<path>?op=DELETE
 +                                 [&recursive=<true|false>]"
 +
 +    The client receives a response with a boolean JSON object:
 +    HTTP/1.1 HttpStatus.SC_OK OK
 +    Content-Type: application/json
 +    Transfer-Encoding: chunked
 +
 +    {"boolean": true}
 +    */
 +    // Mock the interaction with the namenode.
 +    driver.getMock( "WEBHDFS" )
 +        .expect()
 +        .from( "testBasicHdfsUseCase-1" )
 +        .method( "DELETE" )
 +        .pathInfo( "/v1" + root )
 +        .queryParam( "op", "DELETE" )
 +        .queryParam( "user.name", username )
 +        .queryParam( "recursive", "true" )
 +        .respond()
 +        .status( HttpStatus.SC_OK );
 +    given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .queryParam( "op", "DELETE" )
 +        .queryParam( "recursive", "true" )
 +        .then()
 +        //.log().ifError()
 +        .statusCode( HttpStatus.SC_OK )
 +        .when().delete( driver.getUrl( "WEBHDFS" ) + "/v1" + root );
 +    driver.assertComplete();
 +    LOG_EXIT();
 +  }
 +
 +  // User hdfs in groups hadoop, hdfs
 +  // User mapred in groups hadoop, mapred
 +  // User hcat in group hcat
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testPmHdfsM1UseCase() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testPmHdfdM1UseCase";
 +    String userA = "hdfs";
 +    String passA = "hdfs-password";
 +    String userB = "mapred";
 +    String passB = "mapred-password";
 +    String userC = "hcat";
 +    String passC = "hcat-password";
 +    String groupA = "hdfs";
 +    String groupB = "mapred";
 +    String groupAB = "hadoop";
 +    String groupC = "hcat";
 +
 +    deleteFile( userA, passA, root, "true", 200 );
 +
 +    createDir( userA, passA, groupA, root + "/dirA700", "700", 200, 200 );
 +    createDir( userA, passA, groupA, root + "/dirA770", "770", 200, 200 );
 +    createDir( userA, passA, groupA, root + "/dirA707", "707", 200, 200 );
 +    createDir( userA, passA, groupA, root + "/dirA777", "777", 200, 200 );
 +    createDir( userA, passA, groupAB, root + "/dirAB700", "700", 200, 200 );
 +    createDir( userA, passA, groupAB, root + "/dirAB770", "770", 200, 200 );
 +    createDir( userA, passA, groupAB, root + "/dirAB707", "707", 200, 200 );
 +    createDir( userA, passA, groupAB, root + "/dirAB777", "777", 200, 200 );
 +
 +    // CREATE: Files
 +    // userA:groupA
 +    createFile( userA, passA, groupA, root + "/dirA700/fileA700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userA, passA, groupA, root + "/dirA770/fileA770", "770", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userA, passA, groupA, root + "/dirA707/fileA707", "707", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userA, passA, groupA, root + "/dirA777/fileA777", "777", "text/plain", "small1.txt", 307, 201, 200 );
 +    // userA:groupAB
 +    createFile( userA, passA, groupAB, root + "/dirAB700/fileAB700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userA, passA, groupAB, root + "/dirAB770/fileAB770", "770", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userA, passA, groupAB, root + "/dirAB707/fileAB707", "707", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userA, passA, groupAB, root + "/dirAB777/fileAB777", "777", "text/plain", "small1.txt", 307, 201, 200 );
 +    // userB:groupB
 +    createFile( userB, passB, groupB, root + "/dirA700/fileB700", "700", "text/plain", "small1.txt", 307, 403, 0 );
 +    createFile( userB, passB, groupB, root + "/dirA770/fileB700", "700", "text/plain", "small1.txt", 307, 403, 0 );
 +//kam:20130219[ chmod seems to be broken at least in Sandbox 1.2
 +//    createFile( userB, passB, groupB, root + "/dirA707/fileB700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +//    createFile( userB, passB, groupB, root + "/dirA777/fileB700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +//kam]
 +    // userB:groupAB
 +    createFile( userB, passB, groupAB, root + "/dirA700/fileBA700", "700", "text/plain", "small1.txt", 307, 403, 0 );
 +    createFile( userB, passB, groupAB, root + "/dirA770/fileBA700", "700", "text/plain", "small1.txt", 307, 403, 0 );
 +    createFile( userB, passB, groupAB, root + "/dirA707/fileBA700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +    createFile( userB, passB, groupAB, root + "/dirA777/fileBA700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +    // userC:groupC
 +    createFile( userC, passC, groupC, root + "/dirA700/fileC700", "700", "text/plain", "small1.txt", 307, 403, 0 );
 +    createFile( userC, passC, groupC, root + "/dirA770/fileC700", "700", "text/plain", "small1.txt", 307, 403, 0 );
 +//kam:20130219[ chmod seems to be broken at least in Sandbox 1.2
 +//    createFile( userC, passC, groupC, root + "/dirA707/fileC700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +//    createFile( userC, passC, groupC, root + "/dirA777/fileC700", "700", "text/plain", "small1.txt", 307, 201, 200 );
 +//kam]
 +
 +    // READ
 +    // userA
 +    readFile( userA, passA, root + "/dirA700/fileA700", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    readFile( userA, passA, root + "/dirA770/fileA770", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    readFile( userA, passA, root + "/dirA707/fileA707", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    readFile( userA, passA, root + "/dirA777/fileA777", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    // userB:groupB
 +    readFile( userB, passB, root + "/dirA700/fileA700", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userB, passB, root + "/dirA770/fileA770", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userB, passB, root + "/dirA707/fileA707", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    readFile( userB, passB, root + "/dirA777/fileA777", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    // userB:groupAB
 +    readFile( userB, passB, root + "/dirAB700/fileAB700", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userB, passB, root + "/dirAB770/fileAB770", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userB, passB, root + "/dirAB707/fileAB707", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userB, passB, root + "/dirAB777/fileAB777", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    // userC:groupC
 +    readFile( userC, passC, root + "/dirA700/fileA700", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userC, passC, root + "/dirA770/fileA770", "text/plain", "small1.txt", HttpStatus.SC_FORBIDDEN );
 +    readFile( userC, passC, root + "/dirA707/fileA707", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +    readFile( userC, passC, root + "/dirA777/fileA777", "text/plain", "small1.txt", HttpStatus.SC_OK );
 +
 +    //NEGATIVE: Test a bad password.
 +    if( driver.isUseGateway() ) {
 +      given()
 +          //.log().all()
 +          .auth().preemptive().basic( userA, "invalid-password" )
 +          .header("X-XSRF-Header", "jksdhfkhdsf")
 +          .queryParam( "op", "OPEN" )
 +          .then()
 +          //.log().all()
 +          .statusCode( HttpStatus.SC_UNAUTHORIZED )
 +          .when().get( driver.getUrl("WEBHDFS") + "/v1" + root + "/dirA700/fileA700" );
 +    }
 +    driver.assertComplete();
 +
 +    // UPDATE (Negative First)
 +    updateFile( userC, passC, root + "/dirA700/fileA700", "text/plain", "small2.txt", 307, 403 );
 +    updateFile( userB, passB, root + "/dirAB700/fileAB700", "text/plain", "small2.txt", 307, 403 );
 +    updateFile( userB, passB, root + "/dirAB770/fileAB700", "text/plain", "small2.txt", 307, 403 );
 +    updateFile( userB, passB, root + "/dirAB770/fileAB770", "text/plain", "small2.txt", 307, 403 );
 +    updateFile( userA, passA, root + "/dirA700/fileA700", "text/plain", "small2.txt", 307, 201 );
 +
 +    // DELETE (Negative First)
 +    deleteFile( userC, passC, root + "/dirA700/fileA700", "false", HttpStatus.SC_FORBIDDEN );
 +    deleteFile( userB, passB, root + "/dirAB700/fileAB700", "false", HttpStatus.SC_FORBIDDEN );
 +    deleteFile( userB, passB, root + "/dirAB770/fileAB770", "false", HttpStatus.SC_FORBIDDEN );
 +    deleteFile( userA, passA, root + "/dirA700/fileA700", "false", HttpStatus.SC_OK );
 +
 +    // Clean up anything that might have been left over from a previous failed run.
 +    deleteFile( userA, passA, root, "true", HttpStatus.SC_OK );
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testJavaMapReduceViaWebHCat() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testJavaMapReduceViaWebHCat";
 +    String user = "mapred";
 +    String pass = "mapred-password";
 +//    String user = "hcat";
 +//    String pass = "hcat-password";
 +//    String group = "hcat";
 +
 +    // Clean up anything that might have been left over from a previous failed run.
 +    deleteFile( user, pass, root, "true", HttpStatus.SC_OK );
 +
 +    /* Put the mapreduce code into HDFS. (hadoop-examples.jar)
 +    curl -X PUT --data-binary @hadoop-examples.jar 'http://192.168.1.163:8888/org.apache.knox.gateway/cluster/webhdfs/v1/user/hdfs/wordcount/hadoop-examples.jar?user.name=hdfs&op=CREATE'
 +     */
 +    createFile( user, pass, null, root+"/hadoop-examples.jar", "777", "application/octet-stream", findHadoopExamplesJar(), 307, 201, 200 );
 +
 +    /* Put the data file into HDFS (changes.txt)
 +    curl -X PUT --data-binary @changes.txt 'http://192.168.1.163:8888/org.apache.knox.gateway/cluster/webhdfs/v1/user/hdfs/wordcount/input/changes.txt?user.name=hdfs&op=CREATE'
 +     */
 +    createFile( user, pass, null, root+"/input/changes.txt", "777", "text/plain", "changes.txt", 307, 201, 200 );
 +
 +    /* Create the output directory
 +    curl -X PUT 'http://192.168.1.163:8888/org.apache.knox.gateway/cluster/webhdfs/v1/user/hdfs/wordcount/output?op=MKDIRS&user.name=hdfs'
 +    */
 +    createDir( user, pass, null, root+"/output", "777", 200, 200 );
 +
 +    /* Submit the job
 +    curl -d user.name=hdfs -d jar=wordcount/hadoop-examples.jar -d class=org.apache.hadoop.examples.WordCount -d arg=wordcount/input -d arg=wordcount/output 'http://localhost:8888/org.apache.knox.gateway/cluster/templeton/v1/mapreduce/jar'
 +    {"id":"job_201210301335_0059"}
 +    */
 +    String job = submitJava(
 +        user, pass,
 +        root+"/hadoop-examples.jar", "org.apache.org.apache.hadoop.examples.WordCount",
 +        root+"/input", root+"/output",
 +        200 );
 +
 +    /* Get the job status
 +    curl 'http://vm:50111/templeton/v1/queue/:jobid?user.name=hdfs'
 +    */
 +    queryQueue( user, pass, job );
 +
 +    // Can't really check the output here because the job won't be done yet.
 +    /* Retrieve results
 +    curl 'http://192.168.1.163:8888/org.apache.knox.gateway/cluster/webhdfs/v1/user/hdfs/wordcount/input?op=LISTSTATUS'
 +    */
 +
 +    if( CLEANUP_TEST ) {
 +      // Clean up anything that might have been left over from a previous failed run.
 +      deleteFile( user, pass, root, "true", HttpStatus.SC_OK );
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testPigViaWebHCat() throws IOException {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayWebHCatFuncTest/testPigViaWebHCat";
 +    String user = "mapred";
 +    String pass = "mapred-password";
 +    String group = "mapred";
 +
 +    // Cleanup if previous run failed.
 +    deleteFile( user, pass, root, "true", 200, 404 );
 +
 +    // Post the data to HDFS
 +    createFile( user, pass, null, root + "/passwd.txt", "777", "text/plain", "passwd.txt", 307, 201, 200 );
 +
 +    // Post the script to HDFS
 +    createFile( user, pass, null, root+"/script.pig", "777", "text/plain", "script.pig", 307, 201, 200 );
 +
 +    // Create the output directory
 +    createDir( user, pass, null, root + "/output", "777", 200, 200 );
 +
 +    // Submit the job
 +    submitPig( user, pass, group, root + "/script.pig", "-v", root + "/output", 200 );
 +
 +    // Check job status (if possible)
 +    // Check output (if possible)
 +
 +    // Cleanup
 +    deleteFile( user, pass, root, "true", 200 );
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHiveViaWebHCat() throws IOException {
 +    LOG_ENTER();
 +    String user = "hive";
 +    String pass = "hive-password";
 +    String group = "hive";
 +    String root = "/tmp/GatewayWebHCatFuncTest/testHiveViaWebHCat";
 +
 +    // Cleanup if previous run failed.
 +    deleteFile( user, pass, root, "true", 200, 404 );
 +
 +    // Post the data to HDFS
 +
 +    // Post the script to HDFS
 +    createFile(user, pass, null, root + "/script.hive", "777", "text/plain", "script.hive", 307, 201, 200);
 +
 +    // Submit the job
 +    submitHive(user, pass, group, root + "/script.hive", root + "/output", 200);
 +
 +    // Check job status (if possible)
 +    // Check output (if possible)
 +
 +    // Cleanup
 +    deleteFile( user, pass, root, "true", 200 );
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testOozieJobSubmission() throws Exception {
 +    LOG_ENTER();
 +    String root = "/tmp/GatewayBasicFuncTest/testOozieJobSubmission";
 +    String user = "hdfs";
 +    String pass = "hdfs-password";
 +    String group = "hdfs";
 +
 +    // Clean up anything that might have been left over from a previous failed run.
 +    deleteFile( user, pass, root, "true", HttpStatus.SC_OK );
 +
 +    /* Put the workflow definition into HDFS */
 +    createFile( user, pass, group, root+"/workflow.xml", "666", "application/octet-stream", "oozie-workflow.xml", 307, 201, 200 );
 +
 +    /* Put the mapreduce code into HDFS. (hadoop-examples.jar)
 +    curl -X PUT --data-binary @hadoop-examples.jar 'http://192.168.1.163:8888/org.apache.knox.gateway/cluster/webhdfs/v1/user/hdfs/wordcount/hadoop-examples.jar?user.name=hdfs&op=CREATE'
 +     */
 +    createFile( user, pass, group, root+"/lib/hadoop-examples.jar", "777", "application/octet-stream", findHadoopExamplesJar(), 307, 201, 200 );
 +
 +    /* Put the data file into HDFS (changes.txt)
 +    curl -X PUT --data-binary @changes.txt 'http://192.168.1.163:8888/org.apache.knox.gateway/cluster/webhdfs/v1/user/hdfs/wordcount/input/changes.txt?user.name=hdfs&op=CREATE'
 +     */
 +    createFile( user, pass, group, root+"/input/changes.txt", "666", "text/plain", "changes.txt", 307, 201, 200 );
 +
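 +    // Render the Oozie job submission XML from a Velocity template: the context
 +    // values below (user name, NameNode/JobTracker addresses, workflow paths) are
 +    // substituted into oozie-jobs-submit-request.xml before it is POSTed to Oozie.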
 +    VelocityEngine velocity = new VelocityEngine();
 +    velocity.setProperty( RuntimeConstants.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem" );
 +    velocity.setProperty( RuntimeConstants.RESOURCE_LOADER, "classpath" );
 +    velocity.setProperty( "classpath.resource.loader.class", ClasspathResourceLoader.class.getName() );
 +    velocity.init();
 +
 +    VelocityContext context = new VelocityContext();
 +    context.put( "userName", user );
 +    context.put( "nameNode", "hdfs://sandbox:8020" );
 +    context.put( "jobTracker", "sandbox:50300" );
 +    //context.put( "appPath", "hdfs://sandbox:8020" + root );
 +    context.put( "appPath", root );
 +    context.put( "inputDir", root + "/input" );
 +    context.put( "outputDir", root + "/output" );
 +
 +    //URL url = TestUtils.getResourceUrl( GatewayBasicFuncTest.class, "oozie-jobs-submit-request.xml" );
 +    //String name = url.toExternalForm();
 +    String name = TestUtils.getResourceName( this.getClass(), "oozie-jobs-submit-request.xml" );
 +    Template template = velocity.getTemplate( name );
 +    StringWriter sw = new StringWriter();
 +    template.merge( context, sw );
 +    String request = sw.toString();
 +    //System.out.println( "REQUEST=" + request );
 +
 +    /* Submit the job via Oozie. */
 +    String id = oozieSubmitJob( user, pass, request, 201 );
 +    //System.out.println( "ID=" + id );
 +
 +    String success = "SUCCEEDED";
 +    String status = "UNKNOWN";
 +    long delay = 1000 * 1; // 1 second.
 +    long limit = 1000 * 60; // 60 seconds.
 +    long start = System.currentTimeMillis();
 +    while( System.currentTimeMillis() <= start+limit ) {
 +      status = oozieQueryJobStatus( user, pass, id, 200 );
 +      //System.out.println( "Status=" + status );
 +      if( success.equalsIgnoreCase( status ) ) {
 +        break;
 +      } else {
 +        //System.out.println( "Status=" + status );
 +        Thread.sleep( delay );
 +      }
 +    }
 +    //System.out.println( "Status is " + status + " after " + ((System.currentTimeMillis()-start)/1000) + " seconds." );
 +    MatcherAssert.assertThat( status, is( success ) );
 +
 +    if( CLEANUP_TEST ) {
 +      // Clean up anything that might have been left over from a previous failed run.
 +      deleteFile( user, pass, root, "true", HttpStatus.SC_OK );
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testBasicHiveJDBCUseCase() throws IOException {
 +    LOG_ENTER();
 +    String username = "hive";
 +    String password = "hive-password";
 +
 +    // This use case emulates a simple JDBC scenario that consists of the following steps:
 +    // -open a connection;
 +    // -configure Hive using 'execute' statements (this also includes the execution of 'close operation' requests internally);
 +    // -execute a 'create table' command;
 +    // -execute a 'select from table' command;
 +    // Data insertion is omitted because it would cause a lot of additional commands during insertion/querying.
 +    // All binary data was intercepted during a real scenario run and stored into files as arrays of bytes.
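 +    // For reference, a minimal sketch of the client-side JDBC flow being emulated
 +    // below (the connection URL is hypothetical and not part of this test):
 +    //   Connection conn = DriverManager.getConnection(
 +    //       "jdbc:hive2://localhost:8443/;transportMode=http;httpPath=gateway/cluster/hive",
 +    //       "hive", "hive-password" );
 +    //   Statement stmt = conn.createStatement();
 +    //   stmt.execute( "set hive.fetch.output.serde=..." );        // configuration steps
 +    //   stmt.execute( "create table ..." );                       // DDL
 +    //   ResultSet rs = stmt.executeQuery( "select * from ..." );  // query, metadata, fetch
 +    //   rs.close(); stmt.close(); conn.close();                   // close operations/session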
 +
 +    // open session
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/open-session-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/open-session-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    Response response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/open-session-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/open-session-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/open-session-result.bin" ) ) );
 +
 +    driver.assertComplete();
 +
 +    // execute 'set hive.fetch.output.serde=...' (called internally by the JDBC driver)
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/execute-set-fetch-output-serde-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/execute-set-fetch-output-serde-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/execute-set-fetch-output-serde-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/execute-set-fetch-output-serde-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/execute-set-fetch-output-serde-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close operation for execute 'set hive.fetch.output.serde=...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-operation-1-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-operation-1-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-operation-1-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-operation-1-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-operation-1-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'set hive.server2.http.path=...' (called internally by the JDBC driver)
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/execute-set-server2-http-path-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/execute-set-server2-http-path-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/execute-set-server2-http-path-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/execute-set-server2-http-path-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/execute-set-server2-http-path-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close operation for execute 'set hive.server2.http.path=...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-operation-2-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-operation-2-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-operation-2-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-operation-2-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-operation-2-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'set hive.server2.servermode=...' (called internally by the JDBC driver)
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/execute-set-server2-servermode-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/execute-set-server2-servermode-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/execute-set-server2-servermode-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/execute-set-server2-servermode-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/execute-set-server2-servermode-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close operation for execute 'set hive.server2.servermode=...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-operation-3-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-operation-3-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-operation-3-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-operation-3-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-operation-3-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'set hive.security.authorization.enabled=...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/execute-set-security-authorization-enabled-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/execute-set-security-authorization-enabled-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/execute-set-security-authorization-enabled-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/execute-set-security-authorization-enabled-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/execute-set-security-authorization-enabled-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close operation for execute 'set hive.security.authorization.enabled=...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-operation-4-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-operation-4-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-operation-4-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-operation-4-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-operation-4-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'create table...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/execute-create-table-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/execute-create-table-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/execute-create-table-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/execute-create-table-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/execute-create-table-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close operation for execute 'create table...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-operation-5-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-operation-5-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-operation-5-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-operation-5-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-operation-5-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'select * from...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/execute-select-from-table-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/execute-select-from-table-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/execute-select-from-table-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/execute-select-from-table-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/execute-select-from-table-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'GetResultSetMetadata' (called internally by the JDBC driver)
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/get-result-set-metadata-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/get-result-set-metadata-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/get-result-set-metadata-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/get-result-set-metadata-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/get-result-set-metadata-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // execute 'FetchResults' (called internally by the JDBC driver)
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/fetch-results-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/fetch-results-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/fetch-results-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/fetch-results-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/fetch-results-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close operation for execute 'select * from...'
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-operation-6-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-operation-6-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-operation-6-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-operation-6-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-operation-6-result.bin" ) ) );
 +    driver.assertComplete();
 +
 +    // close session
 +    driver.getMock( "HIVE" )
 +        .expect()
 +        .method( "POST" )
 +        .content( driver.getResourceBytes( "hive/close-session-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .respond()
 +        .characterEncoding( "UTF-8" )
 +        .status( HttpStatus.SC_OK )
 +        .content( driver.getResourceBytes( "hive/close-session-result.bin" ) )
 +        .contentType( "application/x-thrift" );
 +    response = given()
 +        .auth().preemptive().basic( username, password )
 +        .header("X-XSRF-Header", "jksdhfkhdsf")
 +        .body( driver.getResourceBytes( "hive/close-session-request.bin" ) )
 +        .contentType( "application/x-thrift" )
 +        .then()
 +        .statusCode( HttpStatus.SC_OK )
 +        //.content( is( driver.getResourceBytes( "hive/close-session-result.bin" ) ) )
 +        .contentType( "application/x-thrift" )
 +        .when().post( driver.getUrl( "HIVE" ) );
 +    assertThat( response.body().asByteArray(), is( driver.getResourceBytes( "hive/close-session-result.bin" ) ) );
 +    driver.assertComplete();
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHBaseGetTableList() throws IOException {
 +    LOG_ENTER();
 +    String username = "hbase";
 +    String password = "hbase-password";
 +    String resourceName = "hbase/table-list";
 +
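 +    // The same table list is fetched three times, negotiating XML, JSON and
 +    // protobuf via the Accept header; each response is compared against the
 +    // corresponding stored resource (.xml/.json/.protobuf).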
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( "/" )
 +    .header( "Accept", ContentType.XML.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceBytes( resourceName + ".xml" ) )
 +    .contentType( ContentType.XML.toString() );
 +
 +    Response response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.XML.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.XML )
 +    .when().get( driver.getUrl( "WEBHBASE" ) );
 +
 +    MatcherAssert
 +        .assertThat(
 +            the( response.getBody().asString() ),
 +            isEquivalentTo( the( driver.getResourceString( resourceName + ".xml", UTF8 ) ) ) );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( "/" )
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceBytes( resourceName + ".json" ) )
 +    .contentType( ContentType.JSON.toString() );
 +
 +    response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.JSON )
 +    .when().get( driver.getUrl( "WEBHBASE" ) );
 +
 +    MatcherAssert
 +    .assertThat( response.getBody().asString(), sameJSONAs( driver.getResourceString( resourceName + ".json", UTF8 ) ) );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( "/" )
 +    .header( "Accept", "application/x-protobuf" )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceString( resourceName + ".protobuf", UTF8 ), UTF8 )
 +    .contentType( "application/x-protobuf" );
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", "application/x-protobuf" )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( "application/x-protobuf" )
 +    .body( is( driver.getResourceString( resourceName + ".protobuf", UTF8 ) ) )
 +    .when().get( driver.getUrl( "WEBHBASE" ) );
 +    driver.assertComplete();
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHBaseCreateTableAndVerifySchema() throws IOException {
 +    LOG_ENTER();
 +    String username = "hbase";
 +    String password = "hbase-password";
 +    String resourceName = "hbase/table-schema";
 +    String path = "/table/schema";
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "PUT" )
 +    .pathInfo( path )
 +    .respond()
 +    .status( HttpStatus.SC_CREATED )
 +    .content( driver.getResourceBytes( resourceName + ".xml" ) )
 +    .contentType( ContentType.XML.toString() )
 +    .header( "Location", driver.getRealUrl( "WEBHBASE" ) + path  );
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .then()
 +    .statusCode( HttpStatus.SC_CREATED )
 +    .contentType( ContentType.XML )
 +    .header( "Location", startsWith( driver.getUrl( "WEBHBASE" ) + path ) )
 +    .when().put(driver.getUrl("WEBHBASE") + path);
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "PUT" )
 +    .pathInfo( path )
 +    .respond()
 +    .status(HttpStatus.SC_CREATED)
 +    .content(driver.getResourceBytes(resourceName + ".json"))
 +    .contentType(ContentType.JSON.toString())
 +    .header("Location", driver.getRealUrl("WEBHBASE") + path);
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .then()
 +    .statusCode( HttpStatus.SC_CREATED )
 +    .contentType( ContentType.JSON )
 +    .header( "Location", startsWith( driver.getUrl( "WEBHBASE" ) + path ) )
 +    .when().put( driver.getUrl( "WEBHBASE" ) + path );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "PUT" )
 +    .pathInfo( path )
 +    .respond()
 +    .status( HttpStatus.SC_CREATED )
 +    .content( driver.getResourceBytes( resourceName + ".protobuf" ) )
 +    .contentType( "application/x-protobuf" )
 +    .header("Location", driver.getRealUrl("WEBHBASE") + path);
 +
 +    given()
 +    .auth().preemptive().basic(username, password)
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .then()
 +    .statusCode(HttpStatus.SC_CREATED)
 +    .contentType("application/x-protobuf")
 +    .header("Location", startsWith(driver.getUrl("WEBHBASE") + path))
 +    .when().put(driver.getUrl("WEBHBASE") + path);
 +    driver.assertComplete();
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHBaseGetTableSchema() throws IOException {
 +    LOG_ENTER();
 +    String username = "hbase";
 +    String password = "hbase-password";
 +    String resourceName = "hbase/table-metadata";
 +    String path = "/table/schema";
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( path )
 +    .header("Accept", ContentType.XML.toString())
 +    .respond()
 +    .status(HttpStatus.SC_OK)
 +    .content(driver.getResourceBytes(resourceName + ".xml"))
 +    .contentType(ContentType.XML.toString());
 +
 +    Response response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.XML.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.XML )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + path );
 +
 +    MatcherAssert
 +        .assertThat(
 +            the(response.getBody().asString()),
 +            isEquivalentTo(the(driver.getResourceString(resourceName + ".xml", UTF8))));
 +    driver.assertComplete();
 +
 +    driver.getMock("WEBHBASE")
 +    .expect()
 +    .method("GET")
 +    .pathInfo(path)
 +    .header("Accept", ContentType.JSON.toString())
 +    .respond()
 +    .status(HttpStatus.SC_OK)
 +    .content(driver.getResourceBytes(resourceName + ".json"))
 +    .contentType(ContentType.JSON.toString());
 +
 +    response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.JSON )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + path );
 +
 +    MatcherAssert
 +    .assertThat(response.getBody().asString(), sameJSONAs(driver.getResourceString(resourceName + ".json", UTF8)));
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( path )
 +    .header( "Accept", "application/x-protobuf" )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceBytes( resourceName + ".protobuf" ) )
 +    .contentType("application/x-protobuf");
 +
 +    response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", "application/x-protobuf" )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    //.content( is( driver.getResourceBytes( resourceName + ".protobuf" ) ) )
 +    .contentType( "application/x-protobuf" )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + path );
 +    // RestAssured seems to mishandle the binary comparison, so do it explicitly.
 +    assertThat( driver.getResourceBytes( resourceName + ".protobuf" ), is( response.body().asByteArray() ) );
 +    driver.assertComplete();
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHBaseInsertDataIntoTable() throws IOException {
 +    LOG_ENTER();
 +    String username = "hbase";
 +    String password = "hbase-password";
 +
 +    String resourceName = "hbase/table-data";
 +    String singleRowPath = "/table/testrow";
 +    String multipleRowPath = "/table/false-row-key";
 +
 +    //PUT request
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "PUT" )
 +    .pathInfo( multipleRowPath )
 +    //.header( "Content-Type", ContentType.XML.toString() )
 +    .content( driver.getResourceBytes( resourceName + ".xml" ) )
 +    .contentType( ContentType.XML.toString() )
 +    .respond()
 +    .status(HttpStatus.SC_OK);
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    //.header( "Content-Type", ContentType.XML.toString() )
 +    .body( driver.getResourceBytes( resourceName + ".xml" ) )
 +    .contentType( ContentType.XML.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().put(driver.getUrl("WEBHBASE") + multipleRowPath);
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "PUT" )
 +    .pathInfo( singleRowPath )
 +    //.header( "Content-Type", ContentType.JSON.toString() )
 +    .contentType( ContentType.JSON.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK );
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    //.header( "Content-Type", ContentType.JSON.toString() )
 +    .body( driver.getResourceBytes( resourceName + ".json" ) )
 +    .contentType( ContentType.JSON.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().put(driver.getUrl("WEBHBASE") + singleRowPath);
 +    driver.assertComplete();
 +
 +    driver.getMock("WEBHBASE")
 +    .expect()
 +    .method("PUT")
 +    .pathInfo(multipleRowPath)
 +    //.header( "Content-Type", "application/x-protobuf" )
 +    .contentType("application/x-protobuf")
 +    .content(driver.getResourceBytes(resourceName + ".protobuf"))
 +    .respond()
 +    .status(HttpStatus.SC_OK);
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    //.header( "Content-Type", "application/x-protobuf" )
 +    .body( driver.getResourceBytes( resourceName + ".protobuf" ) )
 +    .contentType( "application/x-protobuf" )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().put( driver.getUrl( "WEBHBASE" ) + multipleRowPath );
 +    driver.assertComplete();
 +
 +    //POST request
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "POST" )
 +    .pathInfo( multipleRowPath )
 +    //.header( "Content-Type", ContentType.XML.toString() )
 +    .content( driver.getResourceBytes( resourceName + ".xml" ) )
 +    .contentType( ContentType.XML.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK );
 +
 +    given()
 +      .auth().preemptive().basic( username, password )
 +      .header("X-XSRF-Header", "jksdhfkhdsf")
 +      //.header( "Content-Type", ContentType.XML.toString() )
 +      .body( driver.getResourceBytes( resourceName + ".xml" ) )
 +      .contentType( ContentType.XML.toString() )
 +      .then()
 +      .statusCode( HttpStatus.SC_OK )
 +      .when().post( driver.getUrl( "WEBHBASE" ) + multipleRowPath );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "POST" )
 +    .pathInfo( singleRowPath )
 +    //.header( "Content-Type", ContentType.JSON.toString() )
 +    .contentType( ContentType.JSON.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK );
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    //.header( "Content-Type", ContentType.JSON.toString() )
 +    .body( driver.getResourceBytes( resourceName + ".json" ) )
 +    .contentType( ContentType.JSON.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().post( driver.getUrl( "WEBHBASE" ) + singleRowPath );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "POST" )
 +    .pathInfo( multipleRowPath )
 +    //.header( "Content-Type", "application/x-protobuf" )
 +    .content( driver.getResourceBytes( resourceName + ".protobuf" ) )
 +    .contentType( "application/x-protobuf" )
 +    .respond()
 +    .status( HttpStatus.SC_OK );
 +
 +    given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    //.header( "Content-Type", "application/x-protobuf" )
 +    .body( driver.getResourceBytes( resourceName + ".protobuf" ) )
 +    .contentType( "application/x-protobuf" )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().post(driver.getUrl("WEBHBASE") + multipleRowPath);
 +    driver.assertComplete();
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHBaseDeleteDataFromTable() {
 +    LOG_ENTER();
 +    String username = "hbase";
 +    String password = "hbase-password";
 +
 +    String tableId = "table";
 +    String rowId = "row";
 +    String familyId = "family";
 +    String columnId = "column";
 +
 +    driver.getMock("WEBHBASE")
 +    .expect()
 +    .from("testHBaseDeleteDataFromTable-1")
 +    .method("DELETE")
 +    .pathInfo("/" + tableId + "/" + rowId)
 +    .respond()
 +    .status(HttpStatus.SC_OK);
 +
 +    given()
 +    .auth().preemptive().basic(username, password)
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().delete(driver.getUrl("WEBHBASE") + "/" + tableId + "/" + rowId);
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .from("testHBaseDeleteDataFromTable-2")
 +    .method("DELETE")
 +    .pathInfo("/" + tableId + "/" + rowId + "/" + familyId)
 +    .respond()
 +    .status( HttpStatus.SC_OK );
 +
 +    given()
 +    .auth().preemptive().basic(username, password)
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().delete(driver.getUrl("WEBHBASE") + "/" + tableId + "/" + rowId + "/" + familyId);
 +    driver.assertComplete();
 +
 +    driver.getMock("WEBHBASE")
 +    .expect()
 +    .from("testHBaseDeleteDataFromTable-3")
 +    .method("DELETE")
 +    .pathInfo("/" + tableId + "/" + rowId + "/" + familyId + ":" + columnId)
 +    .respond()
 +    .status(HttpStatus.SC_OK);
 +
 +    given()
 +    .auth().preemptive().basic(username, password)
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .when().delete(driver.getUrl("WEBHBASE") + "/" + tableId + "/" + rowId + "/" + familyId + ":" + columnId);
 +    driver.assertComplete();
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = TestUtils.MEDIUM_TIMEOUT )
 +  public void testHBaseQueryTableData() throws IOException {
 +    LOG_ENTER();
 +    String username = "hbase";
 +    String password = "hbase-password";
 +
 +    String resourceName = "hbase/table-data";
 +
 +    String allRowsPath = "/table/*";
 +    String rowsStartsWithPath = "/table/row*";
 +    String rowsWithKeyPath = "/table/row";
 +    String rowsWithKeyAndColumnPath = "/table/row/family:col";
 +
 +    driver.getMock("WEBHBASE")
 +    .expect()
 +    .method("GET")
 +    .pathInfo(allRowsPath)
 +    .header("Accept", ContentType.XML.toString())
 +    .respond()
 +    .status(HttpStatus.SC_OK)
 +    .content(driver.getResourceBytes(resourceName + ".xml"))
 +    .contentType(ContentType.XML.toString());
 +
 +    Response response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.XML.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.XML )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + allRowsPath );
 +
 +    MatcherAssert
 +    .assertThat(
 +        the(response.getBody().asString()),
 +        isEquivalentTo(the(driver.getResourceString(resourceName + ".xml", UTF8))));
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( rowsStartsWithPath )
 +    .header( "Accept", ContentType.XML.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceBytes( resourceName + ".xml" ) )
 +    .contentType(ContentType.XML.toString());
 +
 +    response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.XML.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.XML )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + rowsStartsWithPath );
 +
 +    MatcherAssert
 +    .assertThat(
 +        the(response.getBody().asString()),
 +        isEquivalentTo(the(driver.getResourceString(resourceName + ".xml", UTF8))));
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( rowsWithKeyPath )
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceBytes( resourceName + ".json" ) )
 +    .contentType( ContentType.JSON.toString() );
 +
 +    response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.JSON )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + rowsWithKeyPath );
 +
 +    MatcherAssert
 +    .assertThat( response.getBody().asString(), sameJSONAs( driver.getResourceString( resourceName + ".json", UTF8 ) ) );
 +    driver.assertComplete();
 +
 +    driver.getMock( "WEBHBASE" )
 +    .expect()
 +    .method( "GET" )
 +    .pathInfo( rowsWithKeyAndColumnPath )
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .respond()
 +    .status( HttpStatus.SC_OK )
 +    .content( driver.getResourceBytes( resourceName + ".json" ) )
 +    .contentType( ContentType.JSON.toString() );
 +
 +    response = given()
 +    .auth().preemptive().basic( username, password )
 +    .header("X-XSRF-Header", "jksdhfkhdsf")
 +    .header( "Accept", ContentType.JSON.toString() )
 +    .then()
 +    .statusCode( HttpStatus.SC_OK )
 +    .contentType( ContentType.JSON )
 +    .when().get( driver.getUrl( "WEBHBASE" ) + rowsWithKeyAndColumnPath );
 +
 +    MatcherAssert
 +    .assertThat( response.getBody().asString(), sameJSONAs( driver.getResourceString( resourceName + ".json", UTF8 ) ) );
 +    driver.assertComplete();
 +    LOG_EXIT();
 

<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/knox/blob/22a7304a/pom.xml
----------------------------------------------------------------------


[52/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
Merge branch 'master' into KNOX-998-Package_Restructuring

# Conflicts:
#	gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/e5fd0622
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/e5fd0622
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/e5fd0622

Branch: refs/heads/master
Commit: e5fd0622493a7e3c62811ea03ff7931c979dd87a
Parents: e766b3b 99e6a54
Author: Sandeep More <mo...@apache.org>
Authored: Tue Jan 9 14:25:16 2018 -0500
Committer: Sandeep More <mo...@apache.org>
Committed: Tue Jan 9 14:25:16 2018 -0500

----------------------------------------------------------------------
 LICENSE                                         |  40 ++++-
 NOTICE                                          |   4 +-
 .../discovery/ambari/ServiceURLCreator.java     |  32 ++++
 .../discovery/ambari/ServiceURLFactory.java     |  75 +++++++++
 .../discovery/ambari/WebHdfsUrlCreator.java     |  84 ++++++++++
 .../discovery/ambari/AmbariClientCommon.java    |  14 +-
 .../discovery/ambari/AmbariCluster.java         |   6 +-
 .../ambari/AmbariConfigurationMonitor.java      |  52 +++++--
 .../ambari/AmbariDynamicServiceURLCreator.java  |   4 +-
 .../discovery/ambari/PropertyEqualsHandler.java |  20 ++-
 .../ambari/ServiceURLPropertyConfig.java        |   7 +-
 .../ambari-service-discovery-url-mappings.xml   |  24 +--
 .../AmbariDynamicServiceURLCreatorTest.java     | 116 +++++++++-----
 gateway-release/src/assembly.xml                |   1 +
 gateway-server/pom.xml                          |   2 +-
 .../filter/PortMappingHelperHandler.java        |   2 +-
 .../topology/impl/DefaultTopologyService.java   |  40 +++--
 .../DefaultRemoteConfigurationMonitor.java      |  22 ++-
 .../org/apache/knox/gateway/util/KnoxCLI.java   | 153 ++++++++++++-------
 .../ZooKeeperConfigurationMonitorTest.java      |  17 ++-
 .../apache/knox/gateway/util/KnoxCLITest.java   |  16 ++
 gateway-service-remoteconfig/pom.xml            |   5 -
 gateway-test-release/pom.xml                    |  72 ++++++++-
 pom.xml                                         |  32 +++-
 24 files changed, 690 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
index 9e5dcb3,0000000..1314305
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClientCommon.java
@@@ -1,102 -1,0 +1,108 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import net.minidev.json.JSONArray;
 +import net.minidev.json.JSONObject;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
 +
 +import java.util.HashMap;
 +import java.util.Map;
 +
 +class AmbariClientCommon {
 +
 +    static final String AMBARI_CLUSTERS_URI = "/api/v1/clusters";
 +
 +    static final String AMBARI_HOSTROLES_URI =
 +                                    AMBARI_CLUSTERS_URI + "/%s/services?fields=components/host_components/HostRoles";
 +
 +    static final String AMBARI_SERVICECONFIGS_URI =
 +                                    AMBARI_CLUSTERS_URI + "/%s/configurations/service_config_versions?is_current=true";
 +
 +    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    private RESTInvoker restClient;
 +
 +
 +    AmbariClientCommon(AliasService aliasService) {
 +        this(new RESTInvoker(aliasService));
 +    }
 +
 +
 +    AmbariClientCommon(RESTInvoker restInvoker) {
 +        this.restClient = restInvoker;
 +    }
 +
 +
 +
 +    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> getActiveServiceConfigurations(String clusterName,
 +                                                                                                ServiceDiscoveryConfig config) {
-         return getActiveServiceConfigurations(config.getAddress(),
-                                               clusterName,
-                                               config.getUser(),
-                                               config.getPasswordAlias());
++        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> activeConfigs = null;
++
++        if (config != null) {
++            activeConfigs = getActiveServiceConfigurations(config.getAddress(),
++                                                           clusterName,
++                                                           config.getUser(),
++                                                           config.getPasswordAlias());
++        }
++
++        return activeConfigs;
 +    }
 +
 +
 +    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> getActiveServiceConfigurations(String discoveryAddress,
 +                                                                                                String clusterName,
 +                                                                                                String discoveryUser,
 +                                                                                                String discoveryPwdAlias) {
 +        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigurations = new HashMap<>();
 +
 +        String serviceConfigsURL = String.format("%s" + AMBARI_SERVICECONFIGS_URI, discoveryAddress, clusterName);
 +
 +        JSONObject serviceConfigsJSON = restClient.invoke(serviceConfigsURL, discoveryUser, discoveryPwdAlias);
 +        if (serviceConfigsJSON != null) {
 +            // Process the service configurations
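 +            // Expected response shape (illustrative): items[*].service_name and items[*].configurations[*].{type, version, properties}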
 +            JSONArray serviceConfigs = (JSONArray) serviceConfigsJSON.get("items");
 +            for (Object serviceConfig : serviceConfigs) {
 +                String serviceName = (String) ((JSONObject) serviceConfig).get("service_name");
 +                JSONArray configurations = (JSONArray) ((JSONObject) serviceConfig).get("configurations");
 +                for (Object configuration : configurations) {
 +                    String configType = (String) ((JSONObject) configuration).get("type");
 +                    String configVersion = String.valueOf(((JSONObject) configuration).get("version"));
 +
 +                    Map<String, String> configProps = new HashMap<>();
 +                    JSONObject configProperties = (JSONObject) ((JSONObject) configuration).get("properties");
 +                    for (String propertyName : configProperties.keySet()) {
 +                        configProps.put(propertyName, String.valueOf(configProperties.get(propertyName)));
 +                    }
 +                    if (!serviceConfigurations.containsKey(serviceName)) {
 +                        serviceConfigurations.put(serviceName, new HashMap<>());
 +                    }
 +                    serviceConfigurations.get(serviceName).put(configType,
 +                                                               new AmbariCluster.ServiceConfiguration(configType,
 +                                                                                                      configVersion,
 +                                                                                                      configProps));
 +                }
 +            }
 +        }
 +
 +        return serviceConfigurations;
 +    }
 +
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
index bcf3adc,0000000..9d3fa74
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
@@@ -1,120 -1,0 +1,120 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +class AmbariCluster implements ServiceDiscovery.Cluster {
 +
 +    private String name = null;
 +
-     private AmbariDynamicServiceURLCreator urlCreator;
++    private ServiceURLFactory urlFactory;
 +
 +    private Map<String, Map<String, ServiceConfiguration>> serviceConfigurations = new HashMap<>();
 +
 +    private Map<String, AmbariComponent> components = null;
 +
 +
 +    AmbariCluster(String name) {
 +        this.name = name;
 +        components = new HashMap<>();
-         urlCreator = new AmbariDynamicServiceURLCreator(this);
++        urlFactory = ServiceURLFactory.newInstance(this);
 +    }
 +
 +    void addServiceConfiguration(String serviceName, String configurationType, ServiceConfiguration serviceConfig) {
 +        if (!serviceConfigurations.containsKey(serviceName)) {
 +            serviceConfigurations.put(serviceName, new HashMap<>());
 +        }
 +        serviceConfigurations.get(serviceName).put(configurationType, serviceConfig);
 +    }
 +
 +
 +    void addComponent(AmbariComponent component) {
 +        components.put(component.getName(), component);
 +    }
 +
 +
 +    ServiceConfiguration getServiceConfiguration(String serviceName, String configurationType) {
 +        ServiceConfiguration sc = null;
 +        Map<String, ServiceConfiguration> configs = serviceConfigurations.get(serviceName);
 +        if (configs != null) {
 +            sc = configs.get(configurationType);
 +        }
 +        return sc;
 +    }
 +
 +
 +    Map<String, Map<String, ServiceConfiguration>> getServiceConfigurations() {
 +        return serviceConfigurations;
 +    }
 +
 +
 +    Map<String, AmbariComponent> getComponents() {
 +        return components;
 +    }
 +
 +
 +    AmbariComponent getComponent(String name) {
 +        return components.get(name);
 +    }
 +
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +
 +    @Override
 +    public List<String> getServiceURLs(String serviceName) {
 +        List<String> urls = new ArrayList<>();
-         urls.addAll(urlCreator.create(serviceName));
++        urls.addAll(urlFactory.create(serviceName));
 +        return urls;
 +    }
 +
 +
 +    static class ServiceConfiguration {
 +
 +        private String type;
 +        private String version;
 +        private Map<String, String> props;
 +
 +        ServiceConfiguration(String type, String version, Map<String, String> properties) {
 +            this.type = type;
 +            this.version = version;
 +            this.props = properties;
 +        }
 +
 +        public String getVersion() {
 +            return version;
 +        }
 +
 +        public String getType() {
 +            return type;
 +        }
 +
 +        public Map<String, String> getProperties() {
 +            return props;
 +        }
 +    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
index c3aa27a,0000000..920b05c7
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariConfigurationMonitor.java
@@@ -1,525 -1,0 +1,559 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryConfig;
 +
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +import java.util.concurrent.locks.ReadWriteLock;
 +import java.util.concurrent.locks.ReentrantReadWriteLock;
 +
 +
 +class AmbariConfigurationMonitor implements ClusterConfigurationMonitor {
 +
 +    private static final String TYPE = "Ambari";
 +
 +    private static final String CLUSTERS_DATA_DIR_NAME = "clusters";
 +
 +    private static final String PERSISTED_FILE_COMMENT = "Generated File. Do Not Edit!";
 +
 +    private static final String PROP_CLUSTER_PREFIX = "cluster.";
 +    private static final String PROP_CLUSTER_SOURCE = PROP_CLUSTER_PREFIX + "source";
 +    private static final String PROP_CLUSTER_NAME   = PROP_CLUSTER_PREFIX + "name";
 +    private static final String PROP_CLUSTER_USER   = PROP_CLUSTER_PREFIX + "user";
 +    private static final String PROP_CLUSTER_ALIAS  = PROP_CLUSTER_PREFIX + "pwd.alias";
 +
 +    static final String INTERVAL_PROPERTY_NAME = "org.apache.knox.gateway.topology.discovery.ambari.monitor.interval";
 +
 +
 +    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    // Ambari address
 +    //    clusterName -> ServiceDiscoveryConfig
 +    //
 +    Map<String, Map<String, ServiceDiscoveryConfig>> clusterMonitorConfigurations = new HashMap<>();
 +
 +    // Ambari address
 +    //    clusterName
 +    //        configType -> version
 +    //
 +    Map<String, Map<String, Map<String, String>>> ambariClusterConfigVersions = new HashMap<>();
 +
 +    ReadWriteLock configVersionsLock = new ReentrantReadWriteLock();
 +
 +    private List<ConfigurationChangeListener> changeListeners = new ArrayList<>();
 +
 +    private AmbariClientCommon ambariClient;
 +
 +    PollingConfigAnalyzer internalMonitor;
 +
 +    GatewayConfig gatewayConfig = null;
 +
 +    static String getType() {
 +        return TYPE;
 +    }
 +
 +    AmbariConfigurationMonitor(GatewayConfig config, AliasService aliasService) {
 +        this.gatewayConfig   = config;
 +        this.ambariClient    = new AmbariClientCommon(aliasService);
 +        this.internalMonitor = new PollingConfigAnalyzer(this);
 +
 +        // Override the default polling interval if it has been configured
 +        int interval = config.getClusterMonitorPollingInterval(getType());
 +        if (interval > 0) {
 +            setPollingInterval(interval);
 +        }
 +
 +        init();
 +    }
 +
 +    @Override
 +    public void setPollingInterval(int interval) {
 +        internalMonitor.setInterval(interval);
 +    }
 +
 +    private void init() {
 +        loadDiscoveryConfiguration();
 +        loadClusterVersionData();
 +    }
 +
 +    /**
 +     * Load any previously-persisted service discovery configurations.
 +     * This is necessary for checking previously-deployed topologies.
 +     */
 +    private void loadDiscoveryConfiguration() {
 +        File persistenceDir = getPersistenceDir();
 +        if (persistenceDir != null) {
 +            Collection<File> persistedConfigs = FileUtils.listFiles(persistenceDir, new String[]{"conf"}, false);
 +            for (File persisted : persistedConfigs) {
 +                Properties props = new Properties();
++                FileInputStream in = null;
 +                try {
-                     props.load(new FileInputStream(persisted));
++                    in = new FileInputStream(persisted);
++                    props.load(in);
 +
 +                    addDiscoveryConfig(props.getProperty(PROP_CLUSTER_NAME), new ServiceDiscoveryConfig() {
 +                                                            public String getAddress() {
 +                                                                return props.getProperty(PROP_CLUSTER_SOURCE);
 +                                                            }
 +
 +                                                            public String getUser() {
 +                                                                return props.getProperty(PROP_CLUSTER_USER);
 +                                                            }
 +
 +                                                            public String getPasswordAlias() {
 +                                                                return props.getProperty(PROP_CLUSTER_ALIAS);
 +                                                            }
 +                                                        });
 +                } catch (IOException e) {
 +                    log.failedToLoadClusterMonitorServiceDiscoveryConfig(getType(), e);
++                } finally {
++                    if (in != null) {
++                        try {
++                            in.close();
++                        } catch (IOException e) {
++                            // Best-effort close; ignore
++                        }
++                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    /**
 +     * Load any previously-persisted cluster configuration version records, so the monitor will check
 +     * previously-deployed topologies against the current cluster configuration.
 +     */
 +    private void loadClusterVersionData() {
 +        File persistenceDir = getPersistenceDir();
 +        if (persistenceDir != null) {
-             Collection<File> persistedConfigs = FileUtils.listFiles(getPersistenceDir(), new String[]{"ver"}, false);
++            Collection<File> persistedConfigs = FileUtils.listFiles(persistenceDir, new String[]{"ver"}, false);
 +            for (File persisted : persistedConfigs) {
 +                Properties props = new Properties();
++                FileInputStream in = null;
 +                try {
-                     props.load(new FileInputStream(persisted));
++                    in = new FileInputStream(persisted);
++                    props.load(in);
 +
 +                    String source = props.getProperty(PROP_CLUSTER_SOURCE);
 +                    String clusterName = props.getProperty(PROP_CLUSTER_NAME);
 +
 +                    Map<String, String> configVersions = new HashMap<>();
 +                    for (String name : props.stringPropertyNames()) {
 +                        if (!name.startsWith(PROP_CLUSTER_PREFIX)) { // Ignore implementation-specific properties
 +                            configVersions.put(name, props.getProperty(name));
 +                        }
 +                    }
 +
 +                    // Map the config versions to the cluster name
 +                    addClusterConfigVersions(source, clusterName, configVersions);
 +
 +                } catch (IOException e) {
 +                    log.failedToLoadClusterMonitorConfigVersions(getType(), e);
++                } finally {
++                    if (in != null) {
++                        try {
++                            in.close();
++                        } catch (IOException e) {
++                            // Best-effort close; ignore
++                        }
++                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    private void persistDiscoveryConfiguration(String clusterName, ServiceDiscoveryConfig sdc) {
 +        File persistenceDir = getPersistenceDir();
 +        if (persistenceDir != null) {
 +
 +            Properties props = new Properties();
 +            props.setProperty(PROP_CLUSTER_NAME, clusterName);
 +            props.setProperty(PROP_CLUSTER_SOURCE, sdc.getAddress());
 +
 +            String username = sdc.getUser();
 +            if (username != null) {
 +                props.setProperty(PROP_CLUSTER_USER, username);
 +            }
 +            String pwdAlias = sdc.getPasswordAlias();
 +            if (pwdAlias != null) {
 +                props.setProperty(PROP_CLUSTER_ALIAS, pwdAlias);
 +            }
 +
 +            persist(props, getDiscoveryConfigPersistenceFile(sdc.getAddress(), clusterName));
 +        }
 +    }
 +
 +    private void persistClusterVersionData(String address, String clusterName, Map<String, String> configVersions) {
 +        File persistenceDir = getPersistenceDir();
 +        if (persistenceDir != null) {
 +            Properties props = new Properties();
 +            props.setProperty(PROP_CLUSTER_NAME, clusterName);
 +            props.setProperty(PROP_CLUSTER_SOURCE, address);
 +            for (String name : configVersions.keySet()) {
 +                props.setProperty(name, configVersions.get(name));
 +            }
 +
 +            persist(props, getConfigVersionsPersistenceFile(address, clusterName));
 +        }
 +    }
 +
 +    private void persist(Properties props, File dest) {
++        FileOutputStream out = null;
 +        try {
-             props.store(new FileOutputStream(dest), PERSISTED_FILE_COMMENT);
++            out = new FileOutputStream(dest);
++            props.store(out, PERSISTED_FILE_COMMENT);
++            out.flush();
 +        } catch (Exception e) {
 +            log.failedToPersistClusterMonitorData(getType(), dest.getAbsolutePath(), e);
++        } finally {
++            if (out != null) {
++                try {
++                    out.close();
++                } catch (IOException e) {
++                    // Best-effort close; ignore
++                }
++            }
 +        }
 +    }
 +
 +    private File getPersistenceDir() {
 +        File persistenceDir = null;
 +
 +        File dataDir = new File(gatewayConfig.getGatewayDataDir());
 +        if (dataDir.exists()) {
 +            File clustersDir = new File(dataDir, CLUSTERS_DATA_DIR_NAME);
 +            if (!clustersDir.exists()) {
 +                clustersDir.mkdirs();
 +            }
 +            persistenceDir = clustersDir;
 +        }
 +
 +        return persistenceDir;
 +    }
 +
 +    private File getDiscoveryConfigPersistenceFile(String address, String clusterName) {
 +        return getPersistenceFile(address, clusterName, "conf");
 +    }
 +
 +    private File getConfigVersionsPersistenceFile(String address, String clusterName) {
 +        return getPersistenceFile(address, clusterName, "ver");
 +    }
 +
 +    private File getPersistenceFile(String address, String clusterName, String ext) {
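 +        // Derive a filesystem-safe name from the address and cluster name; e.g. (illustrative)
 +        // an address of http://host:8080 and cluster "mycluster" yield "http___host_8080-mycluster.<ext>"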
 +        String fileName = address.replace(":", "_").replace("/", "_") + "-" + clusterName + "." + ext;
 +        return new File(getPersistenceDir(), fileName);
 +    }
 +
 +    /**
 +     * Add cluster configuration details to the monitor's in-memory record.
 +     *
 +     * @param address        An Ambari instance address.
 +     * @param clusterName    The name of a cluster associated with the Ambari instance.
 +     * @param configVersions A Map of configuration types and their corresponding versions.
 +     */
 +    private void addClusterConfigVersions(String address, String clusterName, Map<String, String> configVersions) {
 +        configVersionsLock.writeLock().lock();
 +        try {
 +            ambariClusterConfigVersions.computeIfAbsent(address, k -> new HashMap<>())
 +                                       .put(clusterName, configVersions);
 +        } finally {
 +            configVersionsLock.writeLock().unlock();
 +        }
 +    }
 +
 +    public void start() {
 +        (new Thread(internalMonitor, "AmbariConfigurationMonitor")).start();
 +    }
 +
 +    public void stop() {
 +        internalMonitor.stop();
 +    }
 +
 +    @Override
 +    public void addListener(ConfigurationChangeListener listener) {
 +        changeListeners.add(listener);
 +    }
 +
 +    /**
 +     * Add discovery configuration details for the specified cluster, so the monitor knows how to connect to check for
 +     * changes.
 +     *
 +     * @param clusterName The name of the cluster.
 +     * @param config      The associated service discovery configuration.
 +     */
 +    void addDiscoveryConfig(String clusterName, ServiceDiscoveryConfig config) {
 +        clusterMonitorConfigurations.computeIfAbsent(config.getAddress(), k -> new HashMap<>()).put(clusterName, config);
 +    }
 +
 +
 +    /**
 +     * Get the service discovery configuration associated with the specified Ambari instance and cluster.
 +     *
 +     * @param address     An Ambari instance address.
 +     * @param clusterName The name of a cluster associated with the Ambari instance.
 +     *
 +     * @return The associated ServiceDiscoveryConfig object.
 +     */
 +    ServiceDiscoveryConfig getDiscoveryConfig(String address, String clusterName) {
 +        ServiceDiscoveryConfig config = null;
 +        if (clusterMonitorConfigurations.containsKey(address)) {
 +            config = clusterMonitorConfigurations.get(address).get(clusterName);
 +        }
 +        return config;
 +    }
 +
 +
 +    /**
 +     * Add cluster configuration data to the monitor, which it will use when determining if configuration has changed.
 +     *
 +     * @param cluster         An AmbariCluster object.
 +     * @param discoveryConfig The discovery configuration associated with the cluster.
 +     */
 +    void addClusterConfigVersions(AmbariCluster cluster, ServiceDiscoveryConfig discoveryConfig) {
 +
 +        String clusterName = cluster.getName();
 +
 +        // Register the cluster discovery configuration for the monitor connections
 +        persistDiscoveryConfiguration(clusterName, discoveryConfig);
 +        addDiscoveryConfig(clusterName, discoveryConfig);
 +
 +        // Build the set of configuration versions
 +        Map<String, String> configVersions = new HashMap<>();
 +        Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs = cluster.getServiceConfigurations();
 +        for (String serviceName : serviceConfigs.keySet()) {
 +            Map<String, AmbariCluster.ServiceConfiguration> configTypeVersionMap = serviceConfigs.get(serviceName);
 +            for (AmbariCluster.ServiceConfiguration config : configTypeVersionMap.values()) {
 +                String configType = config.getType();
 +                String version = config.getVersion();
 +                configVersions.put(configType, version);
 +            }
 +        }
 +
 +        persistClusterVersionData(discoveryConfig.getAddress(), clusterName, configVersions);
 +        addClusterConfigVersions(discoveryConfig.getAddress(), clusterName, configVersions);
 +    }
 +
 +
 +    /**
 +     * Remove the configuration record for the specified Ambari instance and cluster name.
 +     *
 +     * @param address     An Ambari instance address.
 +     * @param clusterName The name of a cluster associated with the Ambari instance.
 +     *
 +     * @return The removed data; A Map of configuration types and their corresponding versions.
 +     */
 +    Map<String, String> removeClusterConfigVersions(String address, String clusterName) {
 +        Map<String, String> result = new HashMap<>();
 +
 +        configVersionsLock.writeLock().lock();
 +        try {
 +            if (ambariClusterConfigVersions.containsKey(address)) {
 +                Map<String, String> removed = ambariClusterConfigVersions.get(address).remove(clusterName);
 +                if (removed != null) {
 +                    result.putAll(removed);
 +                }
 +            }
 +        } finally {
 +            configVersionsLock.writeLock().unlock();
 +        }
 +
 +        // Delete the associated persisted record
 +        File persisted = getConfigVersionsPersistenceFile(address, clusterName);
 +        if (persisted.exists()) {
 +            persisted.delete();
 +        }
 +
 +        return result;
 +    }
 +
 +    /**
 +     * Get the cluster configuration details for the specified cluster and Ambari instance.
 +     *
 +     * @param address     An Ambari instance address.
 +     * @param clusterName The name of a cluster associated with the Ambari instance.
 +     *
 +     * @return A Map of configuration types and their corresponding versions.
 +     */
 +    Map<String, String> getClusterConfigVersions(String address, String clusterName) {
 +        Map<String, String> result = new HashMap<>();
 +
 +        configVersionsLock.readLock().lock();
 +        try {
 +            if (ambariClusterConfigVersions.containsKey(address)) {
 +                Map<String, String> versions = ambariClusterConfigVersions.get(address).get(clusterName);
 +                if (versions != null) {
 +                    result.putAll(versions);
 +                }
 +            }
 +        } finally {
 +            configVersionsLock.readLock().unlock();
 +        }
 +
 +        return result;
 +    }
 +
 +
 +    /**
 +     * Get all the clusters the monitor knows about.
 +     *
 +     * @return A Map of Ambari instance addresses to associated cluster names.
 +     */
 +    Map<String, List<String>> getClusterNames() {
 +        Map<String, List<String>> result = new HashMap<>();
 +
 +        configVersionsLock.readLock().lock();
 +        try {
 +            for (String address : ambariClusterConfigVersions.keySet()) {
 +                List<String> clusterNames = new ArrayList<>();
 +                clusterNames.addAll(ambariClusterConfigVersions.get(address).keySet());
 +                result.put(address, clusterNames);
 +            }
 +        } finally {
 +            configVersionsLock.readLock().unlock();
 +        }
 +
 +        return result;
 +
 +    }
 +
 +
 +    /**
 +     * Notify registered change listeners.
 +     *
 +     * @param source      The address of the Ambari instance from which the cluster details were determined.
 +     * @param clusterName The name of the cluster whose configuration details have changed.
 +     */
 +    void notifyChangeListeners(String source, String clusterName) {
 +        for (ConfigurationChangeListener listener : changeListeners) {
 +            listener.onConfigurationChange(source, clusterName);
 +        }
 +    }
 +
 +
 +    /**
 +     * Request the current active configuration version info from Ambari.
 +     *
 +     * @param address     The Ambari instance address.
 +     * @param clusterName The name of the cluster for which the details are desired.
 +     *
 +     * @return A Map of service configuration types and their corresponding versions.
 +     */
 +    Map<String, String> getUpdatedConfigVersions(String address, String clusterName) {
 +        Map<String, String> configVersions = new HashMap<>();
 +
-         Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs =
-                     ambariClient.getActiveServiceConfigurations(clusterName, getDiscoveryConfig(address, clusterName));
++        ServiceDiscoveryConfig sdc = getDiscoveryConfig(address, clusterName);
++        if (sdc != null) {
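++            // Ask Ambari for the currently-active configurations and record each config type's version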
++            Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs =
++                                                       ambariClient.getActiveServiceConfigurations(clusterName, sdc);
 +
-         for (Map<String, AmbariCluster.ServiceConfiguration> serviceConfig : serviceConfigs.values()) {
-             for (AmbariCluster.ServiceConfiguration config : serviceConfig.values()) {
-                 configVersions.put(config.getType(), config.getVersion());
++            for (Map<String, AmbariCluster.ServiceConfiguration> serviceConfig : serviceConfigs.values()) {
++                for (AmbariCluster.ServiceConfiguration config : serviceConfig.values()) {
++                    configVersions.put(config.getType(), config.getVersion());
++                }
 +            }
 +        }
 +
 +        return configVersions;
 +    }
 +
 +
 +    /**
 +     * The thread that polls Ambari for configuration details for clusters associated with discovered topologies,
 +     * compares them with the current recorded values, and notifies any listeners when differences are discovered.
 +     */
 +    static final class PollingConfigAnalyzer implements Runnable {
 +
 +        private static final int DEFAULT_POLLING_INTERVAL = 60;
 +
 +        // Polling interval in seconds
 +        private int interval = DEFAULT_POLLING_INTERVAL;
 +
 +        private AmbariConfigurationMonitor delegate;
 +
 +        private volatile boolean isActive = false;
 +
 +        PollingConfigAnalyzer(AmbariConfigurationMonitor delegate) {
 +            this.delegate = delegate;
 +            this.interval = Integer.getInteger(INTERVAL_PROPERTY_NAME, PollingConfigAnalyzer.DEFAULT_POLLING_INTERVAL);
 +        }
 +
 +        void setInterval(int interval) {
 +            this.interval = interval;
 +        }
 +
 +
 +        void stop() {
 +            isActive = false;
 +        }
 +
 +        @Override
 +        public void run() {
 +            isActive = true;
 +
 +            log.startedAmbariConfigMonitor(interval);
 +
 +            while (isActive) {
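 +                // For each known cluster, compare the recorded config versions with Ambari's current ones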
 +                for (Map.Entry<String, List<String>> entry : delegate.getClusterNames().entrySet()) {
 +                    String address = entry.getKey();
 +                    for (String clusterName : entry.getValue()) {
 +                        Map<String, String> configVersions = delegate.getClusterConfigVersions(address, clusterName);
 +                        if (configVersions != null && !configVersions.isEmpty()) {
 +                            Map<String, String> updatedVersions = delegate.getUpdatedConfigVersions(address, clusterName);
 +                            if (updatedVersions != null && !updatedVersions.isEmpty()) {
 +                                boolean configHasChanged = false;
 +
 +                                // If the config sets don't match in size, then something has changed
 +                                if (updatedVersions.size() != configVersions.size()) {
 +                                    configHasChanged = true;
 +                                } else {
 +                                    // Perform the comparison of all the config versions
 +                                    for (Map.Entry<String, String> configVersion : configVersions.entrySet()) {
 +                                        // Compare through the recorded value to avoid an NPE if a config type was removed
 +                                        if (!configVersion.getValue().equals(updatedVersions.get(configVersion.getKey()))) {
 +                                            configHasChanged = true;
 +                                            break;
 +                                        }
 +                                    }
 +                                }
 +
 +                                // If a change has occurred, notify the listeners
 +                                if (configHasChanged) {
 +                                    delegate.notifyChangeListeners(address, clusterName);
 +                                }
 +                            }
 +                        }
 +                    }
 +                }
 +
 +                try {
 +                    Thread.sleep(interval * 1000L);
 +                } catch (InterruptedException e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
index 3c2269d,0000000..dc4ac49
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariDynamicServiceURLCreator.java
@@@ -1,151 -1,0 +1,151 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.IOException;
 +import java.nio.charset.StandardCharsets;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +
- class AmbariDynamicServiceURLCreator {
++class AmbariDynamicServiceURLCreator implements ServiceURLCreator {
 +
 +    static final String MAPPING_CONFIG_OVERRIDE_PROPERTY = "org.apache.gateway.topology.discovery.ambari.config";
 +
 +    private AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    private AmbariCluster cluster = null;
 +    private ServiceURLPropertyConfig config;
 +
 +    AmbariDynamicServiceURLCreator(AmbariCluster cluster) {
 +        this.cluster = cluster;
 +
 +        String mappingConfiguration = System.getProperty(MAPPING_CONFIG_OVERRIDE_PROPERTY);
 +        if (mappingConfiguration != null) {
 +            File mappingConfigFile = new File(mappingConfiguration);
 +            if (mappingConfigFile.exists()) {
 +                try {
 +                    config = new ServiceURLPropertyConfig(mappingConfigFile);
 +                    log.loadedComponentConfigMappings(mappingConfigFile.getAbsolutePath());
 +                } catch (Exception e) {
 +                    log.failedToLoadComponentConfigMappings(mappingConfigFile.getAbsolutePath(), e);
 +                }
 +            }
 +        }
 +
 +        // If there is no valid override configured, fall-back to the internal mapping configuration
 +        if (config == null) {
 +            config = new ServiceURLPropertyConfig();
 +        }
 +    }
 +
 +    AmbariDynamicServiceURLCreator(AmbariCluster cluster, File mappingConfiguration) throws IOException {
 +        this.cluster = cluster;
 +        config = new ServiceURLPropertyConfig(new FileInputStream(mappingConfiguration));
 +    }
 +
 +    AmbariDynamicServiceURLCreator(AmbariCluster cluster, String mappings) {
 +        this.cluster = cluster;
 +        config = new ServiceURLPropertyConfig(new ByteArrayInputStream(mappings.getBytes(StandardCharsets.UTF_8)));
 +    }
 +
-     List<String> create(String serviceName) {
++    @Override
++    public List<String> create(String serviceName) {
 +        List<String> urls = new ArrayList<>();
 +
 +        Map<String, String> placeholderValues = new HashMap<>();
 +        List<String> componentHostnames = new ArrayList<>();
 +        String hostNamePlaceholder = null;
 +
 +        ServiceURLPropertyConfig.URLPattern pattern = config.getURLPattern(serviceName);
 +        if (pattern != null) {
 +            for (String propertyName : pattern.getPlaceholders()) {
 +                ServiceURLPropertyConfig.Property configProperty = config.getConfigProperty(serviceName, propertyName);
 +
 +                String propertyValue = null;
 +                String propertyType = configProperty.getType();
 +                if (ServiceURLPropertyConfig.Property.TYPE_SERVICE.equals(propertyType)) {
 +                    log.lookingUpServiceConfigProperty(configProperty.getService(), configProperty.getServiceConfig(), configProperty.getValue());
 +                    AmbariCluster.ServiceConfiguration svcConfig =
 +                        cluster.getServiceConfiguration(configProperty.getService(), configProperty.getServiceConfig());
 +                    if (svcConfig != null) {
 +                        propertyValue = svcConfig.getProperties().get(configProperty.getValue());
 +                    }
 +                } else if (ServiceURLPropertyConfig.Property.TYPE_COMPONENT.equals(propertyType)) {
 +                    String compName = configProperty.getComponent();
 +                    if (compName != null) {
 +                        AmbariComponent component = cluster.getComponent(compName);
 +                        if (component != null) {
 +                            if (ServiceURLPropertyConfig.Property.PROP_COMP_HOSTNAME.equals(configProperty.getValue())) {
 +                                log.lookingUpComponentHosts(compName);
 +                                componentHostnames.addAll(component.getHostNames());
 +                                hostNamePlaceholder = propertyName; // Remember the host name placeholder
 +                            } else {
 +                                log.lookingUpComponentConfigProperty(compName, configProperty.getValue());
 +                                propertyValue = component.getConfigProperty(configProperty.getValue());
 +                            }
 +                        }
 +                    }
 +                } else { // Derived property
 +                    log.handlingDerivedProperty(serviceName, configProperty.getType(), configProperty.getName());
 +                    ServiceURLPropertyConfig.Property p = config.getConfigProperty(serviceName, configProperty.getName());
 +                    propertyValue = p.getValue();
 +                    if (propertyValue == null) {
 +                        if (p.getConditionHandler() != null) {
 +                            propertyValue = p.getConditionHandler().evaluate(config, cluster);
 +                        }
 +                    }
 +                }
 +
 +                log.determinedPropertyValue(configProperty.getName(), propertyValue);
 +                placeholderValues.put(configProperty.getName(), propertyValue);
 +            }
 +
 +            // For patterns with a placeholder value for the hostname (e.g., multiple URL scenarios)
 +            if (!componentHostnames.isEmpty()) {
 +                for (String componentHostname : componentHostnames) {
 +                    String url = pattern.get().replace("{" + hostNamePlaceholder + "}", componentHostname);
 +                    urls.add(createURL(url, placeholderValues));
 +                }
 +            } else { // Single URL result case
 +                urls.add(createURL(pattern.get(), placeholderValues));
 +            }
 +        }
 +
 +        return urls;
 +    }
 +
 +    private String createURL(String pattern, Map<String, String> placeholderValues) {
 +        String url = null;
 +        if (pattern != null) {
 +            url = pattern;
 +            for (String placeHolder : placeholderValues.keySet()) {
 +                String value = placeholderValues.get(placeHolder);
 +                if (value != null) {
 +                    url = url.replace("{" + placeHolder + "}", value);
 +                }
 +            }
 +        }
 +        return url;
 +    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
index 4044d56,0000000..0dfab36
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/PropertyEqualsHandler.java
@@@ -1,76 -1,0 +1,88 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +
 +class PropertyEqualsHandler implements ConditionalValueHandler {
 +
 +    private String serviceName                        = null;
 +    private String propertyName                       = null;
 +    private String propertyValue                      = null;
 +    private ConditionalValueHandler affirmativeResult = null;
 +    private ConditionalValueHandler negativeResult    = null;
 +
 +    PropertyEqualsHandler(String                  serviceName,
 +                          String                  propertyName,
 +                          String                  propertyValue,
 +                          ConditionalValueHandler affirmativeResult,
 +                          ConditionalValueHandler negativeResult) {
 +        this.serviceName       = serviceName;
 +        this.propertyName      = propertyName;
 +        this.propertyValue     = propertyValue;
 +        this.affirmativeResult = affirmativeResult;
 +        this.negativeResult    = negativeResult;
 +    }
 +
 +    @Override
 +    public String evaluate(ServiceURLPropertyConfig config, AmbariCluster cluster) {
 +        String result = null;
 +
 +        ServiceURLPropertyConfig.Property p = config.getConfigProperty(serviceName, propertyName);
 +        if (p != null) {
 +            String value = getActualPropertyValue(cluster, p);
-             if (propertyValue.equals(value)) {
-                 result = affirmativeResult.evaluate(config, cluster);
-             } else if (negativeResult != null) {
-                 result = negativeResult.evaluate(config, cluster);
++            if (propertyValue == null) {
++                // If the property value isn't specified, then we're just checking if the property is set with any value
++                if (value != null) {
++                    // So, if there is a value in the config, respond with the affirmative
++                    result = affirmativeResult.evaluate(config, cluster);
++                } else if (negativeResult != null) {
++                    result = negativeResult.evaluate(config, cluster);
++                }
++            } else {
++                // Otherwise, compare the configured value with the actual value from the cluster config
++                if (propertyValue.equals(value)) {
++                    result = affirmativeResult.evaluate(config, cluster);
++                } else if (negativeResult != null) {
++                    result = negativeResult.evaluate(config, cluster);
++                }
 +            }
 +
 +            // Check if the result is a reference to a local derived property
 +            ServiceURLPropertyConfig.Property derived = config.getConfigProperty(serviceName, result);
 +            if (derived != null) {
 +                result = getActualPropertyValue(cluster, derived);
 +            }
 +        }
 +
 +        return result;
 +    }
 +
 +    private String getActualPropertyValue(AmbariCluster cluster, ServiceURLPropertyConfig.Property property) {
 +        String value = null;
 +        String propertyType = property.getType();
 +        if (ServiceURLPropertyConfig.Property.TYPE_COMPONENT.equals(propertyType)) {
 +            AmbariComponent component = cluster.getComponent(property.getComponent());
 +            if (component != null) {
 +                value = component.getConfigProperty(property.getValue());
 +            }
 +        } else if (ServiceURLPropertyConfig.Property.TYPE_SERVICE.equals(propertyType)) {
 +            AmbariCluster.ServiceConfiguration svcConfig =
 +                cluster.getServiceConfiguration(property.getService(), property.getServiceConfig());
 +            if (svcConfig != null) {
 +                value = svcConfig.getProperties().get(property.getValue());
 +            }
 +        }
 +        return value;
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/e5fd0622/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
----------------------------------------------------------------------
diff --cc gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
index 47b20e9,0000000..9f3da3d
mode 100644,000000..100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/ServiceURLPropertyConfig.java
@@@ -1,324 -1,0 +1,329 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.discovery.ambari;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.NamedNodeMap;
 +import org.w3c.dom.Node;
 +import org.w3c.dom.NodeList;
 +
 +import javax.xml.xpath.XPath;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathExpression;
 +import javax.xml.xpath.XPathExpressionException;
 +import javax.xml.xpath.XPathFactory;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
 +/**
 + * Service URL pattern mapping configuration model.
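 + * <p>
 + * An illustrative sketch of a mapping entry (element names match those parsed by this class):
 + * <pre>
 + * &lt;service-discovery-url-mappings&gt;
 + *   &lt;service name="MYSERVICE"&gt;
 + *     &lt;url-pattern&gt;http://{HOST}:{PORT}&lt;/url-pattern&gt;
 + *     &lt;properties&gt;
 + *       &lt;property name="HOST"&gt;...&lt;/property&gt;
 + *       &lt;property name="PORT"&gt;...&lt;/property&gt;
 + *     &lt;/properties&gt;
 + *   &lt;/service&gt;
 + * &lt;/service-discovery-url-mappings&gt;
 + * </pre>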
 + */
 +class ServiceURLPropertyConfig {
 +
 +    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
 +
 +    private static final String ATTR_NAME = "name";
 +
 +    private static XPathExpression SERVICE_URL_PATTERN_MAPPINGS;
 +    private static XPathExpression URL_PATTERN;
 +    private static XPathExpression PROPERTIES;
 +    static {
 +        XPath xpath = XPathFactory.newInstance().newXPath();
 +        try {
 +            SERVICE_URL_PATTERN_MAPPINGS = xpath.compile("/service-discovery-url-mappings/service");
 +            URL_PATTERN                  = xpath.compile("url-pattern/text()");
 +            PROPERTIES                   = xpath.compile("properties/property");
 +        } catch (XPathExpressionException e) {
 +            e.printStackTrace();
 +        }
 +    }
 +
 +    private static final String DEFAULT_SERVICE_URL_MAPPINGS = "ambari-service-discovery-url-mappings.xml";
 +
 +    private Map<String, URLPattern> urlPatterns = new HashMap<>();
 +
 +    private Map<String, Map<String, Property>> properties = new HashMap<>();
 +
 +
 +    /**
 +     * The default service URL pattern to property mapping configuration will be used.
 +     */
 +    ServiceURLPropertyConfig() {
 +        this(ServiceURLPropertyConfig.class.getClassLoader().getResourceAsStream(DEFAULT_SERVICE_URL_MAPPINGS));
 +    }
 +
 +    /**
 +     * Load the service URL pattern to property mapping configuration from the specified file.
 +     */
 +    ServiceURLPropertyConfig(File mappingConfigurationFile) throws Exception {
 +        this(new FileInputStream(mappingConfigurationFile));
 +    }
 +
 +    /**
 +     * Load the service URL pattern to property mapping configuration from the specified stream.
 +     * @param source An InputStream for the XML content
 +     */
 +    ServiceURLPropertyConfig(InputStream source) {
 +        // Parse the XML, and build the model
 +        try {
 +            Document doc = XmlUtils.readXml(source);
 +
 +            NodeList serviceNodes =
 +                    (NodeList) SERVICE_URL_PATTERN_MAPPINGS.evaluate(doc, XPathConstants.NODESET);
 +            for (int i=0; i < serviceNodes.getLength(); i++) {
 +                Node serviceNode = serviceNodes.item(i);
 +                String serviceName = serviceNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
 +                properties.put(serviceName, new HashMap<String, Property>());
 +
 +                Node urlPatternNode = (Node) URL_PATTERN.evaluate(serviceNode, XPathConstants.NODE);
 +                if (urlPatternNode != null) {
 +                    urlPatterns.put(serviceName, new URLPattern(urlPatternNode.getNodeValue()));
 +                }
 +
 +                NodeList propertiesNode = (NodeList) PROPERTIES.evaluate(serviceNode, XPathConstants.NODESET);
 +                if (propertiesNode != null) {
 +                    processProperties(serviceName, propertiesNode);
 +                }
 +            }
 +        } catch (Exception e) {
 +            log.failedToLoadServiceDiscoveryURLDefConfiguration(e);
 +        } finally {
 +            try {
 +                source.close();
 +            } catch (IOException e) {
 +                // Ignore
 +            }
 +        }
 +    }
 +
 +    private void processProperties(String serviceName, NodeList propertyNodes) {
 +        for (int i = 0; i < propertyNodes.getLength(); i++) {
 +            Property p = Property.createProperty(serviceName, propertyNodes.item(i));
 +            properties.get(serviceName).put(p.getName(), p);
 +        }
 +    }
 +
 +    URLPattern getURLPattern(String service) {
 +        return urlPatterns.get(service);
 +    }
 +
 +    Property getConfigProperty(String service, String property) {
 +        return properties.get(service).get(property);
 +    }
 +
 +    static class URLPattern {
 +        String pattern;
 +        List<String> placeholders = new ArrayList<>();
 +
 +        URLPattern(String pattern) {
 +            this.pattern = pattern;
 +
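 +            // Collect each {placeholder} token, e.g. HOST and PORT in "http://{HOST}:{PORT}" (illustrative)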
 +            final Pattern regex = Pattern.compile("\\{(.*?)}", Pattern.DOTALL);
 +            final Matcher matcher = regex.matcher(pattern);
 +            while( matcher.find() ){
 +                placeholders.add(matcher.group(1));
 +            }
 +        }
 +
 +        String get() {return pattern; }
 +        List<String> getPlaceholders() {
 +            return placeholders;
 +        }
 +    }
 +
 +    static class Property {
 +        static final String TYPE_SERVICE   = "SERVICE";
 +        static final String TYPE_COMPONENT = "COMPONENT";
 +        static final String TYPE_DERIVED   = "DERIVED";
 +
 +        static final String PROP_COMP_HOSTNAME = "component.host.name";
 +
 +        static final String ATTR_NAME     = "name";
 +        static final String ATTR_PROPERTY = "property";
 +        static final String ATTR_VALUE    = "value";
 +
 +        static XPathExpression HOSTNAME;
 +        static XPathExpression SERVICE_CONFIG;
 +        static XPathExpression COMPONENT;
 +        static XPathExpression CONFIG_PROPERTY;
 +        static XPathExpression IF;
 +        static XPathExpression THEN;
 +        static XPathExpression ELSE;
 +        static XPathExpression TEXT;
 +        static {
 +            XPath xpath = XPathFactory.newInstance().newXPath();
 +            try {
 +                HOSTNAME        = xpath.compile("hostname");
 +                SERVICE_CONFIG  = xpath.compile("service-config");
 +                COMPONENT       = xpath.compile("component");
 +                CONFIG_PROPERTY = xpath.compile("config-property");
 +                IF              = xpath.compile("if");
 +                THEN            = xpath.compile("then");
 +                ELSE            = xpath.compile("else");
 +                TEXT            = xpath.compile("text()");
 +            } catch (XPathExpressionException e) {
 +                e.printStackTrace();
 +            }
 +        }
 +
 +
 +        String type;
 +        String name;
 +        String component;
 +        String service;
 +        String serviceConfig;
 +        String value;
 +        ConditionalValueHandler conditionHandler = null;
 +
 +        private Property(String type,
 +                         String propertyName,
 +                         String component,
 +                         String service,
 +                         String configType,
 +                         String value,
 +                         ConditionalValueHandler pch) {
 +            this.type = type;
 +            this.name = propertyName;
 +            this.service = service;
 +            this.component = component;
 +            this.serviceConfig = configType;
 +            this.value = value;
 +            conditionHandler = pch;
 +        }
 +
 +        static Property createProperty(String serviceName, Node propertyNode) {
 +            String propertyName = propertyNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
 +            String propertyType = null;
 +            String serviceType = null;
 +            String configType = null;
 +            String componentType = null;
 +            String value = null;
 +            ConditionalValueHandler pch = null;
 +
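 +            // The property type follows from which child element is present: a service-config element
 +            // yields SERVICE, a component element yields COMPONENT, and a config-property containing an
 +            // if condition yields DERIVED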
 +            try {
 +                Node hostNameNode = (Node) HOSTNAME.evaluate(propertyNode, XPathConstants.NODE);
 +                if (hostNameNode != null) {
 +                    value = PROP_COMP_HOSTNAME;
 +                }
 +
 +                // Check for a service-config node
 +                Node scNode = (Node) SERVICE_CONFIG.evaluate(propertyNode, XPathConstants.NODE);
 +                if (scNode != null) {
 +                    // Service config property
 +                    propertyType = Property.TYPE_SERVICE;
 +                    serviceType = scNode.getAttributes().getNamedItem(ATTR_NAME).getNodeValue();
 +                    Node scTextNode = (Node) TEXT.evaluate(scNode, XPathConstants.NODE);
 +                    configType = scTextNode.getNodeValue();
 +                } else { // If not service-config node, check for a component config node
 +                    Node cNode = (Node) COMPONENT.evaluate(propertyNode, XPathConstants.NODE);
 +                    if (cNode != null) {
 +                        // Component config property
 +                        propertyType = Property.TYPE_COMPONENT;
 +                        Node cTextNode = (Node) TEXT.evaluate(cNode, XPathConstants.NODE);
 +                        configType = cTextNode.getNodeValue();
 +                        componentType = cTextNode.getNodeValue();
 +                    }
 +                }
 +
 +                // Check for a config property node
 +                Node cpNode = (Node) CONFIG_PROPERTY.evaluate(propertyNode, XPathConstants.NODE);
 +                if (cpNode != null) {
 +                    // Check for a condition element
 +                    Node ifNode = (Node) IF.evaluate(cpNode, XPathConstants.NODE);
 +                    if (ifNode != null) {
 +                        propertyType = TYPE_DERIVED;
 +                        pch = getConditionHandler(serviceName, ifNode);
 +                    } else {
 +                        Node cpTextNode = (Node) TEXT.evaluate(cpNode, XPathConstants.NODE);
 +                        value = cpTextNode.getNodeValue();
 +                    }
 +                }
 +            } catch (Exception e) {
 +                e.printStackTrace();
 +            }
 +
 +            // Create and return the property representation
 +            return new Property(propertyType, propertyName, componentType, serviceType, configType, value, pch);
 +        }
 +
 +        private static ConditionalValueHandler getConditionHandler(String serviceName, Node ifNode) throws Exception {
 +            ConditionalValueHandler result = null;
 +
 +            if (ifNode != null) {
 +                NamedNodeMap attrs = ifNode.getAttributes();
 +                String comparisonPropName = attrs.getNamedItem(ATTR_PROPERTY).getNodeValue();
-                 String comparisonValue = attrs.getNamedItem(ATTR_VALUE).getNodeValue();
++
++                String comparisonValue = null;
++                Node valueNode = attrs.getNamedItem(ATTR_VALUE);
++                if (valueNode != null) {
++                    comparisonValue = valueNode.getNodeValue();
++                }
 +
 +                ConditionalValueHandler affirmativeResult = null;
 +                Node thenNode = (Node) THEN.evaluate(ifNode, XPathConstants.NODE);
 +                if (thenNode != null) {
 +                    Node subIfNode = (Node) IF.evaluate(thenNode, XPathConstants.NODE);
 +                    if (subIfNode != null) {
 +                        affirmativeResult = getConditionHandler(serviceName, subIfNode);
 +                    } else {
 +                        affirmativeResult = new SimpleValueHandler(thenNode.getFirstChild().getNodeValue());
 +                    }
 +                }
 +
 +                ConditionalValueHandler negativeResult = null;
 +                Node elseNode = (Node) ELSE.evaluate(ifNode, XPathConstants.NODE);
 +                if (elseNode != null) {
 +                    Node subIfNode = (Node) IF.evaluate(elseNode, XPathConstants.NODE);
 +                    if (subIfNode != null) {
 +                        negativeResult = getConditionHandler(serviceName, subIfNode);
 +                    } else {
 +                        negativeResult = new SimpleValueHandler(elseNode.getFirstChild().getNodeValue());
 +                    }
 +                }
 +
 +                result = new PropertyEqualsHandler(serviceName,
 +                        comparisonPropName,
 +                        comparisonValue,
 +                        affirmativeResult,
 +                        negativeResult);
 +            }
 +
 +            return result;
 +        }
 +
 +        String getType() { return type; }
 +        String getName() { return name; }
 +        String getComponent() { return component; }
 +        String getService() { return service; }
 +        String getServiceConfig() { return serviceConfig; }
 +        String getValue() {
 +            return value;
 +        }
 +        ConditionalValueHandler getConditionHandler() { return conditionHandler; }
 +    }
 +}
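
For context, the descriptor XML that createProperty() and getConditionHandler() above consume uses the element and attribute names defined by the constants in this class (hostname, service-config, component, config-property, if/then/else, and the name/property/value attributes). The following is a minimal standalone sketch rather than anything from the commit; the service name "MYSERVICE" and the property names SCHEME and SSL_ENABLED are illustrative only.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;

public class PropertyDescriptorExample {
    public static void main(String[] args) throws Exception {
        // A derived property whose value depends on another property's value,
        // expressed with the if/then/else structure handled above.
        String xml =
            "<property name=\"SCHEME\">" +
            "  <config-property>" +
            "    <if property=\"SSL_ENABLED\" value=\"true\">" +
            "      <then>https</then>" +
            "      <else>http</else>" +
            "    </if>" +
            "  </config-property>" +
            "</property>";

        Document doc = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder()
            .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
        Node propertyNode = doc.getDocumentElement();

        // With the class above in scope this would be:
        //   Property p = Property.createProperty("MYSERVICE", propertyNode);
        // yielding a TYPE_DERIVED property whose condition handler resolves
        // to "https" when SSL_ENABLED equals "true", and to "http" otherwise.
        System.out.println(propertyNode.getAttributes()
            .getNamedItem("name").getNodeValue()); // prints: SCHEME
    }
}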


[19/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
index cd2c0eb,0000000..61c5303
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/GatewayMessages.java
@@@ -1,521 -1,0 +1,553 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import org.apache.commons.cli.ParseException;
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +import org.apache.knox.gateway.services.security.KeystoreServiceException;
 +
 +import java.io.File;
 +import java.net.URI;
 +import java.util.Date;
 +import java.util.Map;
 +import java.util.Set;
 +
 +/**
 + *
 + */
 +@Messages(logger="org.apache.knox.gateway")
 +public interface GatewayMessages {
 +
 +  @Message( level = MessageLevel.FATAL, text = "Failed to parse command line: {0}" )
 +  void failedToParseCommandLine( @StackTrace( level = MessageLevel.DEBUG ) ParseException e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Starting gateway..." )
 +  void startingGateway();
 +
 +  @Message( level = MessageLevel.FATAL, text = "Failed to start gateway: {0}" )
 +  void failedToStartGateway( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Started gateway on port {0}." )
 +  void startedGateway( int port );
 +
 +  @Message( level = MessageLevel.INFO, text = "Stopping gateway..." )
 +  void stoppingGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Stopped gateway." )
 +  void stoppedGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading configuration resource {0}" )
 +  void loadingConfigurationResource( String res );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading configuration file {0}" )
 +  void loadingConfigurationFile( String file );
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to load configuration file {0}: {1}" )
 +  void failedToLoadConfig( String path, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Using {1} as GATEWAY_HOME via {0}." )
 +  void settingGatewayHomeDir( String location, String home );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading topologies from directory: {0}" )
 +  void loadingTopologiesFromDirectory( String topologiesDir );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Loading topology file: {0}" )
 +  void loadingTopologyFile( String fileName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Monitoring topologies in directory: {0}" )
 +  void monitoringTopologyChangesInDirectory( String topologiesDir );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deploying topology {0} to {1}" )
 +  void deployingTopology( String clusterName, String warDirName );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Deployed topology {0}." )
 +  void deployedTopology( String clusterName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loading topology {0} from {1}" )
 +  void redeployingTopology( String clusterName, String warDirName );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Redeployed topology {0}." )
 +  void redeployedTopology( String clusterName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Activating topology {0}" )
 +  void activatingTopology( String name );
 +
 +  @Message( level = MessageLevel.INFO, text = "Activating topology {0} archive {1}" )
 +  void activatingTopologyArchive( String topology, String archive );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deactivating topology {0}" )
 +  void deactivatingTopology( String name );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to deploy topology {0}: {1}" )
 +  void failedToDeployTopology( String name, @StackTrace(level=MessageLevel.DEBUG) Throwable e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to redeploy topology {0}" )
 +  void failedToRedeployTopology( String name );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to redeploy topology {0}: {1}" )
 +  void failedToRedeployTopology( String name, @StackTrace(level=MessageLevel.DEBUG) Throwable e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to load topology {0}: Topology configuration is invalid!")
 +  void failedToLoadTopology(String fileName);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to redeploy topologies: {0}" )
 +  void failedToRedeployTopologies( @StackTrace(level=MessageLevel.DEBUG) Throwable e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to undeploy topology {0}: {1}" )
 +  void failedToUndeployTopology( String name, @StackTrace(level=MessageLevel.DEBUG) Exception e );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting topology {0}" )
 +  void deletingTopology( String topologyName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting deployed topology {0}" )
 +  void deletingDeployment( String warDirName );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Purge backups of deployed topology {0}" )
 +  void cleanupDeployments( String topologyName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Deleting backup deployed topology {0}" )
 +  void cleanupDeployment( String absolutePath );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating gateway home directory: {0}" )
 +  void creatingGatewayHomeDir( File homeDir );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating gateway deployment directory: {0}" )
 +  void creatingGatewayDeploymentDir( File topologiesDir );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating default gateway configuration file: {0}" )
 +  void creatingDefaultConfigFile( File defaultConfigFile );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating sample topology file: {0}" )
 +  void creatingDefaultTopologyFile( File defaultConfigFile );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring service deployment contributor with invalid null name: {0}" )
 +  void ignoringServiceContributorWithMissingName( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring service deployment contributor with invalid null role: {0}" )
 +  void ignoringServiceContributorWithMissingRole( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring service deployment contributor with invalid null version: {0}" )
 +  void ignoringServiceContributorWithMissingVersion( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring provider deployment contributor with invalid null name: {0}" )
 +  void ignoringProviderContributorWithMissingName( String className );
 +
 +  @Message( level = MessageLevel.WARN, text = "Ignoring provider deployment contributor with invalid null role: {0}" )
 +  void ignoringProviderContributorWithMissingRole( String className );
 +
 +  @Message( level = MessageLevel.INFO, text = "Loaded logging configuration: {0}" )
 +  void loadedLoggingConfig( String fileName );
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to load logging configuration: {0}" )
 +  void failedToLoadLoggingConfig( String fileName );
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating credential store for the gateway instance." )
 +  void creatingCredentialStoreForGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Credential store for the gateway instance found - no need to create one." )
 +  void credentialStoreForGatewayFoundNotCreating();
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating keystore for the gateway instance." )
 +  void creatingKeyStoreForGateway();
 +
 +  @Message( level = MessageLevel.INFO, text = "Keystore for the gateway instance found - no need to create one." )
 +  void keyStoreForGatewayFoundNotCreating();
 +
 +  @Message( level = MessageLevel.INFO, text = "Creating credential store for the cluster: {0}" )
 +  void creatingCredentialStoreForCluster(String clusterName);
 +
 +  @Message( level = MessageLevel.INFO, text = "Credential store found for the cluster: {0} - no need to create one." )
 +  void credentialStoreForClusterFoundNotCreating(String clusterName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Received request: {0} {1}" )
 +  void receivedRequest( String method, String uri );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Dispatch request: {0} {1}" )
 +  void dispatchRequest( String method, URI uri );
 +  
 +  @Message( level = MessageLevel.WARN, text = "Connection exception dispatching request: {0} {1}" )
 +  void dispatchServiceConnectionException( URI uri, @StackTrace(level=MessageLevel.WARN) Exception e );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Signature verified: {0}" )
 +  void signatureVerified( boolean verified );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Apache Knox Gateway {0} ({1})" )
 +  void gatewayVersionMessage( String version, String hash );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to inject service {0}: {1}" )
 +  void failedToInjectService( String serviceName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to finalize contribution: {0}" )
 +  void failedToFinalizeContribution( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to contribute service [role={1}, name={0}]: {2}" )
 +  void failedToContributeService( String name, String role, @StackTrace( level = MessageLevel.ERROR ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to contribute provider [role={1}, name={0}]: {2}" )
 +  void failedToContributeProvider( String name, String role, @StackTrace( level = MessageLevel.ERROR ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize contribution: {0}" )
 +  void failedToInitializeContribution( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize servlet instance: {0}" )
 +  void failedToInitializeServletInstace( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Gateway processing failed: {0}" )
 +  void failedToExecuteFilter( @StackTrace( level = MessageLevel.INFO ) Throwable t );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load topology {0}: {1}")
 +  void failedToLoadTopology( String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load topology {0}, retrying after {1}ms: {2}")
 +  void failedToLoadTopologyRetrying( String friendlyURI, String delay, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to handle topology events: {0}" )
 +  void failedToHandleTopologyEvents( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to reload topologies: {0}" )
 +  void failedToReloadTopologies( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.FATAL, text = "Unsupported encoding: {0}" )
 +  void unsupportedEncoding( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to persist master secret: {0}" )
 +  void failedToPersistMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt master secret: {0}" )
 +  void failedToEncryptMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize master service from persistent master {0}: {1}" )
 +  void failedToInitializeFromPersistentMaster( String masterFileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encode passphrase: {0}" )
 +  void failedToEncodePassphrase( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to verify signature: {0}")
 +  void failedToVerifySignature( @StackTrace(level=MessageLevel.DEBUG) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to sign the data: {0}")
 +  void failedToSignData( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to decrypt password for cluster {0}: {1}" )
 +  void failedToDecryptPasswordForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt password for cluster {0}: {1}")
 +  void failedToEncryptPasswordForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to create keystore [filename={0}, type={1}]: {2}" )
 +  void failedToCreateKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load keystore [filename={0}, type={1}]: {2}" )
 +  void failedToLoadKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add key for cluster {0}: {1}" )
 +  void failedToAddKeyForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add credential for cluster {0}: {1}" )
 +  void failedToAddCredentialForCluster( String clusterName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key for Gateway {0}: {1}" )
 +  void failedToGetKeyForGateway( String alias, @StackTrace( level=MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get credential for cluster {0}: {1}" )
 +  void failedToGetCredentialForCluster( String clusterName, @StackTrace(level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key for cluster {0}: {1}" )
 +  void failedToGetKeyForCluster( String clusterName, @StackTrace(level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add self signed certificate for Gateway {0}: {1}" )
 +  void failedToAddSeflSignedCertForGateway( String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate secret key from password: {0}" )
 +  void failedToGenerateKeyFromPassword( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to establish connection to {0}: {1}" )
 +  void failedToEstablishConnectionToUrl( String url, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to interpret property \"{0}\": {1}")
 +  void failedToInterpretProperty( String property, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to instantiate the internal gateway services." )
 +  void failedToInstantiateGatewayServices();
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to serialize map to Json string {0}: {1}" )
 +  void failedToSerializeMapToJSON( Map<String, Object> map, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get map from Json string {0}: {1}" )
 +  void failedToGetMapFromJsonString( String json, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.DEBUG, text = "Successful Knox->Hadoop SPNegotiation authentication for URL: {0}" )
 +  void successfulSPNegoAuthn(String uri);
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed Knox->Hadoop SPNegotiation authentication for URL: {0}" )
 +  void failedSPNegoAuthn(String uri);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Dispatch response status: {0}" )
 +  void dispatchResponseStatusCode(int statusCode);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Dispatch response status: {0}, Location: {1}" )
 +  void dispatchResponseCreatedStatusCode( int statusCode, String location );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to decrypt cipher text for cluster {0}: due to inability to retrieve the password." )
 +  void failedToDecryptCipherForClusterNullPassword(String clusterName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Gateway services have not been initialized." )
 +  void gatewayServicesNotInitialized();
 +
 +  @Message( level = MessageLevel.INFO, text = "The Gateway SSL certificate is issued to hostname: {0}." )
 +  void certificateHostNameForGateway(String cn);
 +
 +  @Message( level = MessageLevel.INFO, text = "The Gateway SSL certificate is valid between: {0} and {1}." )
 +  void certificateValidityPeriod(Date notBefore, Date notAfter);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to retrieve certificate for Gateway: {0}." )
 +  void unableToRetrieveCertificateForGateway(Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate alias for cluster: {0} {1}." )
 +  void failedToGenerateAliasForCluster(String clusterName, KeystoreServiceException e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Key passphrase not found in credential store - using master secret." )
 +  void assumingKeyPassphraseIsMaster();
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to remove alias for cluster: {0} {1}." )
 +  void failedToRemoveCredentialForCluster(String clusterName, Exception e);
 +
 +  @Message( level = MessageLevel.WARN, text = "Failed to match path {0}" )
 +  void failedToMatchPath( String path );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get system ldap connection: {0}" )
 +  void failedToGetSystemLdapConnection( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.WARN, text = "Value not found for cluster:{0}, alias: {1}" )
 +  void aliasValueNotFound( String cluster, String alias );
 +
 +  @Message( level = MessageLevel.INFO, text = "Computed userDn: {0} using dnTemplate for principal: {1}" )
 +  void computedUserDn(String userDn, String principal);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Searching from {0} where {1} scope {2}" )
 +  void searchBaseFilterScope( String searchBase, String searchFilter, String searchScope );
 +
 +  @Message( level = MessageLevel.INFO, text = "Computed userDn: {0} using ldapSearch for principal: {1}" )
 +  void searchedAndFoundUserDn(String userDn, String principal);
 +
 +  @Message( level = MessageLevel.INFO, text = "Computed roles/groups: {0} for principal: {1}" )
 +  void lookedUpUserRoles(Set<String> roleNames, String userName);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Initialize provider: {1}/{0}" )
 +  void initializeProvider( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Initialize service: {1}/{0}" )
 +  void initializeService( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Contribute provider: {1}/{0}" )
 +  void contributeProvider( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Contribute service: {1}/{0}" )
 +  void contributeService( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Finalize provider: {1}/{0}" )
 +  void finalizeProvider( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Finalize service: {1}/{0}" )
 +  void finalizeService( String name, String role );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Configured services directory is {0}" )
 +  void usingServicesDirectory(String path);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to unmarshal service definition file {0}: {1}" )
 +  void failedToLoadServiceDefinition(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to find service definition file {0}: {1}" )
 +  void failedToFindServiceDefinitionFile(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to find rewrite file {0}: {1}" )
 +  void failedToFindRewriteFile(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to unmarshal rewrite file {0}: {1}" )
 +  void failedToLoadRewriteFile(String fileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "No rewrite file found in service directory {0}" )
 +  void noRewriteFileFound(String path);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Added Service definition name: {0}, role : {1}, version : {2}" )
 +  void addedServiceDefinition(String serviceName, String serviceRole, String version);
 +
 +  @Message( level = MessageLevel.INFO, text = "System Property: {0}={1}" )
 +  void logSysProp( String name, String property );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to get password: {0}" )
 +  void unableToGetPassword(@StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Initialize application: {0}" )
 +  void initializeApplication( String name );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Contribute application: {0}" )
 +  void contributeApplication( String name );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Finalize application: {0}" )
 +  void finalizeApplication( String name );
 +
 +  @Message( level = MessageLevel.INFO, text = "Default topology {0} at {1}" )
 +  void defaultTopologySetup( String defaultTopologyName, String redirectContext );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Default topology forward from {0} to {1}" )
 +  void defaultTopologyForward( String oldTarget, String newTarget );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Unable to setup PagedResults" )
 +  void unableToSetupPagedResults();
 +
 +  @Message( level = MessageLevel.INFO, text = "Ignoring PartialResultException" )
 +  void ignoringPartialResultException();
 +
 +  @Message( level = MessageLevel.WARN, text = "Only retrieved first {0} groups due to SizeLimitExceededException." )
 +  void sizeLimitExceededOnlyRetrieved(int numResults);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to parse path into Template: {0} : {1}" )
 +  void failedToParsePath( String path, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to initialize metrics reporter {0}  : {1}" )
 +  void failedToInitializeReporter( String name,  @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to start metrics reporter {0}  : {1}" )
 +  void failedToStartReporter( String name,  @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Failed to stop metrics reporter {0}  : {1}" )
 +  void failedToStopReporter( String name,  @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.INFO, text = "Cookie scoping feature enabled: {0}" )
 +  void cookieScopingFeatureEnabled( boolean enabled );
 +
 +  /**
 +   * Log whether Topology port mapping feature is enabled/disabled.
 +   *
 +   * @param enabled
 +   */
 +  @Message(level = MessageLevel.INFO,
 +           text = "Topology port mapping feature enabled: {0}")
 +  void gatewayTopologyPortMappingEnabled(final boolean enabled);
 +
 +  /**
 +   * @param topology
 +   * @param port
 +   */
 +  @Message(level = MessageLevel.DEBUG,
 +           text = "Creating a connector for topology {0} listening on port {1}.")
 +  void createJettyConnector(final String topology, final int port);
 +
 +  /**
 +   * @param topology
 +   */
 +  @Message(level = MessageLevel.DEBUG,
 +           text = "Creating a handler for topology {0}.")
 +  void createJettyHandler(final String topology);
 +
 +  /**
 +   * @param oldTarget
 +   * @param newTarget
 +   */
 +  @Message(level = MessageLevel.INFO,
 +           text = "Updating request context from {0} to {1}")
 +  void topologyPortMappingAddContext(final String oldTarget,
 +      final String newTarget);
 +
 +  /**
 +   * @param oldTarget
 +   * @param newTarget
 +   */
 +  @Message(level = MessageLevel.DEBUG,
 +           text = "Updating request target from {0} to {1}")
 +  void topologyPortMappingUpdateRequest(final String oldTarget,
 +      final String newTarget);
 +
 +  /**
 +   * Messages for Topology Port Mapping
 +   *
 +   * @param port
 +   * @param topology
 +   */
 +  @Message(level = MessageLevel.ERROR,
 +           text = "Port {0} configured for Topology - {1} is already in use.")
 +  void portAlreadyInUse(final int port, final String topology);
 +
 +  /**
 +   * Messages for Topology Port Mapping
 +   *
 +   * @param port
 +   */
 +  @Message(level = MessageLevel.ERROR,
 +           text = "Port {0} is already in use.")
 +  void portAlreadyInUse(final int port);
 +
 +  /**
 +   * Log topology and port
 +   *
 +   * @param topology
 +   * @param port
 +   */
 +  @Message(level = MessageLevel.INFO,
 +           text = "Started gateway, topology \"{0}\" listening on port \"{1}\".")
 +  void startedGateway(final String topology, final int port);
 +
 +  @Message(level = MessageLevel.ERROR,
 +           text =
 +               "Could not find topology \"{0}\" mapped to port \"{1}\" configured in gateway-config.xml. "
 +                   + "This invalid topology mapping will be ignored by the gateway. "
 +                   + "A gateway restart will be required if the \"{0}\" topology is added in the future.")
 +  void topologyPortMappingCannotFindTopology(final String topology, final int port);
 +
 +
++  @Message( level = MessageLevel.INFO, text = "Monitoring simple descriptors in directory: {0}" )
++  void monitoringDescriptorChangesInDirectory(String descriptorsDir);
++
++
++  @Message( level = MessageLevel.INFO, text = "Monitoring shared provider configurations in directory: {0}" )
++  void monitoringProviderConfigChangesInDirectory(String sharedProviderDir);
++
++  @Message( level = MessageLevel.INFO, text = "Prevented deletion of shared provider configuration because there are referencing descriptors: {0}" )
++  void preventedDeletionOfSharedProviderConfiguration(String providerConfigurationPath);
++
++  @Message( level = MessageLevel.INFO, text = "Generated topology {0} because the associated descriptor {1} changed." )
++  void generatedTopologyForDescriptorChange(String topologyName, String descriptorName);
++
 +  @Message( level = MessageLevel.ERROR, text = "An error occurred while processing {0} : {1}" )
 +  void simpleDescriptorHandlingError(final String simpleDesc,
-                                      @StackTrace( level = MessageLevel.DEBUG ) Exception e );
++                                     @StackTrace(level = MessageLevel.DEBUG) Exception e);
++
++  @Message(level = MessageLevel.DEBUG, text = "Successfully wrote configuration: {0}")
++  void wroteConfigurationFile(final String filePath);
++
++  @Message(level = MessageLevel.ERROR, text = "Failed to write configuration: {0}")
++  void failedToWriteConfigurationFile(final String filePath,
++                                      @StackTrace(level = MessageLevel.DEBUG) Exception e );
++
++  @Message( level = MessageLevel.INFO, text = "Deleting topology {0} because the associated descriptor {1} was deleted." )
++  void deletingTopologyForDescriptorDeletion(String topologyName, String descriptorName);
++
++  @Message( level = MessageLevel.INFO, text = "Deleting descriptor {0} because the associated topology {1} was deleted." )
++  void deletingDescriptorForTopologyDeletion(String descriptorName, String topologyName);
++
++  @Message( level = MessageLevel.DEBUG, text = "Added descriptor {0} reference to provider configuration {1}." )
++  void addedProviderConfigurationReference(String descriptorName, String providerConfigurationName);
++
++  @Message( level = MessageLevel.DEBUG, text = "Removed descriptor {0} reference to provider configuration {1}." )
++  void removedProviderConfigurationReference(String descriptorName, String providerConfigurationName);
 +
 +}
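
A usage note on the @Messages pattern: callers never implement this interface by hand. They obtain an instance through MessagesFactory (exactly as GatewayConfigImpl does below) and invoke the methods like ordinary logger calls, with the {0}/{1} placeholders in each @Message filled from the arguments. A minimal sketch, with illustrative port and topology values:

package org.apache.knox.gateway;

import org.apache.knox.gateway.i18n.messages.MessagesFactory;

public class GatewayMessagesUsageExample {
    // MessagesFactory supplies the generated implementation of the interface.
    private static final GatewayMessages LOG =
        MessagesFactory.get(GatewayMessages.class);

    public static void main(String[] args) {
        LOG.startingGateway();                 // INFO:  "Starting gateway..."
        LOG.startedGateway(8443);              // INFO:  "Started gateway on port 8443."
        LOG.portAlreadyInUse(8443, "sandbox"); // ERROR: "Port 8443 configured for Topology - sandbox is already in use."
    }
}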

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
index dfe34d4,0000000..c7b8df5
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/config/impl/GatewayConfigImpl.java
@@@ -1,925 -1,0 +1,926 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.config.impl;
 +
++import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.lang3.StringUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.joda.time.Period;
 +import org.joda.time.format.PeriodFormatter;
 +import org.joda.time.format.PeriodFormatterBuilder;
 +
 +import java.io.File;
 +import java.net.InetSocketAddress;
 +import java.net.MalformedURLException;
 +import java.net.URL;
 +import java.net.UnknownHostException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ConcurrentHashMap;
 +
 +/**
 + * The configuration for the Gateway.
 + *
 + * The Gateway configuration variables are described in gateway-default.xml
 + *
 + * The Gateway specific configuration is split into two layers:
 + *
 + * 1. gateway-default.xml - All the configuration variables that the
 + *    Gateway needs.  These are the defaults that ship with the app
 + *    and should only be changed by the app developers.
 + *
 + * 2. gateway-site.xml - The (possibly empty) configuration in which the
 + *    system administrator can set variables for their Hadoop cluster.
 + *
 + * To find the gateway configuration files the following process is used.
 + * First, if the GATEWAY_HOME system property contains a valid directory name,
 + * an attempt will be made to read the configuration files from that directory.
 + * Second, if the GATEWAY_HOME environment variable contains a valid directory name,
 + * an attempt will be made to read the configuration files from that directory.
 + * Third, an attempt will be made to load the configuration files from the directory
 + * specified via the "user.dir" system property.
 + * Fourth, an attempt will be made to load the configuration files from the classpath.
 + * Last, defaults will be used for all values.
 + *
 + * If GATEWAY_HOME isn't set via either the system property or environment variable then
 + * a value for this will be defaulted.  The default selected will be the directory that
 + * contained the last loaded configuration file that was not contained in a JAR.  If
 + * no such configuration file is loaded the value of the "user.dir" system property will be used
 + * as the value of GATEWAY_HOME.  This is important to consider for any relative file names as they
 + * will be resolved relative to the value of GATEWAY_HOME.  One such relative value is the
 + * name of the directory containing cluster topologies.  This value defaults to "clusters".
 + */
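// Aside (not from this commit): a minimal sketch of the GATEWAY_HOME resolution
// order described in the javadoc above, condensed into a single method. It
// assumes the GATEWAY_HOME_VAR constant holds the literal name "GATEWAY_HOME";
// the real logic is spread across loadConfig() and initGatewayHomeDir() below.
//
//   static String resolveGatewayHome(java.io.File lastLoadedConfigFile) {
//       String home = System.getProperty("GATEWAY_HOME");          // 1. system property
//       if (home == null) home = System.getenv("GATEWAY_HOME");    // 2. environment variable
//       if (home == null && lastLoadedConfigFile != null)          // 3. grandparent dir of the last
//           home = lastLoadedConfigFile.getParentFile()            //    config file loaded from disk
//                      .getParentFile().getAbsolutePath();         //    (i.e. the parent of conf/)
//       if (home == null) home = System.getProperty("user.dir");   // 4. current working directory
//       return home;
//   }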
 +public class GatewayConfigImpl extends Configuration implements GatewayConfig {
 +
 +  private static final String GATEWAY_DEFAULT_TOPOLOGY_NAME_PARAM = "default.app.topology.name";
 +  private static final String GATEWAY_DEFAULT_TOPOLOGY_NAME = null;
 +
 +  private static final GatewayMessages log = MessagesFactory.get( GatewayMessages.class );
 +
 +  private static final String GATEWAY_CONFIG_DIR_PREFIX = "conf";
 +
 +  private static final String GATEWAY_CONFIG_FILE_PREFIX = "gateway";
 +
 +  private static final String DEFAULT_STACKS_SERVICES_DIR = "services";
 +
 +  private static final String DEFAULT_APPLICATIONS_DIR = "applications";
 +
 +  public static final String[] GATEWAY_CONFIG_FILENAMES = {
 +      GATEWAY_CONFIG_DIR_PREFIX + "/" + GATEWAY_CONFIG_FILE_PREFIX + "-default.xml",
 +      GATEWAY_CONFIG_DIR_PREFIX + "/" + GATEWAY_CONFIG_FILE_PREFIX + "-site.xml"
 +  };
 +
 +//  private static final String[] HADOOP_CONF_FILENAMES = {
 +//      "core-default.xml",
 +//      "core-site.xml"
 +////      "hdfs-default.xml",
 +////      "hdfs-site.xml",
 +////      "mapred-default.xml",
 +////      "mapred-site.xml"
 +//  };
 +
 +//  private static final String[] HADOOP_PREFIX_VARS = {
 +//      "HADOOP_PREFIX",
 +//      "HADOOP_HOME"
 +//  };
 +
 +  public static final String HTTP_HOST = GATEWAY_CONFIG_FILE_PREFIX + ".host";
 +  public static final String HTTP_PORT = GATEWAY_CONFIG_FILE_PREFIX + ".port";
 +  public static final String HTTP_PATH = GATEWAY_CONFIG_FILE_PREFIX + ".path";
 +  public static final String DEPLOYMENT_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".deployment.dir";
 +  public static final String SECURITY_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".security.dir";
 +  public static final String DATA_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".data.dir";
 +  public static final String STACKS_SERVICES_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".services.dir";
 +  public static final String GLOBAL_RULES_SERVICES = GATEWAY_CONFIG_FILE_PREFIX + ".global.rules.services";
 +  public static final String APPLICATIONS_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".applications.dir";
 +  public static final String HADOOP_CONF_DIR = GATEWAY_CONFIG_FILE_PREFIX + ".hadoop.conf.dir";
 +  public static final String FRONTEND_URL = GATEWAY_CONFIG_FILE_PREFIX + ".frontend.url";
 +  private static final String TRUST_ALL_CERTS = GATEWAY_CONFIG_FILE_PREFIX + ".trust.all.certs";
 +  private static final String CLIENT_AUTH_NEEDED = GATEWAY_CONFIG_FILE_PREFIX + ".client.auth.needed";
 +  private static final String CLIENT_AUTH_WANTED = GATEWAY_CONFIG_FILE_PREFIX + ".client.auth.wanted";
 +  private static final String TRUSTSTORE_PATH = GATEWAY_CONFIG_FILE_PREFIX + ".truststore.path";
 +  private static final String TRUSTSTORE_TYPE = GATEWAY_CONFIG_FILE_PREFIX + ".truststore.type";
 +  private static final String KEYSTORE_TYPE = GATEWAY_CONFIG_FILE_PREFIX + ".keystore.type";
 +  private static final String XFORWARDED_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".xforwarded.enabled";
 +  private static final String EPHEMERAL_DH_KEY_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".jdk.tls.ephemeralDHKeySize";
 +  private static final String HTTP_CLIENT_MAX_CONNECTION = GATEWAY_CONFIG_FILE_PREFIX + ".httpclient.maxConnections";
 +  private static final String HTTP_CLIENT_CONNECTION_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".httpclient.connectionTimeout";
 +  private static final String HTTP_CLIENT_SOCKET_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".httpclient.socketTimeout";
 +  private static final String THREAD_POOL_MAX = GATEWAY_CONFIG_FILE_PREFIX + ".threadpool.max";
 +  public static final String HTTP_SERVER_REQUEST_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.requestBuffer";
 +  public static final String HTTP_SERVER_REQUEST_HEADER_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.requestHeaderBuffer";
 +  public static final String HTTP_SERVER_RESPONSE_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.responseBuffer";
 +  public static final String HTTP_SERVER_RESPONSE_HEADER_BUFFER = GATEWAY_CONFIG_FILE_PREFIX + ".httpserver.responseHeaderBuffer";
 +  public static final String DEPLOYMENTS_BACKUP_VERSION_LIMIT = GATEWAY_CONFIG_FILE_PREFIX + ".deployment.backup.versionLimit";
 +  public static final String DEPLOYMENTS_BACKUP_AGE_LIMIT = GATEWAY_CONFIG_FILE_PREFIX + ".deployment.backup.ageLimit";
 +  public static final String METRICS_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".metrics.enabled";
 +  public static final String JMX_METRICS_REPORTING_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".jmx.metrics.reporting.enabled";
 +  public static final String GRAPHITE_METRICS_REPORTING_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.enabled";
 +  public static final String GRAPHITE_METRICS_REPORTING_HOST = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.host";
 +  public static final String GRAPHITE_METRICS_REPORTING_PORT = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.port";
 +  public static final String GRAPHITE_METRICS_REPORTING_FREQUENCY = GATEWAY_CONFIG_FILE_PREFIX + ".graphite.metrics.reporting.frequency";
 +  public static final String GATEWAY_IDLE_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".idle.timeout";
 +  public static final String REMOTE_IP_HEADER_NAME = GATEWAY_CONFIG_FILE_PREFIX + ".remote.ip.header.name";
 +
 +  /* @since 0.10 Websocket config variables */
 +  public static final String WEBSOCKET_FEATURE_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.feature.enabled";
 +  public static final String WEBSOCKET_MAX_TEXT_MESSAGE_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.text.size";
 +  public static final String WEBSOCKET_MAX_BINARY_MESSAGE_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.binary.size";
 +  public static final String WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.text.buffer.size";
 +  public static final String WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.max.binary.buffer.size";
 +  public static final String WEBSOCKET_INPUT_BUFFER_SIZE = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.input.buffer.size";
 +  public static final String WEBSOCKET_ASYNC_WRITE_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.async.write.timeout";
 +  public static final String WEBSOCKET_IDLE_TIMEOUT = GATEWAY_CONFIG_FILE_PREFIX + ".websocket.idle.timeout";
 +
 +  /**
 +   * Properties for the gateway port mapping feature
 +   */
 +  public static final String GATEWAY_PORT_MAPPING_PREFIX = GATEWAY_CONFIG_FILE_PREFIX + ".port.mapping.";
 +  public static final String GATEWAY_PORT_MAPPING_REGEX = GATEWAY_CONFIG_FILE_PREFIX + "\\.port\\.mapping\\..*";
 +  public static final String GATEWAY_PORT_MAPPING_ENABLED = GATEWAY_PORT_MAPPING_PREFIX + "enabled";
 +
 +  /**
 +   * Comma separated list of MIME Types to be compressed by Knox on the way out.
 +   *
 +   * @since 0.12
 +   */
 +  public static final String MIME_TYPES_TO_COMPRESS = GATEWAY_CONFIG_FILE_PREFIX
 +      + ".gzip.compress.mime.types";
 +
 +  // These config property names are not inline with the convention of using the
 +  // GATEWAY_CONFIG_FILE_PREFIX as is done by those above. These are left for
 +  // backward compatibility. 
 +  // LET'S NOT CONTINUE THIS PATTERN BUT LEAVE THEM FOR NOW.
 +  private static final String SSL_ENABLED = "ssl.enabled";
 +  private static final String SSL_EXCLUDE_PROTOCOLS = "ssl.exclude.protocols";
 +  private static final String SSL_INCLUDE_CIPHERS = "ssl.include.ciphers";
 +  private static final String SSL_EXCLUDE_CIPHERS = "ssl.exclude.ciphers";
 +  // END BACKWARD COMPATIBLE BLOCK
 +  
 +  public static final String DEFAULT_HTTP_PORT = "8888";
 +  public static final String DEFAULT_HTTP_PATH = "gateway";
 +  public static final String DEFAULT_DEPLOYMENT_DIR = "deployments";
 +  public static final String DEFAULT_SECURITY_DIR = "security";
 +  public static final String DEFAULT_DATA_DIR = "data";
 +
 +  /* Websocket defaults */
 +  public static final boolean DEFAULT_WEBSOCKET_FEATURE_ENABLED = false;
 +  public static final int DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE = Integer.MAX_VALUE;
 +  public static final int DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE = Integer.MAX_VALUE;
 +  public static final int DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE = 32768;
 +  public static final int DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE = 32768;
 +  public static final int DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE = 4096;
 +  public static final int DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT = 60000;
 +  public static final int DEFAULT_WEBSOCKET_IDLE_TIMEOUT = 300000;
 +
 +  public static final boolean DEFAULT_GATEWAY_PORT_MAPPING_ENABLED = true;
 +
 +  /**
 +   * Default list of MIME Types to be compressed.
 +   * @since 0.12
 +   */
 +  public static final String DEFAULT_MIME_TYPES_TO_COMPRESS = "text/html, text/plain, text/xml, text/css, "
 +      + "application/javascript, application/x-javascript, text/javascript";
 +
 +  public static final String COOKIE_SCOPING_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".scope.cookies.feature.enabled";
 +  public static final boolean DEFAULT_COOKIE_SCOPING_FEATURE_ENABLED = false;
 +  private static final String CRYPTO_ALGORITHM = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.algorithm";
 +  private static final String CRYPTO_PBE_ALGORITHM = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.pbe.algorithm";
 +  private static final String CRYPTO_TRANSFORMATION = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.transformation";
 +  private static final String CRYPTO_SALTSIZE = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.salt.size";
 +  private static final String CRYPTO_ITERATION_COUNT = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.iteration.count";
 +  private static final String CRYPTO_KEY_LENGTH = GATEWAY_CONFIG_FILE_PREFIX + ".crypto.key.length";
 +  public static final String SERVER_HEADER_ENABLED = GATEWAY_CONFIG_FILE_PREFIX + ".server.header.enabled";
 +
 +  private static List<String> DEFAULT_GLOBAL_RULES_SERVICES;
 +
 +
 +  public GatewayConfigImpl() {
 +    init();
 +  }
 +
 +  private String getVar( String variableName, String defaultValue ) {
 +    String value = get( variableName );
 +    if( value == null ) {
 +      value = System.getProperty( variableName );
 +    }
 +    if( value == null ) {
 +      value = System.getenv( variableName );
 +    }
 +    if( value == null ) {
 +      value = defaultValue;
 +    }
 +    return value;
 +  }
 +
 +  private String getGatewayHomeDir() {
 +    String home = get(
 +        GATEWAY_HOME_VAR,
 +        System.getProperty(
 +            GATEWAY_HOME_VAR,
 +            System.getenv( GATEWAY_HOME_VAR ) ) );
 +    return home;
 +  }
 +
 +  private void setGatewayHomeDir( String dir ) {
 +    set( GATEWAY_HOME_VAR, dir );
 +  }
 +
 +  @Override
 +  public String getGatewayConfDir() {
 +    String value = getVar( GATEWAY_CONF_HOME_VAR, getGatewayHomeDir() + File.separator + "conf"  );
-     return value;
++    return FilenameUtils.normalize(value);
 +  }
 +
 +  @Override
 +  public String getGatewayDataDir() {
 +    String systemValue =
 +        System.getProperty(GATEWAY_DATA_HOME_VAR, System.getenv(GATEWAY_DATA_HOME_VAR));
 +    String dataDir = null;
 +    if (systemValue != null) {
 +      dataDir = systemValue;
 +    } else {
 +      dataDir = get(DATA_DIR, getGatewayHomeDir() + File.separator + DEFAULT_DATA_DIR);
 +    }
 +    return dataDir;
 +  }
 +
 +  @Override
 +  public String getGatewayServicesDir() {
 +    return get(STACKS_SERVICES_DIR, getGatewayDataDir() + File.separator + DEFAULT_STACKS_SERVICES_DIR);
 +  }
 +
 +  @Override
 +  public String getGatewayApplicationsDir() {
 +    return get(APPLICATIONS_DIR, getGatewayDataDir() + File.separator + DEFAULT_APPLICATIONS_DIR);
 +  }
 +
 +  @Override
 +  public String getHadoopConfDir() {
 +    return get( HADOOP_CONF_DIR );
 +  }
 +
 +  private void init() {
 +    // Load environment variables.
 +    for( Map.Entry<String, String> e : System.getenv().entrySet() ) {
 +      set( "env." + e.getKey(), e.getValue() );
 +    }
 +    // Load system properties.
 +    for( Map.Entry<Object, Object> p : System.getProperties().entrySet() ) {
 +      set( "sys." + p.getKey().toString(), p.getValue().toString() );
 +    }
 +
 +    URL lastFileUrl = null;
 +    for( String fileName : GATEWAY_CONFIG_FILENAMES ) {
 +      lastFileUrl = loadConfig( fileName, lastFileUrl );
 +    }
 +    //set default services list
 +    setDefaultGlobalRulesServices();
 +
 +    initGatewayHomeDir( lastFileUrl );
 +
 +    // log whether the scoping cookies to the gateway.path feature is enabled
 +    log.cookieScopingFeatureEnabled(isCookieScopingToPathEnabled());
 +  }
 +
 +  private void setDefaultGlobalRulesServices() {
 +    DEFAULT_GLOBAL_RULES_SERVICES = new ArrayList<>();
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("NAMENODE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("JOBTRACKER");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("WEBHDFS");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("WEBHCAT");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("OOZIE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("WEBHBASE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("HIVE");
 +    DEFAULT_GLOBAL_RULES_SERVICES.add("RESOURCEMANAGER");
 +  }
 +
 +  private void initGatewayHomeDir( URL lastFileUrl ) {
 +    String home = System.getProperty( GATEWAY_HOME_VAR );
 +    if( home != null ) {
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "system property", home );
 +      return;
 +    }
 +    home = System.getenv( GATEWAY_HOME_VAR );
 +    if( home != null ) {
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "environment variable", home );
 +      return;
 +    }
 +    if( lastFileUrl != null ) {
 +      File file = new File( lastFileUrl.getFile() ).getAbsoluteFile();
 +      File dir = file.getParentFile().getParentFile(); // Move up two levels to get to parent of conf.
 +      if( dir.exists() && dir.canRead() )
 +        home = dir.getAbsolutePath();
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "configuration file location", home );
 +      return;
 +    }
 +    home = System.getProperty( "user.dir" );
 +    if( home != null ) {
 +      set( GATEWAY_HOME_VAR, home );
 +      log.settingGatewayHomeDir( "user.dir system property", home );
 +      return;
 +    }
 +  }
 +
 +  // 1. GATEWAY_HOME system property
 +  // 2. GATEWAY_HOME environment variable
 +  // 3. user.dir system property
 +  // 4. class path
 +  private URL loadConfig( String fileName, URL lastFileUrl ) {
 +    lastFileUrl = loadConfigFile( System.getProperty( GATEWAY_HOME_VAR ), fileName );
 +    if( lastFileUrl == null ) {
 +      lastFileUrl = loadConfigFile( System.getenv( GATEWAY_HOME_VAR ), fileName );
 +    }
 +    if( lastFileUrl == null ) {
 +      lastFileUrl = loadConfigFile( System.getProperty( "user.dir" ), fileName );
 +    }
 +    if( lastFileUrl == null ) {
 +      lastFileUrl = loadConfigResource( fileName );
 +    }
 +    if( lastFileUrl != null && !"file".equals( lastFileUrl.getProtocol() ) ) {
 +      lastFileUrl = null;
 +    }
 +    return lastFileUrl;
 +  }
 +
 +  private URL loadConfigFile( String dir, String file ) {
 +    URL url = null;
 +    if( dir != null ) {
 +      File f = new File( dir, file );
 +      if( f.exists() ) {
 +        String path = f.getAbsolutePath();
 +        try {
 +          url = f.toURI().toURL();
 +          addResource( new Path( path ) );
 +          log.loadingConfigurationFile( path );
 +        } catch ( MalformedURLException e ) {
 +          log.failedToLoadConfig( path, e );
 +        }
 +      }
 +    }
 +    return url;
 +  }
 +
 +  private URL loadConfigResource( String file ) {
 +    URL url = getResource( file );
 +    if( url != null ) {
 +      log.loadingConfigurationResource( url.toExternalForm() );
 +      addResource( url );
 +    }
 +    return url;
 +  }
 +
 +  @Override
 +  public String getGatewayHost() {
 +    String host = get( HTTP_HOST, "0.0.0.0" );
 +    return host;
 +  }
 +
 +  @Override
 +  public int getGatewayPort() {
 +    return Integer.parseInt( get( HTTP_PORT, DEFAULT_HTTP_PORT ) );
 +  }
 +
 +  @Override
 +  public String getGatewayPath() {
 +    return get( HTTP_PATH, DEFAULT_HTTP_PATH );
 +  }
 +
 +  @Override
 +  public String getGatewayTopologyDir() {
 +    return getGatewayConfDir() + File.separator + "topologies";
 +  }
 +
 +  @Override
 +  public String getGatewayDeploymentDir() {
 +    return get(DEPLOYMENT_DIR, getGatewayDataDir() + File.separator + DEFAULT_DEPLOYMENT_DIR);
 +  }
 +
 +  @Override
 +  public String getGatewaySecurityDir() {
 +    return get(SECURITY_DIR, getGatewayDataDir() + File.separator + DEFAULT_SECURITY_DIR);
 +  }
 +
 +  @Override
 +  public InetSocketAddress getGatewayAddress() throws UnknownHostException {
 +    String host = getGatewayHost();
 +    int port = getGatewayPort();
 +    InetSocketAddress address = new InetSocketAddress( host, port );
 +    return address;
 +  }
 +
 +  @Override
 +  public boolean isSSLEnabled() {
 +    String enabled = get( SSL_ENABLED, "true" );
 +    
 +    return "true".equals(enabled);
 +  }
 +
 +  @Override
 +  public boolean isHadoopKerberosSecured() {
 +    String hadoopKerberosSecured = get( HADOOP_KERBEROS_SECURED, "false" );
 +    return "true".equals(hadoopKerberosSecured);
 +  }
 +
 +  @Override
 +  public String getKerberosConfig() {
 +    return get( KRB5_CONFIG ) ;
 +  }
 +
 +  @Override
 +  public boolean isKerberosDebugEnabled() {
 +    String kerberosDebugEnabled = get( KRB5_DEBUG, "false" );
 +    return "true".equals(kerberosDebugEnabled);
 +  }
 +  
 +  @Override
 +  public String getKerberosLoginConfig() {
 +    return get( KRB5_LOGIN_CONFIG );
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getDefaultTopologyName()
 +   */
 +  @Override
 +  public String getDefaultTopologyName() {
 +    String name = get(GATEWAY_DEFAULT_TOPOLOGY_NAME_PARAM);
 +    return name != null ? name : GATEWAY_DEFAULT_TOPOLOGY_NAME;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getDefaultAppRedirectPath()
 +   */
 +  @Override
 +  public String getDefaultAppRedirectPath() {
 +    String defTopo = getDefaultTopologyName();
 +    if( defTopo == null ) {
 +      return null;
 +    } else {
 +      return "/" + getGatewayPath() + "/" + defTopo;
 +    }
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getFrontendUrl()
 +   */
 +  @Override
 +  public String getFrontendUrl() {
 +    String url = get( FRONTEND_URL, null );
 +    return url;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getExcludedSSLProtocols()
 +   */
 +  @Override
 +  public List<String> getExcludedSSLProtocols() {
 +    List<String> protocols = null;
 +    String value = get(SSL_EXCLUDE_PROTOCOLS);
 +    if (value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim())) {
 +      protocols = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return protocols;
 +  }
 +
 +  @Override
 +  public List<String> getIncludedSSLCiphers() {
 +    List<String> list = null;
 +    String value = get(SSL_INCLUDE_CIPHERS);
 +    if (value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim())) {
 +      list = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return list;
 +  }
 +
 +  @Override
 +  public List<String> getExcludedSSLCiphers() {
 +    List<String> list = null;
 +    String value = get(SSL_EXCLUDE_CIPHERS);
 +    if (value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim())) {
 +      list = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return list;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#isClientAuthNeeded()
 +   */
 +  @Override
 +  public boolean isClientAuthNeeded() {
 +    String clientAuthNeeded = get( CLIENT_AUTH_NEEDED, "false" );
 +    return "true".equals(clientAuthNeeded);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see org.apache.knox.gateway.config.GatewayConfig#isClientAuthWanted()
 +   */
 +  @Override
 +  public boolean isClientAuthWanted() {
 +    String clientAuthWanted = get( CLIENT_AUTH_WANTED, "false" );
 +    return "true".equals(clientAuthWanted);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getTruststorePath() {
 +    return get( TRUSTSTORE_PATH, null);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTrustAllCerts()
 +   */
 +  @Override
 +  public boolean getTrustAllCerts() {
 +    String trustAllCerts = get( TRUST_ALL_CERTS, "false" );
 +    return "true".equals(trustAllCerts);
 +  }
 +  
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getTruststoreType() {
 +    return get( TRUSTSTORE_TYPE, "JKS");
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getTruststorePath()
 +   */
 +  @Override
 +  public String getKeystoreType() {
 +    return get( KEYSTORE_TYPE, "JKS");
 +  }
 +
 +  @Override
 +  public boolean isXForwardedEnabled() {
 +    String xForwardedEnabled = get( XFORWARDED_ENABLED, "true" );
 +    return "true".equals(xForwardedEnabled);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getEphemeralDHKeySize()
 +   */
 +  @Override
 +  public String getEphemeralDHKeySize() {
 +    return get( EPHEMERAL_DH_KEY_SIZE, "2048");
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getHttpClientMaxConnections()
 +   */
 +  @Override
 +  public int getHttpClientMaxConnections() {
 +    return getInt( HTTP_CLIENT_MAX_CONNECTION, 32 );
 +  }
 +
 +  @Override
 +  public int getHttpClientConnectionTimeout() {
 +    int t = -1;
 +    String s = get( HTTP_CLIENT_CONNECTION_TIMEOUT, null );
 +    if ( s != null ) {
 +      try {
 +        t = (int)parseNetworkTimeout( s );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return t;
 +  }
 +
 +  @Override
 +  public int getHttpClientSocketTimeout() {
 +    int t = -1;
 +    String s = get( HTTP_CLIENT_SOCKET_TIMEOUT, null );
 +    if ( s != null ) {
 +      try {
 +        t = (int)parseNetworkTimeout( s );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return t;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#getThreadPoolMax()
 +   */
 +  @Override
 +  public int getThreadPoolMax() {
 +    int i = getInt( THREAD_POOL_MAX, 254 );
 +    // Testing has shown that a value lower than 5 prevents Jetty from servicing requests.
 +    if( i < 5 ) {
 +      i = 5;
 +    }
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerRequestBuffer() {
 +    int i = getInt( HTTP_SERVER_REQUEST_BUFFER, 16 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerRequestHeaderBuffer() {
 +    int i = getInt( HTTP_SERVER_REQUEST_HEADER_BUFFER, 8 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerResponseBuffer() {
 +    int i = getInt( HTTP_SERVER_RESPONSE_BUFFER, 32 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getHttpServerResponseHeaderBuffer() {
 +    int i = getInt( HTTP_SERVER_RESPONSE_HEADER_BUFFER, 8 * 1024 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getGatewayDeploymentsBackupVersionLimit() {
 +    int i = getInt( DEPLOYMENTS_BACKUP_VERSION_LIMIT, 5 );
 +    if( i < 0 ) {
 +      i = -1;
 +    }
 +    return i;
 +  }
 +
 +  @Override
 +  public long getGatewayIdleTimeout() {
 +    return getLong(GATEWAY_IDLE_TIMEOUT, 300000L);
 +  }
 +
 +  @Override
 +  public long getGatewayDeploymentsBackupAgeLimit() {
 +    PeriodFormatter f = new PeriodFormatterBuilder().appendDays().toFormatter();
 +    String s = get( DEPLOYMENTS_BACKUP_AGE_LIMIT, "-1" );
 +    long d;
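 +    // e.g. a configured value of "7" parses as seven days (604800000 ms);
 +    // negative or unparseable values fall back to -1, meaning no age limit.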
 +    try {
 +      Period p = Period.parse( s, f );
 +      d = p.toStandardDuration().getMillis();
 +      if( d < 0 ) {
 +        d = -1;
 +      }
 +    } catch( Exception e ) {
 +      d = -1;
 +    }
 +    return d;
 +  }
 +
 +  @Override
 +  public String getSigningKeystoreName() {
 +    return get(SIGNING_KEYSTORE_NAME);
 +  }
 +
 +  @Override
 +  public String getSigningKeyAlias() {
 +    return get(SIGNING_KEY_ALIAS);
 +  }
 +
 +  @Override
 +  public List<String> getGlobalRulesServices() {
 +    String value = get( GLOBAL_RULES_SERVICES );
 +    if ( value != null && !value.isEmpty() && !"none".equalsIgnoreCase(value.trim()) ) {
 +      return Arrays.asList( value.trim().split("\\s*,\\s*") );
 +    }
 +    return DEFAULT_GLOBAL_RULES_SERVICES;
 +  }
 +
 +  @Override
 +  public boolean isMetricsEnabled() {
 +    String metricsEnabled = get( METRICS_ENABLED, "false" );
 +    return "true".equals(metricsEnabled);
 +  }
 +
 +  @Override
 +  public boolean isJmxMetricsReportingEnabled() {
 +    String enabled = get( JMX_METRICS_REPORTING_ENABLED, "false" );
 +    return "true".equals(enabled);
 +  }
 +
 +  @Override
 +  public boolean isGraphiteMetricsReportingEnabled() {
 +    String enabled = get( GRAPHITE_METRICS_REPORTING_ENABLED, "false" );
 +    return "true".equals(enabled);
 +  }
 +
 +  @Override
 +  public String getGraphiteHost() {
 +    String host = get( GRAPHITE_METRICS_REPORTING_HOST, "localhost" );
 +    return host;
 +  }
 +
 +  @Override
 +  public int getGraphitePort() {
 +    int i = getInt( GRAPHITE_METRICS_REPORTING_PORT, 32772 );
 +    return i;
 +  }
 +
 +  @Override
 +  public int getGraphiteReportingFrequency() {
 +    int i = getInt( GRAPHITE_METRICS_REPORTING_FREQUENCY, 1 );
 +    return i;
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#isWebsocketEnabled()
 +   */
 +  @Override
 +  public boolean isWebsocketEnabled() {
 +    final String result = get( WEBSOCKET_FEATURE_ENABLED, Boolean.toString(DEFAULT_WEBSOCKET_FEATURE_ENABLED));
 +    return Boolean.parseBoolean(result);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxTextMessageSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxTextMessageSize() {
 +    return getInt( WEBSOCKET_MAX_TEXT_MESSAGE_SIZE, DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxBinaryMessageSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxBinaryMessageSize() {
 +    return getInt( WEBSOCKET_MAX_BINARY_MESSAGE_SIZE, DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxTextMessageBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxTextMessageBufferSize() {
 +    return getInt( WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE, DEFAULT_WEBSOCKET_MAX_TEXT_MESSAGE_BUFFER_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketMaxBinaryMessageBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketMaxBinaryMessageBufferSize() {
 +    return getInt( WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE, DEFAULT_WEBSOCKET_MAX_BINARY_MESSAGE_BUFFER_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketInputBufferSize()
 +   */
 +  @Override
 +  public int getWebsocketInputBufferSize() {
 +    return getInt( WEBSOCKET_INPUT_BUFFER_SIZE, DEFAULT_WEBSOCKET_INPUT_BUFFER_SIZE);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketAsyncWriteTimeout()
 +   */
 +  @Override
 +  public int getWebsocketAsyncWriteTimeout() {
 +    return getInt( WEBSOCKET_ASYNC_WRITE_TIMEOUT, DEFAULT_WEBSOCKET_ASYNC_WRITE_TIMEOUT);
 +  }
 +
 +  /* (non-Javadoc)
 +   * @see GatewayConfig#websocketIdleTimeout()
 +   */
 +  @Override
 +  public int getWebsocketIdleTimeout() {
 +    return getInt( WEBSOCKET_IDLE_TIMEOUT, DEFAULT_WEBSOCKET_IDLE_TIMEOUT);
 +  }
 +
 +  /*
 +   * (non-Javadoc)
 +   *
 +   * @see
 +   * GatewayConfig#getMimeTypesToCompress()
 +   */
 +  @Override
 +  public List<String> getMimeTypesToCompress() {
 +    List<String> mimeTypes = null;
 +    String value = get(MIME_TYPES_TO_COMPRESS, DEFAULT_MIME_TYPES_TO_COMPRESS);
 +    if (value != null && !value.isEmpty()) {
 +      mimeTypes = Arrays.asList(value.trim().split("\\s*,\\s*"));
 +    }
 +    return mimeTypes;
 +  }
 +
 +  /**
 +   * Map of topology names to their configured ports.
 +   *
 +   * @return an unmodifiable map of topology name to port
 +   */
 +  @Override
 +  public Map<String, Integer> getGatewayPortMappings() {
 +
 +    final Map<String, Integer> result = new ConcurrentHashMap<String, Integer>();
 +    final Map<String, String> properties = getValByRegex(GATEWAY_PORT_MAPPING_REGEX);
 +
 +    // Convert port no. from string to int
 +    for(final Map.Entry<String, String> e : properties.entrySet()) {
 +      // ignore the GATEWAY_PORT_MAPPING_ENABLED property
 +      if(!e.getKey().equalsIgnoreCase(GATEWAY_PORT_MAPPING_ENABLED)) {
 +        // extract the topology name and use it as a key
 +        result.put(StringUtils.substringAfter(e.getKey(), GATEWAY_PORT_MAPPING_PREFIX), Integer.parseInt(e.getValue()) );
 +      }
 +
 +    }
 +
 +    return Collections.unmodifiableMap(result);
 +  }
 +
 +  /**
 +   * Is the port mapping feature enabled?
 +   *
 +   * @return true if topology port mapping is enabled, false otherwise
 +   */
 +  @Override
 +  public boolean isGatewayPortMappingEnabled() {
 +    final String result = get( GATEWAY_PORT_MAPPING_ENABLED, Boolean.toString(DEFAULT_GATEWAY_PORT_MAPPING_ENABLED));
 +    return Boolean.parseBoolean(result);
 +  }
 +
 +  private static long parseNetworkTimeout(String s ) {
 +    PeriodFormatter f = new PeriodFormatterBuilder()
 +        .appendMinutes().appendSuffix("m"," min")
 +        .appendSeconds().appendSuffix("s"," sec")
 +        .appendMillis().toFormatter();
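 +    // Accepts values like "2m30s" (150000 ms), "45s" (45000 ms), or bare
 +    // milliseconds such as "5000"; both the "m"/" min" and "s"/" sec" suffixes parse.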
 +    Period p = Period.parse( s, f );
 +    return p.toStandardDuration().getMillis();
 +  }
 +
 +  @Override
 +  public boolean isCookieScopingToPathEnabled() {
 +    final boolean result = Boolean.parseBoolean(get(COOKIE_SCOPING_ENABLED,
 +            Boolean.toString(DEFAULT_COOKIE_SCOPING_FEATURE_ENABLED)));
 +    return result;
 +  }
 +
 +  @Override
 +  public String getHeaderNameForRemoteAddress() {
 +    String value = getVar(REMOTE_IP_HEADER_NAME, "X-Forwarded-For");
 +    return value;
 +  }
 +
 +  @Override
 +  public String getAlgorithm() {
 +    return getVar(CRYPTO_ALGORITHM, null);
 +  }
 +
 +  @Override
 +  public String getPBEAlgorithm() {
 +    return getVar(CRYPTO_PBE_ALGORITHM, null);
 +  }
 +
 +  @Override
 +  public String getTransformation() {
 +    return getVar(CRYPTO_TRANSFORMATION, null);
 +  }
 +
 +  @Override
 +  public String getSaltSize() {
 +    return getVar(CRYPTO_SALTSIZE, null);
 +  }
 +
 +  @Override
 +  public String getIterationCount() {
 +    return getVar(CRYPTO_ITERATION_COUNT, null);
 +  }
 +
 +  @Override
 +  public String getKeyLength() {
 +    return getVar(CRYPTO_KEY_LENGTH, null);
 +  }
 +
 +  @Override
 +  public boolean isGatewayServerHeaderEnabled() {
 +    return Boolean.parseBoolean(getVar(SERVER_HEADER_ENABLED, "true"));
 +  }
 +}
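
A note on the port-mapping getters above: getGatewayPortMappings() collects every
property whose key matches GATEWAY_PORT_MAPPING_REGEX, skips the
GATEWAY_PORT_MAPPING_ENABLED flag itself, and strips GATEWAY_PORT_MAPPING_PREFIX
from the remaining keys to recover topology names. Assuming the conventional
property names (the constant values live elsewhere in this class, so the names
below are illustrative), a gateway-site.xml fragment such as the following would
produce the map {"sandbox" -> 9443}:

  <property>
    <name>gateway.port.mapping.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>gateway.port.mapping.sandbox</name>
    <value>9443</value>
  </property>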


[07/53] [abbrv] knox git commit: KNOX-998 - package name refactoring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyInboundClient.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyInboundClient.java b/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyInboundClient.java
new file mode 100644
index 0000000..c12ee53
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/websockets/ProxyInboundClient.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.websockets;
+
+import javax.websocket.CloseReason;
+import javax.websocket.Endpoint;
+import javax.websocket.EndpointConfig;
+import javax.websocket.MessageHandler;
+import javax.websocket.Session;
+
+/**
+ * A WebSocket client endpoint that delivers events through a callback
+ * rather than through annotations. It accepts both text and binary messages.
+ * @since 0.14.0
+ */
+public class ProxyInboundClient extends Endpoint {
+
+  /**
+   * Callback to be called once we have events on our socket.
+   */
+  private MessageEventCallback callback;
+
+  protected Session session;
+  protected EndpointConfig config;
+
+
+  public ProxyInboundClient(final MessageEventCallback callback) {
+    super();
+    this.callback = callback;
+  }
+
+  /**
+   * Developers must implement this method to be notified when a new
+   * conversation has just begun.
+   *
+   * @param backendSession the session that has just been activated.
+   * @param config  the configuration used to configure this endpoint.
+   */
+  @Override
+  public void onOpen(final javax.websocket.Session backendSession, final EndpointConfig config) {
+    this.session = backendSession;
+    this.config = config;
+
+    /* Set the max message size */
+    session.setMaxBinaryMessageBufferSize(Integer.MAX_VALUE);
+    session.setMaxTextMessageBufferSize(Integer.MAX_VALUE);
+
+    /* Add message handler for binary data */
+    session.addMessageHandler(new MessageHandler.Whole<byte[]>() {
+
+      /**
+       * Called when the message has been fully received.
+       *
+       * @param message the message data.
+       */
+      @Override
+      public void onMessage(final byte[] message) {
+        callback.onMessageBinary(message, true, session);
+      }
+
+    });
+
+    /* Add message handler for text data */
+    session.addMessageHandler(new MessageHandler.Whole<String>() {
+
+      /**
+       * Called when the message has been fully received.
+       *
+       * @param message the message data.
+       */
+      @Override
+      public void onMessage(final String message) {
+        callback.onMessageText(message, session);
+      }
+
+    });
+
+    callback.onConnectionOpen(backendSession);
+  }
+
+  @Override
+  public void onClose(final javax.websocket.Session backendSession, final CloseReason closeReason) {
+    callback.onConnectionClose(closeReason);
+    this.session = null;
+  }
+
+  @Override
+  public void onError(final javax.websocket.Session backendSession, final Throwable cause) {
+    callback.onError(cause);
+    this.session = null;
+  }
+
+}
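
For illustration, a minimal sketch of driving the endpoint above, assuming the
MessageEventCallback interface whose methods appear in the tests below and an
illustrative endpoint URI:

  import javax.websocket.CloseReason;
  import javax.websocket.ContainerProvider;
  import javax.websocket.Session;
  import javax.websocket.WebSocketContainer;
  import java.net.URI;

  public class ProxyInboundClientSketch {
    public static void main(String[] args) throws Exception {
      WebSocketContainer container = ContainerProvider.getWebSocketContainer();
      // Only the text handler does anything in this sketch.
      ProxyInboundClient client = new ProxyInboundClient(new MessageEventCallback() {
        public void doCallback(String message) { }
        public void onConnectionOpen(Object session) { }
        public void onConnectionClose(CloseReason reason) { }
        public void onError(Throwable cause) { cause.printStackTrace(); }
        public void onMessageText(String message, Object session) {
          System.out.println("received: " + message);
        }
        public void onMessageBinary(byte[] message, boolean last, Object session) { }
      });
      // connectToServer performs the handshake and drives onOpen above.
      Session session = container.connectToServer(client, new URI("ws://localhost:8080/"));
      session.getBasicRemote().sendText("ping");
    }
  }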

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-server/src/test/java/org/apache/hadoop/gateway/websockets/ProxyInboundClientTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/hadoop/gateway/websockets/ProxyInboundClientTest.java b/gateway-server/src/test/java/org/apache/hadoop/gateway/websockets/ProxyInboundClientTest.java
deleted file mode 100644
index 69b45dd..0000000
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/websockets/ProxyInboundClientTest.java
+++ /dev/null
@@ -1,374 +0,0 @@
-package org.apache.hadoop.gateway.websockets;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.eclipse.jetty.server.Handler;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.ServerConnector;
-import org.eclipse.jetty.server.handler.ContextHandler;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import javax.websocket.CloseReason;
-import javax.websocket.ContainerProvider;
-import javax.websocket.DeploymentException;
-import javax.websocket.Session;
-import javax.websocket.WebSocketContainer;
-import java.io.IOException;
-import java.net.URI;
-import java.nio.ByteBuffer;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.hamcrest.Matchers.instanceOf;
-
-/**
- * Test {@link ProxyInboundClient} class.
- * @since 0.14.0
- */
-public class ProxyInboundClientTest {
-
-  private static Server server;
-  private static URI serverUri;
-  private static Handler handler;
-
-  String recievedMessage = null;
-
-  byte[] recievedBinaryMessage = null;
-
-
-  /* create an instance */
-  public ProxyInboundClientTest() {
-    super();
-  }
-
-  @BeforeClass
-  public static void startWSServer() throws Exception
-  {
-    server = new Server();
-    ServerConnector connector = new ServerConnector(server);
-    server.addConnector(connector);
-
-    handler = new WebsocketEchoHandler();
-
-    ContextHandler context = new ContextHandler();
-    context.setContextPath("/");
-    context.setHandler(handler);
-    server.setHandler(context);
-
-    server.start();
-
-    String host = connector.getHost();
-    if (host == null)
-    {
-      host = "localhost";
-    }
-    int port = connector.getLocalPort();
-    serverUri = new URI(String.format("ws://%s:%d/",host,port));
-  }
-
-  @AfterClass
-  public static void stopServer()
-  {
-    try
-    {
-      server.stop();
-    }
-    catch (Exception e)
-    {
-      e.printStackTrace(System.err);
-    }
-  }
-
-  //@Test(timeout = 3000)
-  @Test
-  public void testClientInstance() throws IOException, DeploymentException {
-
-    final String textMessage = "Echo";
-    final ByteBuffer binarymessage = ByteBuffer.wrap(textMessage.getBytes());
-
-    final AtomicBoolean isTestComplete = new AtomicBoolean(false);
-
-    final WebSocketContainer container = ContainerProvider.getWebSocketContainer();
-    final ProxyInboundClient client = new ProxyInboundClient( new MessageEventCallback() {
-
-      /**
-       * A generic callback, can be left un-implemented
-       *
-       * @param message
-       */
-      @Override
-      public void doCallback(String message) {
-
-      }
-
-      /**
-       * Callback when connection is established.
-       *
-       * @param session
-       */
-      @Override
-      public void onConnectionOpen(Object session) {
-
-      }
-
-      /**
-       * Callback when connection is closed.
-       *
-       * @param reason
-       */
-      @Override
-      public void onConnectionClose(CloseReason reason) {
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when there is an error in connection.
-       *
-       * @param cause
-       */
-      @Override
-      public void onError(Throwable cause) {
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when a text message is received.
-       *
-       * @param message
-       * @param session
-       */
-      @Override
-      public void onMessageText(String message, Object session) {
-        recievedMessage = message;
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when a binary message is received.
-       *
-       * @param message
-       * @param last
-       * @param session
-       */
-      @Override
-      public void onMessageBinary(byte[] message, boolean last,
-          Object session) {
-
-      }
-    } );
-
-    Assert.assertThat(client, instanceOf(javax.websocket.Endpoint.class));
-
-    Session session = container.connectToServer(client, serverUri);
-
-    session.getBasicRemote().sendText(textMessage);
-
-    while(!isTestComplete.get()) {
-      /* just wait for the test to finish */
-    }
-
-    Assert.assertEquals("The received text message is not the same as the sent", textMessage, recievedMessage);
-  }
-
-  @Test(timeout = 3000)
-  public void testBinarymessage() throws IOException, DeploymentException {
-
-    final String textMessage = "Echo";
-    final ByteBuffer binarymessage = ByteBuffer.wrap(textMessage.getBytes());
-
-    final AtomicBoolean isTestComplete = new AtomicBoolean(false);
-
-    final WebSocketContainer container = ContainerProvider.getWebSocketContainer();
-    final ProxyInboundClient client = new ProxyInboundClient( new MessageEventCallback() {
-
-      /**
-       * A generic callback, can be left un-implemented
-       *
-       * @param message
-       */
-      @Override
-      public void doCallback(String message) {
-
-      }
-
-      /**
-       * Callback when connection is established.
-       *
-       * @param session
-       */
-      @Override
-      public void onConnectionOpen(Object session) {
-
-      }
-
-      /**
-       * Callback when connection is closed.
-       *
-       * @param reason
-       */
-      @Override
-      public void onConnectionClose(CloseReason reason) {
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when there is an error in connection.
-       *
-       * @param cause
-       */
-      @Override
-      public void onError(Throwable cause) {
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when a text message is received.
-       *
-       * @param message
-       * @param session
-       */
-      @Override
-      public void onMessageText(String message, Object session) {
-        recievedMessage = message;
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when a binary message is received.
-       *
-       * @param message
-       * @param last
-       * @param session
-       */
-      @Override
-      public void onMessageBinary(byte[] message, boolean last,
-          Object session) {
-        recievedBinaryMessage = message;
-        isTestComplete.set(true);
-      }
-    } );
-
-    Assert.assertThat(client, instanceOf(javax.websocket.Endpoint.class));
-
-    Session session = container.connectToServer(client, serverUri);
-
-    session.getBasicRemote().sendBinary(binarymessage);
-
-    while(!isTestComplete.get()) {
-      /* just wait for the test to finish */
-    }
-
-    Assert.assertEquals("Binary message does not match", textMessage, new String(recievedBinaryMessage));
-  }
-
-  @Test(timeout = 3000)
-  public void testTextMaxBufferLimit() throws IOException, DeploymentException {
-
-    final String longMessage = RandomStringUtils.random(100000);
-
-    final AtomicBoolean isTestComplete = new AtomicBoolean(false);
-
-    final WebSocketContainer container = ContainerProvider.getWebSocketContainer();
-    final ProxyInboundClient client = new ProxyInboundClient( new MessageEventCallback() {
-
-      /**
-       * A generic callback, can be left un-implemented
-       *
-       * @param message
-       */
-      @Override
-      public void doCallback(String message) {
-
-      }
-
-      /**
-       * Callback when connection is established.
-       *
-       * @param session
-       */
-      @Override
-      public void onConnectionOpen(Object session) {
-
-      }
-
-      /**
-       * Callback when connection is closed.
-       *
-       * @param reason
-       */
-      @Override
-      public void onConnectionClose(CloseReason reason) {
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when there is an error in connection.
-       *
-       * @param cause
-       */
-      @Override
-      public void onError(Throwable cause) {
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when a text message is received.
-       *
-       * @param message
-       * @param session
-       */
-      @Override
-      public void onMessageText(String message, Object session) {
-        recievedMessage = message;
-        isTestComplete.set(true);
-      }
-
-      /**
-       * Callback when a binary message is received.
-       *
-       * @param message
-       * @param last
-       * @param session
-       */
-      @Override
-      public void onMessageBinary(byte[] message, boolean last,
-          Object session) {
-
-      }
-    } );
-
-    Assert.assertThat(client, instanceOf(javax.websocket.Endpoint.class));
-
-    Session session = container.connectToServer(client, serverUri);
-
-    session.getBasicRemote().sendText(longMessage);
-
-    while(!isTestComplete.get()) {
-      /* just wait for the test to finish */
-    }
-
-    Assert.assertEquals(longMessage, recievedMessage);
-
-  }
-
-
-
-}

http://git-wip-us.apache.org/repos/asf/knox/blob/7d0bff16/gateway-server/src/test/java/org/apache/knox/gateway/websockets/ProxyInboundClientTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/websockets/ProxyInboundClientTest.java b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/ProxyInboundClientTest.java
new file mode 100644
index 0000000..f8dd167
--- /dev/null
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/websockets/ProxyInboundClientTest.java
@@ -0,0 +1,374 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.knox.gateway.websockets;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.eclipse.jetty.server.Handler;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.server.handler.ContextHandler;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.websocket.CloseReason;
+import javax.websocket.ContainerProvider;
+import javax.websocket.DeploymentException;
+import javax.websocket.Session;
+import javax.websocket.WebSocketContainer;
+import java.io.IOException;
+import java.net.URI;
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Test {@link ProxyInboundClient} class.
+ * @since 0.14.0
+ */
+public class ProxyInboundClientTest {
+
+  private static Server server;
+  private static URI serverUri;
+  private static Handler handler;
+
+  String receivedMessage = null;
+
+  byte[] receivedBinaryMessage = null;
+
+
+  /* create an instance */
+  public ProxyInboundClientTest() {
+    super();
+  }
+
+  @BeforeClass
+  public static void startWSServer() throws Exception
+  {
+    server = new Server();
+    ServerConnector connector = new ServerConnector(server);
+    server.addConnector(connector);
+
+    handler = new WebsocketEchoHandler();
+
+    ContextHandler context = new ContextHandler();
+    context.setContextPath("/");
+    context.setHandler(handler);
+    server.setHandler(context);
+
+    server.start();
+
+    String host = connector.getHost();
+    if (host == null)
+    {
+      host = "localhost";
+    }
+    int port = connector.getLocalPort();
+    serverUri = new URI(String.format("ws://%s:%d/",host,port));
+  }
+
+  @AfterClass
+  public static void stopServer()
+  {
+    try
+    {
+      server.stop();
+    }
+    catch (Exception e)
+    {
+      e.printStackTrace(System.err);
+    }
+  }
+
+  // Bounded like the other tests so the busy-wait below cannot hang the build.
+  @Test(timeout = 3000)
+  public void testClientInstance() throws IOException, DeploymentException {
+
+    final String textMessage = "Echo";
+    final ByteBuffer binarymessage = ByteBuffer.wrap(textMessage.getBytes());
+
+    final AtomicBoolean isTestComplete = new AtomicBoolean(false);
+
+    final WebSocketContainer container = ContainerProvider.getWebSocketContainer();
+    final ProxyInboundClient client = new ProxyInboundClient( new MessageEventCallback() {
+
+      /**
+       * A generic callback; it can be left unimplemented.
+       *
+       * @param message
+       */
+      @Override
+      public void doCallback(String message) {
+
+      }
+
+      /**
+       * Callback when connection is established.
+       *
+       * @param session
+       */
+      @Override
+      public void onConnectionOpen(Object session) {
+
+      }
+
+      /**
+       * Callback when connection is closed.
+       *
+       * @param reason
+       */
+      @Override
+      public void onConnectionClose(CloseReason reason) {
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when there is an error in connection.
+       *
+       * @param cause
+       */
+      @Override
+      public void onError(Throwable cause) {
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when a text message is received.
+       *
+       * @param message
+       * @param session
+       */
+      @Override
+      public void onMessageText(String message, Object session) {
+        receivedMessage = message;
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when a binary message is received.
+       *
+       * @param message
+       * @param last
+       * @param session
+       */
+      @Override
+      public void onMessageBinary(byte[] message, boolean last,
+          Object session) {
+
+      }
+    } );
+
+    Assert.assertThat(client, instanceOf(javax.websocket.Endpoint.class));
+
+    Session session = container.connectToServer(client, serverUri);
+
+    session.getBasicRemote().sendText(textMessage);
+
+    while(!isTestComplete.get()) {
+      /* just wait for the test to finish */
+    }
+
+    Assert.assertEquals("The received text message is not the same as the sent", textMessage, receivedMessage);
+  }
+
+  @Test(timeout = 3000)
+  public void testBinarymessage() throws IOException, DeploymentException {
+
+    final String textMessage = "Echo";
+    final ByteBuffer binarymessage = ByteBuffer.wrap(textMessage.getBytes());
+
+    final AtomicBoolean isTestComplete = new AtomicBoolean(false);
+
+    final WebSocketContainer container = ContainerProvider.getWebSocketContainer();
+    final ProxyInboundClient client = new ProxyInboundClient( new MessageEventCallback() {
+
+      /**
+       * A generic callback; it can be left unimplemented.
+       *
+       * @param message
+       */
+      @Override
+      public void doCallback(String message) {
+
+      }
+
+      /**
+       * Callback when connection is established.
+       *
+       * @param session
+       */
+      @Override
+      public void onConnectionOpen(Object session) {
+
+      }
+
+      /**
+       * Callback when connection is closed.
+       *
+       * @param reason
+       */
+      @Override
+      public void onConnectionClose(CloseReason reason) {
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when there is an error in connection.
+       *
+       * @param cause
+       */
+      @Override
+      public void onError(Throwable cause) {
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when a text message is received.
+       *
+       * @param message
+       * @param session
+       */
+      @Override
+      public void onMessageText(String message, Object session) {
+        receivedMessage = message;
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when a binary message is received.
+       *
+       * @param message
+       * @param last
+       * @param session
+       */
+      @Override
+      public void onMessageBinary(byte[] message, boolean last,
+          Object session) {
+        receivedBinaryMessage = message;
+        isTestComplete.set(true);
+      }
+    } );
+
+    Assert.assertThat(client, instanceOf(javax.websocket.Endpoint.class));
+
+    Session session = container.connectToServer(client, serverUri);
+
+    session.getBasicRemote().sendBinary(binarymessage);
+
+    while(!isTestComplete.get()) {
+      /* just wait for the test to finish */
+    }
+
+    Assert.assertEquals("Binary message does not match", textMessage, new String(receivedBinaryMessage));
+  }
+
+  @Test(timeout = 3000)
+  public void testTextMaxBufferLimit() throws IOException, DeploymentException {
+
+    final String longMessage = RandomStringUtils.random(100000);
+
+    final AtomicBoolean isTestComplete = new AtomicBoolean(false);
+
+    final WebSocketContainer container = ContainerProvider.getWebSocketContainer();
+    final ProxyInboundClient client = new ProxyInboundClient( new MessageEventCallback() {
+
+      /**
+       * A generic callback; it can be left unimplemented.
+       *
+       * @param message
+       */
+      @Override
+      public void doCallback(String message) {
+
+      }
+
+      /**
+       * Callback when connection is established.
+       *
+       * @param session
+       */
+      @Override
+      public void onConnectionOpen(Object session) {
+
+      }
+
+      /**
+       * Callback when connection is closed.
+       *
+       * @param reason
+       */
+      @Override
+      public void onConnectionClose(CloseReason reason) {
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when there is an error in connection.
+       *
+       * @param cause
+       */
+      @Override
+      public void onError(Throwable cause) {
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when a text message is received.
+       *
+       * @param message
+       * @param session
+       */
+      @Override
+      public void onMessageText(String message, Object session) {
+        receivedMessage = message;
+        isTestComplete.set(true);
+      }
+
+      /**
+       * Callback when a binary message is received.
+       *
+       * @param message
+       * @param last
+       * @param session
+       */
+      @Override
+      public void onMessageBinary(byte[] message, boolean last,
+          Object session) {
+
+      }
+    } );
+
+    Assert.assertThat(client, instanceOf(javax.websocket.Endpoint.class));
+
+    Session session = container.connectToServer(client, serverUri);
+
+    session.getBasicRemote().sendText(longMessage);
+
+    while(!isTestComplete.get()) {
+      /* just wait for the test to finish */
+    }
+
+    Assert.assertEquals(longMessage, receivedMessage);
+
+  }
+
+
+
+}
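
The tests above spin on an AtomicBoolean until a callback fires, relying on the
@Test timeout to abort on failure. A CountDownLatch with a bounded await is a
common alternative that avoids the busy loop; a sketch with illustrative names:

  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.TimeUnit;

  class CallbackLatchSketch {
    // The callback counts this down instead of setting an AtomicBoolean.
    private final CountDownLatch done = new CountDownLatch(1);

    void onEvent() {
      done.countDown();
    }

    void awaitEvent() throws InterruptedException {
      // Bounded wait: fails fast rather than spinning if no event arrives.
      if (!done.await(3, TimeUnit.SECONDS)) {
        throw new AssertionError("Timed out waiting for the websocket callback");
      }
    }
  }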


[18/53] [abbrv] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

Posted by mo...@apache.org.
http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
index 455b0fa,0000000..38653f4
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
@@@ -1,689 -1,0 +1,818 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.knox.gateway.services.topology.impl;
 +
 +
 +import org.apache.commons.digester3.Digester;
 +import org.apache.commons.digester3.binder.DigesterLoader;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.topology.TopologyMonitor;
 +import org.apache.knox.gateway.topology.TopologyProvider;
 +import org.apache.knox.gateway.topology.builder.TopologyBuilder;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.topology.xml.AmbariFormatXmlTopologyRules;
 +import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
 +import org.eclipse.persistence.jaxb.JAXBContextProperties;
 +import org.xml.sax.SAXException;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
++import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import static org.apache.commons.digester3.binder.DigesterLoader.newLoader;
 +
 +
 +public class DefaultTopologyService
 +    extends FileAlterationListenerAdaptor
 +    implements TopologyService, TopologyMonitor, TopologyProvider, FileFilter, FileAlterationListener {
 +
 +  private static Auditor auditor = AuditServiceFactory.getAuditService().getAuditor(
 +    AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +    AuditConstants.KNOX_COMPONENT_NAME);
 +
 +  private static final List<String> SUPPORTED_TOPOLOGY_FILE_EXTENSIONS = new ArrayList<String>();
 +  static {
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("xml");
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("conf");
 +  }
 +
 +  private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
 +  private static DigesterLoader digesterLoader = newLoader(new KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules());
 +  private List<FileAlterationMonitor> monitors = new ArrayList<>();
 +  private File topologiesDirectory;
++  private File sharedProvidersDirectory;
 +  private File descriptorsDirectory;
 +
++  private DescriptorsMonitor descriptorsMonitor;
++
 +  private Set<TopologyListener> listeners;
 +  private volatile Map<File, Topology> topologies;
 +  private AliasService aliasService;
 +
 +
 +  private Topology loadTopology(File file) throws IOException, SAXException, URISyntaxException, InterruptedException {
 +    final long TIMEOUT = 250; //ms
 +    final long DELAY = 50; //ms
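 +    // Retry parse failures for up to TIMEOUT ms in DELAY ms steps (about five
 +    // attempts), e.g. when the monitor catches a file that is still being written.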
 +    log.loadingTopologyFile(file.getAbsolutePath());
 +    Topology topology;
 +    long start = System.currentTimeMillis();
 +    while (true) {
 +      try {
 +        topology = loadTopologyAttempt(file);
 +        break;
 +      } catch (IOException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      } catch (SAXException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      }
 +    }
 +    return topology;
 +  }
 +
 +  private Topology loadTopologyAttempt(File file) throws IOException, SAXException, URISyntaxException {
 +    Topology topology;
 +    Digester digester = digesterLoader.newDigester();
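 +    // The digester loader was configured above with both the Knox- and
 +    // Ambari-format XML rules, so either topology format parses into a builder.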
 +    TopologyBuilder topologyBuilder = digester.parse(FileUtils.openInputStream(file));
 +    if (null == topologyBuilder) {
 +      return null;
 +    }
 +    topology = topologyBuilder.build();
 +    topology.setUri(file.toURI());
 +    topology.setName(FilenameUtils.removeExtension(file.getName()));
 +    topology.setTimestamp(file.lastModified());
 +    return topology;
 +  }
 +
 +  private void redeployTopology(Topology topology) {
 +    File topologyFile = new File(topology.getUri());
 +    try {
 +      TopologyValidator tv = new TopologyValidator(topology);
 +
 +      if(!tv.validateTopology()) {
 +        throw new SAXException(tv.getErrorString());
 +      }
 +
 +      long start = System.currentTimeMillis();
 +      long limit = 1000L; // One second.
 +      long elapsed = 1;
 +      while (elapsed <= limit) {
 +        try {
 +          long origTimestamp = topologyFile.lastModified();
 +          long setTimestamp = Math.max(System.currentTimeMillis(), topologyFile.lastModified() + elapsed);
 +          if(topologyFile.setLastModified(setTimestamp)) {
 +            long newTimestamp = topologyFile.lastModified();
 +            if(newTimestamp > origTimestamp) {
 +              break;
 +            } else {
 +              Thread.sleep(10);
 +              elapsed = System.currentTimeMillis() - start;
 +              continue;
 +            }
 +          } else {
 +            auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +                ActionOutcome.FAILURE);
 +            log.failedToRedeployTopology(topology.getName());
 +            break;
 +          }
 +        } catch (InterruptedException e) {
 +          auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +              ActionOutcome.FAILURE);
 +          log.failedToRedeployTopology(topology.getName(), e);
 +          Thread.currentThread().interrupt();
 +        }
 +      }
 +    } catch (SAXException e) {
 +      auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToRedeployTopology(topology.getName(), e);
 +    }
 +  }
 +
 +  private List<TopologyEvent> createChangeEvents(
 +      Map<File, Topology> oldTopologies,
 +      Map<File, Topology> newTopologies) {
 +    ArrayList<TopologyEvent> events = new ArrayList<TopologyEvent>();
 +    // Go through the old topologies and find anything that was deleted.
 +    for (File file : oldTopologies.keySet()) {
 +      if (!newTopologies.containsKey(file)) {
 +        events.add(new TopologyEvent(TopologyEvent.Type.DELETED, oldTopologies.get(file)));
 +      }
 +    }
 +    // Go through the new topologies and figure out what was updated vs added.
 +    for (File file : newTopologies.keySet()) {
 +      if (oldTopologies.containsKey(file)) {
 +        Topology oldTopology = oldTopologies.get(file);
 +        Topology newTopology = newTopologies.get(file);
 +        if (newTopology.getTimestamp() > oldTopology.getTimestamp()) {
 +          events.add(new TopologyEvent(TopologyEvent.Type.UPDATED, newTopologies.get(file)));
 +        }
 +      } else {
 +        events.add(new TopologyEvent(TopologyEvent.Type.CREATED, newTopologies.get(file)));
 +      }
 +    }
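 +    // e.g. old = {a.xml, b.xml}, new = {b.xml (newer timestamp), c.xml}
 +    // yields DELETED(a), UPDATED(b), CREATED(c).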
 +    return events;
 +  }
 +
 +  private File calculateAbsoluteTopologiesDir(GatewayConfig config) {
-     String normalizedTopologyDir = FilenameUtils.normalize(config.getGatewayTopologyDir());
-     File topoDir = new File(normalizedTopologyDir);
++    File topoDir = new File(config.getGatewayTopologyDir());
 +    topoDir = topoDir.getAbsoluteFile();
 +    return topoDir;
 +  }
 +
 +  private File calculateAbsoluteConfigDir(GatewayConfig config) {
 +    File configDir = null;
 +
-     String path = FilenameUtils.normalize(config.getGatewayConfDir());
-     if (path != null) {
-       configDir = new File(config.getGatewayConfDir());
-     } else {
-       configDir = (new File(config.getGatewayTopologyDir())).getParentFile();
-     }
-     configDir = configDir.getAbsoluteFile();
++    String path = config.getGatewayConfDir();
++    configDir = (path != null) ? new File(path) : (new File(config.getGatewayTopologyDir())).getParentFile();
 +
-     return configDir;
++    return configDir.getAbsoluteFile();
 +  }
 +
 +  private void  initListener(FileAlterationMonitor  monitor,
 +                            File                   directory,
 +                            FileFilter             filter,
 +                            FileAlterationListener listener) {
 +    monitors.add(monitor);
 +    FileAlterationObserver observer = new FileAlterationObserver(directory, filter);
 +    observer.addListener(listener);
 +    monitor.addObserver(observer);
 +  }
 +
 +  private void initListener(File directory, FileFilter filter, FileAlterationListener listener) throws IOException, SAXException {
 +    // Increasing the monitoring interval to 5 seconds as profiling has shown
 +    // this is rather expensive in terms of generated garbage objects.
 +    initListener(new FileAlterationMonitor(5000L), directory, filter, listener);
 +  }
 +
 +  private Map<File, Topology> loadTopologies(File directory) {
 +    Map<File, Topology> map = new HashMap<>();
 +    if (directory.isDirectory() && directory.canRead()) {
-       for (File file : directory.listFiles(this)) {
-         try {
-           Topology loadTopology = loadTopology(file);
-           if (null != loadTopology) {
-             map.put(file, loadTopology);
-           } else {
++      File[] existingTopologies = directory.listFiles(this);
++      if (existingTopologies != null) {
++        for (File file : existingTopologies) {
++          try {
++            Topology loadTopology = loadTopology(file);
++            if (null != loadTopology) {
++              map.put(file, loadTopology);
++            } else {
++              auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
++                      ActionOutcome.FAILURE);
++              log.failedToLoadTopology(file.getAbsolutePath());
++            }
++          } catch (IOException e) {
++            // Maybe it makes sense to throw exception
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-               ActionOutcome.FAILURE);
-             log.failedToLoadTopology(file.getAbsolutePath());
++                    ActionOutcome.FAILURE);
++            log.failedToLoadTopology(file.getAbsolutePath(), e);
++          } catch (SAXException e) {
++            // Maybe it makes sense to throw exception
++            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
++                    ActionOutcome.FAILURE);
++            log.failedToLoadTopology(file.getAbsolutePath(), e);
++          } catch (Exception e) {
++            // Maybe it makes sense to throw exception
++            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
++                    ActionOutcome.FAILURE);
++            log.failedToLoadTopology(file.getAbsolutePath(), e);
 +          }
-         } catch (IOException e) {
-           // Maybe it makes sense to throw exception
-           auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-             ActionOutcome.FAILURE);
-           log.failedToLoadTopology(file.getAbsolutePath(), e);
-         } catch (SAXException e) {
-           // Maybe it makes sense to throw exception
-           auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-             ActionOutcome.FAILURE);
-           log.failedToLoadTopology(file.getAbsolutePath(), e);
-         } catch (Exception e) {
-           // Maybe it makes sense to throw exception
-           auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-             ActionOutcome.FAILURE);
-           log.failedToLoadTopology(file.getAbsolutePath(), e);
 +        }
 +      }
 +    }
 +    return map;
 +  }
 +
 +  public void setAliasService(AliasService as) {
 +    this.aliasService = as;
 +  }
 +
 +  public void deployTopology(Topology t){
 +
 +    try {
 +      File temp = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml.temp");
 +      Package topologyPkg = Topology.class.getPackage();
 +      String pkgName = topologyPkg.getName();
 +      String bindingFile = pkgName.replace(".", "/") + "/topology_binding-xml.xml";
 +
 +      Map<String, Object> properties = new HashMap<>(1);
 +      properties.put(JAXBContextProperties.OXM_METADATA_SOURCE, bindingFile);
 +      JAXBContext jc = JAXBContext.newInstance(pkgName, Topology.class.getClassLoader(), properties);
 +      Marshaller mr = jc.createMarshaller();
 +
 +      mr.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
 +      mr.marshal(t, temp);
 +
 +      File topology = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml");
 +      if(!temp.renameTo(topology)) {
 +        FileUtils.forceDelete(temp);
 +        throw new IOException("Could not rename temp file");
 +      }
 +
 +      // This code will check if the topology is valid, and retrieve the errors if it is not.
 +      TopologyValidator validator = new TopologyValidator( topology.getAbsolutePath() );
 +      if( !validator.validateTopology() ){
 +        throw new SAXException( validator.getErrorString() );
 +      }
 +
 +
 +    } catch (JAXBException e) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), e);
 +    } catch (IOException io) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), io);
 +    } catch (SAXException sx){
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), sx);
 +    }
 +    reloadTopologies();
 +  }
 +
 +  public void redeployTopologies(String topologyName) {
 +
 +    for (Topology topology : getTopologies()) {
 +      if (topologyName == null || topologyName.equals(topology.getName())) {
 +        redeployTopology(topology);
 +      }
 +    }
 +
 +  }
 +
 +  public void reloadTopologies() {
 +    try {
 +      synchronized (this) {
 +        Map<File, Topology> oldTopologies = topologies;
 +        Map<File, Topology> newTopologies = loadTopologies(topologiesDirectory);
 +        List<TopologyEvent> events = createChangeEvents(oldTopologies, newTopologies);
 +        topologies = newTopologies;
 +        notifyChangeListeners(events);
 +      }
 +    } catch (Exception e) {
 +      // Maybe it makes sense to throw exception
 +      log.failedToReloadTopologies(e);
 +    }
 +  }
 +
 +  public void deleteTopology(Topology t) {
 +    File topoDir = topologiesDirectory;
 +
 +    if(topoDir.isDirectory() && topoDir.canRead()) {
-       File[] results = topoDir.listFiles();
-       for (File f : results) {
++      for (File f : listFiles(topoDir)) {
 +        String fName = FilenameUtils.getBaseName(f.getName());
 +        if(fName.equals(t.getName())) {
 +          f.delete();
 +        }
 +      }
 +    }
 +    reloadTopologies();
 +  }
 +
 +  private void notifyChangeListeners(List<TopologyEvent> events) {
 +    for (TopologyListener listener : listeners) {
 +      try {
 +        listener.handleTopologyEvent(events);
 +      } catch (RuntimeException e) {
 +        auditor.audit(Action.LOAD, "Topology_Event", ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +        log.failedToHandleTopologyEvents(e);
 +      }
 +    }
 +  }
 +
 +  public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
 +    File tFile = null;
 +    Map<String, List<String>> urls = new HashMap<>();
-     if(topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
-       for(File f : topologiesDirectory.listFiles()){
-         if(FilenameUtils.removeExtension(f.getName()).equals(t.getName())){
++    if (topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
++      for (File f : listFiles(topologiesDirectory)) {
++        if (FilenameUtils.removeExtension(f.getName()).equals(t.getName())) {
 +          tFile = f;
 +        }
 +      }
 +    }
 +    Set<ServiceDefinition> defs;
 +    if(tFile != null) {
 +      defs = ServiceDefinitionsLoader.getServiceDefinitions(new File(config.getGatewayServicesDir()));
 +
 +      for(ServiceDefinition def : defs) {
 +        urls.put(def.getRole(), def.getTestURLs());
 +      }
 +    }
 +    return urls;
 +  }
 +
 +  public Collection<Topology> getTopologies() {
 +    Map<File, Topology> map = topologies;
 +    return Collections.unmodifiableCollection(map.values());
 +  }
 +
 +  @Override
++  public boolean deployProviderConfiguration(String name, String content) {
++    return writeConfig(sharedProvidersDirectory, name, content);
++  }
++
++  @Override
++  public Collection<File> getProviderConfigurations() {
++    List<File> providerConfigs = new ArrayList<>();
++    for (File providerConfig : listFiles(sharedProvidersDirectory)) {
++      if (SharedProviderConfigMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(providerConfig.getName()))) {
++        providerConfigs.add(providerConfig);
++      }
++    }
++    return providerConfigs;
++  }
++
++  @Override
++  public boolean deleteProviderConfiguration(String name) {
++    boolean result = false;
++
++    File providerConfig = getExistingFile(sharedProvidersDirectory, name);
++    if (providerConfig != null) {
++      List<String> references = descriptorsMonitor.getReferencingDescriptors(providerConfig.getAbsolutePath());
++      if (references.isEmpty()) {
++        result = providerConfig.delete();
++      } else {
++        log.preventedDeletionOfSharedProviderConfiguration(providerConfig.getAbsolutePath());
++      }
++    } else {
++      result = true; // If it already does NOT exist, then the delete effectively succeeded
++    }
++
++    return result;
++  }
++
++  @Override
++  public boolean deployDescriptor(String name, String content) {
++    return writeConfig(descriptorsDirectory, name, content);
++  }
++
++  @Override
++  public Collection<File> getDescriptors() {
++    List<File> descriptors = new ArrayList<>();
++    for (File descriptor : listFiles(descriptorsDirectory)) {
++      if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
++        descriptors.add(descriptor);
++      }
++    }
++    return descriptors;
++  }
++
++  @Override
++  public boolean deleteDescriptor(String name) {
++    File descriptor = getExistingFile(descriptorsDirectory, name);
++    return (descriptor == null) || descriptor.delete();
++  }
++
++  @Override
 +  public void addTopologyChangeListener(TopologyListener listener) {
 +    listeners.add(listener);
 +  }
 +
 +  @Override
 +  public void startMonitor() throws Exception {
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.start();
 +    }
 +  }
 +
 +  @Override
 +  public void stopMonitor() throws Exception {
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.stop();
 +    }
 +  }
 +
 +  @Override
 +  public boolean accept(File file) {
 +    boolean accept = false;
 +    if (!file.isDirectory() && file.canRead()) {
 +      String extension = FilenameUtils.getExtension(file.getName());
 +      if (SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains(extension)) {
 +        accept = true;
 +      }
 +    }
 +    return accept;
 +  }
 +
 +  @Override
 +  public void onFileCreate(File file) {
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileDelete(java.io.File file) {
 +    // For full topology descriptors, we need to make sure to delete any corresponding simple descriptors to prevent
 +    // unintended subsequent generation of the topology descriptor
 +    for (String ext : DescriptorsMonitor.SUPPORTED_EXTENSIONS) {
 +      File simpleDesc =
 +              new File(descriptorsDirectory, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +      if (simpleDesc.exists()) {
++        log.deletingDescriptorForTopologyDeletion(simpleDesc.getName(), file.getName());
 +        simpleDesc.delete();
 +      }
 +    }
 +
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileChange(File file) {
 +    reloadTopologies();
 +  }
 +
 +  @Override
 +  public void stop() {
 +
 +  }
 +
 +  @Override
 +  public void start() {
 +
 +  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
 +
 +    try {
 +      listeners = new HashSet<>();
 +      topologies = new HashMap<>();
 +
 +      topologiesDirectory = calculateAbsoluteTopologiesDir(config);
 +
 +      File configDirectory = calculateAbsoluteConfigDir(config);
 +      descriptorsDirectory = new File(configDirectory, "descriptors");
-       File sharedProvidersDirectory = new File(configDirectory, "shared-providers");
++      sharedProvidersDirectory = new File(configDirectory, "shared-providers");
 +
 +      // Add support for conf/topologies
 +      initListener(topologiesDirectory, this, this);
 +
 +      // Add support for conf/descriptors
-       DescriptorsMonitor dm = new DescriptorsMonitor(topologiesDirectory, aliasService);
++      descriptorsMonitor = new DescriptorsMonitor(topologiesDirectory, aliasService);
 +      initListener(descriptorsDirectory,
-                    dm,
-                    dm);
++                   descriptorsMonitor,
++                   descriptorsMonitor);
++      log.monitoringDescriptorChangesInDirectory(descriptorsDirectory.getAbsolutePath());
 +
 +      // Add support for conf/shared-providers
-       SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(dm, descriptorsDirectory);
++      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(descriptorsMonitor, descriptorsDirectory);
 +      initListener(sharedProvidersDirectory, spm, spm);
++      log.monitoringProviderConfigChangesInDirectory(sharedProvidersDirectory.getAbsolutePath());
 +
 +      // For all the descriptors currently in the descriptors dir at start-up time, trigger topology generation.
 +      // This happens prior to the start-up loading of the topologies.
 +      String[] descriptorFilenames = descriptorsDirectory.list();
 +      if (descriptorFilenames != null) {
 +          for (String descriptorFilename : descriptorFilenames) {
 +              if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
-                   dm.onFileChange(new File(descriptorsDirectory, descriptorFilename));
++                  descriptorsMonitor.onFileChange(new File(descriptorsDirectory, descriptorFilename));
 +              }
 +          }
 +      }
 +
 +    } catch (IOException | SAXException io) {
 +      throw new ServiceLifecycleException(io.getMessage());
 +    }
 +  }
 +
 +
 +  /**
++   * Utility method for listing the files in the specified directory.
++   * This method is "nicer" than File#listFiles() because it never returns null.
++   *
++   * @param directory The directory whose files should be returned.
++   *
++   * @return A List of the Files in the directory; empty if the directory cannot be listed.
++   */
++  private static List<File> listFiles(File directory) {
++    List<File> result = null;
++    File[] files = directory.listFiles();
++    if (files != null) {
++      result = Arrays.asList(files);
++    } else {
++      result = Collections.emptyList();
++    }
++    return result;
++  }
++
++  /**
++   * Search for a file in the specified directory whose base name (filename without extension) matches the
++   * specified basename.
++   *
++   * @param directory The directory in which to search.
++   * @param basename  The basename of interest.
++   *
++   * @return The matching File, or null if there is no match.
++   */
++  private static File getExistingFile(File directory, String basename) {
++    File match = null;
++    for (File file : listFiles(directory)) {
++      if (FilenameUtils.getBaseName(file.getName()).equals(basename)) {
++        match = file;
++        break;
++      }
++    }
++    return match;
++  }
++
++  /**
++   * Write the specified content to a file.
++   *
++   * @param dest    The destination directory.
++   * @param name    The name of the file.
++   * @param content The contents of the file.
++   *
++   * @return true if the write succeeds; otherwise, false.
++   */
++  private static boolean writeConfig(File dest, String name, String content) {
++    boolean result = false;
++
++    File destFile = new File(dest, name);
++    try {
++      FileUtils.writeStringToFile(destFile, content);
++      log.wroteConfigurationFile(destFile.getAbsolutePath());
++      result = true;
++    } catch (IOException e) {
++      log.failedToWriteConfigurationFile(destFile.getAbsolutePath(), e);
++    }
++
++    return result;
++  }
++
++
++  /**
 +   * Change handler for simple descriptors
 +   */
 +  public static class DescriptorsMonitor extends FileAlterationListenerAdaptor
 +                                          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("json");
 +      SUPPORTED_EXTENSIONS.add("yml");
 +      SUPPORTED_EXTENSIONS.add("yaml");
 +    }
 +
 +    private File topologiesDir;
 +
 +    private AliasService aliasService;
 +
 +    private Map<String, List<String>> providerConfigReferences = new HashMap<>();
 +
 +
 +    static boolean isDescriptorFile(String filename) {
 +      return SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(filename));
 +    }
 +
 +    public DescriptorsMonitor(File topologiesDir, AliasService aliasService) {
 +      this.topologiesDir  = topologiesDir;
 +      this.aliasService   = aliasService;
 +    }
 +
 +    List<String> getReferencingDescriptors(String providerConfigPath) {
-       List<String> result = providerConfigReferences.get(providerConfigPath);
++      List<String> result = providerConfigReferences.get(FilenameUtils.normalize(providerConfigPath));
 +      if (result == null) {
 +        result = Collections.emptyList();
 +      }
 +      return result;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      // For simple descriptors, we need to make sure to delete any corresponding full topology descriptors to trigger undeployment
 +      for (String ext : DefaultTopologyService.SUPPORTED_TOPOLOGY_FILE_EXTENSIONS) {
 +        File topologyFile =
 +                new File(topologiesDir, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +        if (topologyFile.exists()) {
++          log.deletingTopologyForDescriptorDeletion(topologyFile.getName(), file.getName());
 +          topologyFile.delete();
 +        }
 +      }
 +
 +      String normalizedFilePath = FilenameUtils.normalize(file.getAbsolutePath());
 +      String reference = null;
 +      for (Map.Entry<String, List<String>> entry : providerConfigReferences.entrySet()) {
 +        if (entry.getValue().contains(normalizedFilePath)) {
 +          reference = entry.getKey();
 +          break;
 +        }
 +      }
++
 +      if (reference != null) {
 +        providerConfigReferences.get(reference).remove(normalizedFilePath);
++        log.removedProviderConfigurationReference(normalizedFilePath, reference);
 +      }
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      try {
 +        // When a simple descriptor has been created or modified, generate the new topology descriptor
 +        Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
++        log.generatedTopologyForDescriptorChange(result.get("topology").getName(), file.getName());
 +
 +        // Add the provider config reference relationship for handling updates to the provider config
 +        String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
 +        if (!providerConfigReferences.containsKey(providerConfig)) {
 +          providerConfigReferences.put(providerConfig, new ArrayList<String>());
 +        }
 +        List<String> refs = providerConfigReferences.get(providerConfig);
 +        String descriptorName = FilenameUtils.normalize(file.getAbsolutePath());
 +        if (!refs.contains(descriptorName)) {
 +          // Need to check if descriptor had previously referenced another provider config, so it can be removed
 +          for (List<String> descs : providerConfigReferences.values()) {
 +            if (descs.contains(descriptorName)) {
 +              descs.remove(descriptorName);
 +            }
 +          }
 +
 +          // Add the current reference relationship
 +          refs.add(descriptorName);
++          log.addedProviderConfigurationReference(descriptorName, providerConfig);
 +        }
 +      } catch (Exception e) {
 +        log.simpleDescriptorHandlingError(file.getName(), e);
 +      }
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +  /**
 +   * Change handler for shared provider configurations
 +   */
 +  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor
 +          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("xml");
 +    }
 +
 +    private DescriptorsMonitor descriptorsMonitor;
 +    private File descriptorsDir;
 +
 +
 +    SharedProviderConfigMonitor(DescriptorsMonitor descMonitor, File descriptorsDir) {
 +      this.descriptorsMonitor = descMonitor;
 +      this.descriptorsDir     = descriptorsDir;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      // For shared provider configuration, we need to update any simple descriptors that reference it
 +      for (File descriptor : getReferencingDescriptors(file)) {
 +        descriptor.setLastModified(System.currentTimeMillis());
 +      }
 +    }
 +
 +    private List<File> getReferencingDescriptors(File sharedProviderConfig) {
 +      List<File> references = new ArrayList<>();
 +
-       for (File descriptor : descriptorsDir.listFiles()) {
++      // The referencing descriptors are tracked by the DescriptorsMonitor, so query it directly;
++      // iterating the descriptors directory here would only repeat the same references.
++      String normalizedPath = FilenameUtils.normalize(sharedProviderConfig.getAbsolutePath());
++      for (String reference : descriptorsMonitor.getReferencingDescriptors(normalizedPath)) {
++        references.add(new File(reference));
++      }
 +
 +      return references;
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +}
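
For context on the new CRUD surface above (deploy/get/delete for provider configurations and
descriptors), a minimal usage sketch follows. The gatewayConfig, providerConfigXml and
descriptorJson values are illustrative placeholders, and the basename-based delete mirrors the
tests later in this commit:

    TopologyService ts = new DefaultTopologyService();
    ts.init(gatewayConfig, java.util.Collections.<String, String>emptyMap());

    // Writes conf/shared-providers/my-providers.xml and conf/descriptors/my-cluster.json
    ts.deployProviderConfiguration("my-providers.xml", providerConfigXml);
    ts.deployDescriptor("my-cluster.json", descriptorJson);

    // Takes the basename; refused while any descriptor still references the provider config
    boolean deleted = ts.deleteProviderConfiguration("my-providers");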

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
index 1caa946,0000000..a1a2609
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
@@@ -1,94 -1,0 +1,105 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.builder;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +
 +public class BeanPropertyTopologyBuilder implements TopologyBuilder {
 +
 +    private String name;
++    private String defaultService;
 +    private List<Provider> providers;
 +    private List<Service> services;
 +    private List<Application> applications;
 +
 +    public BeanPropertyTopologyBuilder() {
 +        providers = new ArrayList<Provider>();
 +        services = new ArrayList<Service>();
 +        applications = new ArrayList<Application>();
 +    }
 +
 +    public BeanPropertyTopologyBuilder name(String name) {
 +        this.name = name;
 +        return this;
 +    }
 +
 +    public String name() {
 +        return name;
 +    }
 +
++    public BeanPropertyTopologyBuilder defaultService(String defaultService) {
++      this.defaultService = defaultService;
++      return this;
++    }
++
++    public String defaultService() {
++      return defaultService;
++    }
++
 +    public BeanPropertyTopologyBuilder addProvider(Provider provider) {
 +        providers.add(provider);
 +        return this;
 +    }
 +
 +    public List<Provider> providers() {
 +        return providers;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addService(Service service) {
 +        services.add(service);
 +        return this;
 +    }
 +
 +    public List<Service> services() {
 +        return services;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addApplication( Application application ) {
 +        applications.add(application);
 +        return this;
 +    }
 +
 +    public List<Application> applications() {
 +        return applications;
 +    }
 +
 +    public Topology build() {
 +        Topology topology = new Topology();
 +        topology.setName(name);
++        topology.setDefaultServicePath(defaultService);
 +
 +        for (Provider provider : providers) {
 +            topology.addProvider(provider);
 +        }
 +
 +        for (Service service : services) {
 +            topology.addService(service);
 +        }
 +
 +        for (Application application : applications) {
 +            topology.addApplication(application);
 +        }
 +
 +        return topology;
 +    }
 +}
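
For context, a minimal sketch of the fluent builder usage, including the new defaultService
property (the WEBHDFS service value is illustrative):

    Service webhdfs = new Service();
    webhdfs.setRole("WEBHDFS");

    Topology topology = new BeanPropertyTopologyBuilder()
        .name("sandbox")
        .defaultService("webhdfs")   // carried into Topology#setDefaultServicePath in build()
        .addService(webhdfs)
        .build();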

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
index 6b51ab8,0000000..81aedec
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
@@@ -1,93 -1,0 +1,95 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.xml;
 +
 +import org.apache.commons.digester3.Rule;
 +import org.apache.commons.digester3.binder.AbstractRulesModule;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Version;
 +import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;
 +import org.xml.sax.Attributes;
 +
 +public class KnoxFormatXmlTopologyRules extends AbstractRulesModule {
 +
 +  private static final String ROOT_TAG = "topology";
 +  private static final String NAME_TAG = "name";
 +  private static final String VERSION_TAG = "version";
++  private static final String DEFAULT_SERVICE_TAG = "path";
 +  private static final String APPLICATION_TAG = "application";
 +  private static final String SERVICE_TAG = "service";
 +  private static final String ROLE_TAG = "role";
 +  private static final String URL_TAG = "url";
 +  private static final String PROVIDER_TAG = "gateway/provider";
 +  private static final String ENABLED_TAG = "enabled";
 +  private static final String PARAM_TAG = "param";
 +  private static final String VALUE_TAG = "value";
 +
 +  private static final Rule paramRule = new ParamRule();
 +
 +  @Override
 +  protected void configure() {
 +    forPattern( ROOT_TAG ).createObject().ofType( BeanPropertyTopologyBuilder.class );
 +    forPattern( ROOT_TAG + "/" + NAME_TAG ).callMethod("name").usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + VERSION_TAG ).callMethod("version").usingElementBodyAsArgument();
++    forPattern( ROOT_TAG + "/" + DEFAULT_SERVICE_TAG ).callMethod("defaultService").usingElementBodyAsArgument();
 +
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG ).createObject().ofType( Application.class ).then().setNext( "addApplication" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG ).createObject().ofType( Service.class ).then().setNext( "addService" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG ).createObject().ofType( Provider.class ).then().setNext( "addProvider" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ENABLED_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +  }
 +
 +  private static class ParamRule extends Rule {
 +
 +    @Override
 +    public void begin( String namespace, String name, Attributes attributes ) {
 +      Param param = getDigester().peek();
 +      String paramName = attributes.getValue( "name" );
 +      if( paramName != null ) {
 +        param.setName( paramName );
 +        param.setValue( attributes.getValue( "value" ) );
 +      }
 +    }
 +
 +  }
 +
 +}
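
The new DEFAULT_SERVICE_TAG rule binds a top-level <path> element to
BeanPropertyTopologyBuilder#defaultService. A minimal sketch of exercising the rules module with
commons-digester3 (the XML content is illustrative; the gateway wires up the digester elsewhere):

    import java.io.StringReader;
    import org.apache.commons.digester3.Digester;
    import org.apache.commons.digester3.binder.DigesterLoader;

    String xml = "<topology>"
               + "<name>sandbox</name>"
               + "<path>webhdfs</path>"      // the new default service path element
               + "</topology>";

    Digester digester = DigesterLoader.newLoader(new KnoxFormatXmlTopologyRules()).newDigester();
    BeanPropertyTopologyBuilder builder = digester.parse(new StringReader(xml));
    Topology topology = builder.build();     // topology.getDefaultServicePath() returns "webhdfs"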

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
index 178ff5e,0000000..ac22400
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
@@@ -1,171 -1,0 +1,220 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
++import org.apache.knox.gateway.topology.Topology;
 +import org.apache.hadoop.test.category.FastTests;
 +import org.apache.hadoop.test.category.UnitTests;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +import javax.servlet.*;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.MatcherAssert.assertThat;
 +
 +/**
 + *
 + */
 +@Category( { UnitTests.class, FastTests.class } )
 +public class GatewayFilterTest {
 +
 +  @Before
 +  public void setup() {
 +    AuditServiceFactory.getAuditService().createContext();
 +  }
 +
 +  @After
 +  public void reset() {
 +    AuditServiceFactory.getAuditService().detachContext();
 +  }
 +
 +  @Test
 +  public void testNoFilters() throws ServletException, IOException {
 +
 +    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
 +    EasyMock.replay( config );
 +
 +    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
 +    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
 +    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
 +    EasyMock.expect( request.getPathInfo() ).andReturn( "source" ).anyTimes();
 +    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
 +    EasyMock.expect( context.getAttribute(
 +        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
 +    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
 +        "Custom-Forwarded-For").anyTimes();
 +    EasyMock.replay( request );
 +    EasyMock.replay( context );
 +    EasyMock.replay( gatewayConfig );
 +    
 +    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
 +    EasyMock.replay( response );
 +
 +    FilterChain chain = EasyMock.createNiceMock( FilterChain.class );
 +    EasyMock.replay( chain );
 +
 +    GatewayFilter gateway = new GatewayFilter();
 +    gateway.init( config );
 +    gateway.doFilter( request, response, chain );
 +    gateway.destroy();
 +  }
 +
 +  @Test
 +  public void testNoopFilter() throws ServletException, IOException, URISyntaxException {
 +
 +    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
 +    EasyMock.replay( config );
 +
 +    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
 +    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
 +    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
 +    EasyMock.expect( request.getPathInfo() ).andReturn( "source" ).anyTimes();
 +    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
 +    EasyMock.expect( context.getAttribute(
 +        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
 +    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
 +        "Custom-Forwarded-For").anyTimes();
 +    EasyMock.replay( request );
 +    EasyMock.replay( context );
 +    EasyMock.replay( gatewayConfig );
 +
 +    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
 +    EasyMock.replay( response );
 +
 +    FilterChain chain = EasyMock.createNiceMock( FilterChain.class );
 +    EasyMock.replay( chain );
 +
 +    Filter filter = EasyMock.createNiceMock( Filter.class );
 +    EasyMock.replay( filter );
 +
 +    GatewayFilter gateway = new GatewayFilter();
 +    gateway.addFilter( "path", "filter", filter, null, null );
 +    gateway.init( config );
 +    gateway.doFilter( request, response, chain );
 +    gateway.destroy();
 +
 +  }
 +
 +  public static class TestRoleFilter extends AbstractGatewayFilter {
 +
 +    public Object role;
++    public String defaultServicePath;
++    public String url;
 +
 +    @Override
 +    protected void doFilter( HttpServletRequest request, HttpServletResponse response, FilterChain chain ) throws IOException, ServletException {
 +      this.role = request.getAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE );
++      Topology topology = (Topology)request.getServletContext().getAttribute( "org.apache.knox.gateway.topology" );
++      if (topology != null) {
++        this.defaultServicePath = topology.getDefaultServicePath();
++        url = request.getRequestURL().toString();
++      }
 +    }
 +
 +  }
 +
 +  @Test
 +  public void testTargetServiceRoleRequestAttribute() throws Exception {
 +
 +    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
 +    EasyMock.replay( config );
 +
 +    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
 +    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
 +    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
 +    EasyMock.expect( request.getPathInfo() ).andReturn( "test-path/test-resource" ).anyTimes();
 +    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
 +    EasyMock.expect( context.getAttribute(
 +        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
 +    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
 +        "Custom-Forwarded-For").anyTimes();
 +    request.setAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE, "test-role" );
 +    EasyMock.expectLastCall().anyTimes();
 +    EasyMock.expect( request.getAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE ) ).andReturn( "test-role" ).anyTimes();
 +    EasyMock.replay( request );
 +    EasyMock.replay( context );
 +    EasyMock.replay( gatewayConfig );
 +
 +    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
 +    EasyMock.replay( response );
 +
 +    TestRoleFilter filter = new TestRoleFilter();
 +
 +    GatewayFilter gateway = new GatewayFilter();
 +    gateway.addFilter( "test-path/**", "test-filter", filter, null, "test-role" );
 +    gateway.init( config );
 +    gateway.doFilter( request, response );
 +    gateway.destroy();
 +
 +    assertThat( (String)filter.role, is( "test-role" ) );
 +
 +  }
 +
++  @Test
++  public void testDefaultServicePathTopologyRequestAttribute() throws Exception {
++
++    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
++    EasyMock.replay( config );
++
++    Topology topology = EasyMock.createNiceMock( Topology.class );
++    topology.setDefaultServicePath("test-role/");
++    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
++    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
++    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
++    EasyMock.expect( topology.getDefaultServicePath() ).andReturn( "test-role" ).anyTimes();
++    EasyMock.expect( request.getPathInfo() ).andReturn( "/test-path/test-resource" ).anyTimes();
++    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
++    EasyMock.expect( context.getAttribute(
++        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
++    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
++        "Custom-Forwarded-For").anyTimes();
++    EasyMock.expect( request.getRequestURL() ).andReturn( new StringBuffer("http://host:8443/gateway/sandbox/test-path/test-resource/") ).anyTimes();
++
++    EasyMock.expect( context.getAttribute( "org.apache.hadoop.gateway.topology" ) ).andReturn( topology ).anyTimes();
++    EasyMock.replay( request );
++    EasyMock.replay( context );
++    EasyMock.replay( topology );
++    EasyMock.replay( gatewayConfig );
++
++    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
++    EasyMock.replay( response );
++
++    TestRoleFilter filter = new TestRoleFilter();
++
++    GatewayFilter gateway = new GatewayFilter();
++    gateway.addFilter( "test-role/**/**", "test-filter", filter, null, "test-role" );
++    gateway.init( config );
++    gateway.doFilter( request, response );
++    gateway.destroy();
++
++    assertThat( filter.defaultServicePath, is( "test-role" ) );
++    assertThat( filter.url, is( "http://host:8443/gateway/sandbox/test-role/test-path/test-resource" ) );
++
++  }
 +}
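
The new testDefaultServicePathTopologyRequestAttribute pins down the default-service-path
behavior: with <path>test-role</path> declared in the topology, a request URL that omits the
service segment is treated as if the segment were present. A sketch of the expected mapping only
(the actual rewrite happens inside GatewayFilter, which is not part of this diff):

    static String withDefaultServicePath(String requestUrl, String contextUrl, String defaultService) {
      String suffix = requestUrl.substring(contextUrl.length());
      if (suffix.endsWith("/")) {
        suffix = suffix.substring(0, suffix.length() - 1);  // the trailing slash is dropped
      }
      return contextUrl + "/" + defaultService + suffix;
    }

    // withDefaultServicePath("http://host:8443/gateway/sandbox/test-path/test-resource/",
    //                        "http://host:8443/gateway/sandbox", "test-role")
    // yields "http://host:8443/gateway/sandbox/test-role/test-path/test-resource"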

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
index d28ad7f,0000000..95d6f9d
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
@@@ -1,266 -1,0 +1,610 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.topology;
 +
 +import org.apache.commons.io.FileUtils;
++import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.IOUtils;
++import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
++import org.apache.knox.gateway.topology.*;
 +import org.apache.hadoop.test.TestUtils;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.OutputStream;
- import java.util.*;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collection;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Map;
++import java.util.Set;
 +
 +import static org.easymock.EasyMock.anyObject;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.Matchers.hasItem;
 +import static org.hamcrest.core.IsNull.notNullValue;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertNotEquals;
++import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +
 +public class DefaultTopologyServiceTest {
 +
 +  @Before
 +  public void setUp() throws Exception {
 +  }
 +
 +  @After
 +  public void tearDown() throws Exception {
 +  }
 +
 +  private File createDir() throws IOException {
 +    return TestUtils.createTempDir(this.getClass().getSimpleName() + "-");
 +  }
 +
 +  private File createFile(File parent, String name, String resource, long timestamp) throws IOException {
 +    File file = new File(parent, name);
 +    if (!file.exists()) {
 +      FileUtils.touch(file);
 +    }
 +    InputStream input = ClassLoader.getSystemResourceAsStream(resource);
 +    OutputStream output = FileUtils.openOutputStream(file);
 +    IOUtils.copy(input, output);
 +    //KNOX-685: output.flush();
 +    input.close();
 +    output.close();
 +    file.setLastModified(timestamp);
 +    assertTrue("Failed to create test file " + file.getAbsolutePath(), file.exists());
 +    assertTrue("Failed to populate test file " + file.getAbsolutePath(), file.length() > 0);
 +
 +    return file;
 +  }
 +
 +  @Test
 +  public void testGetTopologies() throws Exception {
 +
 +    File dir = createDir();
 +    File topologyDir = new File(dir, "topologies");
 +
-     File descriptorsDir = new File(dir, "descriptors");
-     descriptorsDir.mkdirs();
- 
-     File sharedProvidersDir = new File(dir, "shared-providers");
-     sharedProvidersDir.mkdirs();
- 
 +    long time = topologyDir.lastModified();
 +    try {
 +      createFile(topologyDir, "one.xml", "org/apache/knox/gateway/topology/file/topology-one.xml", time);
 +
 +      TestTopologyListener topoListener = new TestTopologyListener();
 +      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
 +
 +      TopologyService provider = new DefaultTopologyService();
 +      Map<String, String> c = new HashMap<>();
 +
 +      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
 +      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
-       EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(topologyDir.getParentFile().getAbsolutePath()).anyTimes();
 +      EasyMock.replay(config);
 +
 +      provider.init(config, c);
 +
 +      provider.addTopologyChangeListener(topoListener);
 +
 +      provider.reloadTopologies();
 +
 +      Collection<Topology> topologies = provider.getTopologies();
 +      assertThat(topologies, notNullValue());
 +      assertThat(topologies.size(), is(1));
 +      Topology topology = topologies.iterator().next();
 +      assertThat(topology.getName(), is("one"));
 +      assertThat(topology.getTimestamp(), is(time));
 +      assertThat(topoListener.events.size(), is(1));
 +      topoListener.events.clear();
 +
 +      // Add a file to the directory.
 +      File two = createFile(topologyDir, "two.xml",
 +          "org/apache/knox/gateway/topology/file/topology-two.xml", 1L);
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(2));
 +      Set<String> names = new HashSet<>(Arrays.asList("one", "two"));
 +      Iterator<Topology> iterator = topologies.iterator();
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      assertThat(names.size(), is(0));
 +      assertThat(topoListener.events.size(), is(1));
 +      List<TopologyEvent> events = topoListener.events.get(0);
 +      assertThat(events.size(), is(1));
 +      TopologyEvent event = events.get(0);
 +      assertThat(event.getType(), is(TopologyEvent.Type.CREATED));
 +      assertThat(event.getTopology(), notNullValue());
 +
 +      // Update a file in the directory.
 +      two = createFile(topologyDir, "two.xml",
 +          "org/apache/knox/gateway/topology/file/topology-three.xml", 2L);
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(2));
 +      names = new HashSet<>(Arrays.asList("one", "two"));
 +      iterator = topologies.iterator();
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      assertThat(names.size(), is(0));
 +
 +      // Remove a file from the directory.
 +      two.delete();
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(1));
 +      topology = topologies.iterator().next();
 +      assertThat(topology.getName(), is("one"));
 +      assertThat(topology.getTimestamp(), is(time));
 +
++    } finally {
++      FileUtils.deleteQuietly(dir);
++    }
++  }
++
++  /**
++   * KNOX-1014
++   *
++   * Test the lifecycle relationship between simple descriptors and topology files.
++   *
++   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
++   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
++   */
++  @Test
++  public void testSimpleDescriptorsTopologyGeneration() throws Exception {
++
++    File dir = createDir();
++    File topologyDir = new File(dir, "topologies");
++    topologyDir.mkdirs();
++
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
++    try {
++      TestTopologyListener topoListener = new TestTopologyListener();
++      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
++
++      TopologyService provider = new DefaultTopologyService();
++      Map<String, String> c = new HashMap<>();
++
++      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
++      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.replay(config);
++
++      provider.init(config, c);
++      provider.addTopologyChangeListener(topoListener);
++      provider.reloadTopologies();
++
++
 +      // Add a simple descriptor to the descriptors dir to verify topology generation and loading (KNOX-1006)
-       // N.B. This part of the test depends on the DummyServiceDiscovery extension being configured:
-       //         org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
 +      AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
 +      EasyMock.expect(aliasService.getPasswordFromAliasForGateway(anyObject(String.class))).andReturn(null).anyTimes();
 +      EasyMock.replay(aliasService);
 +      DefaultTopologyService.DescriptorsMonitor dm =
-                                           new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
++              new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
++
++      // Listener to simulate the topologies directory monitor, to notice when a topology has been deleted
++      provider.addTopologyChangeListener(new TestTopologyDeleteListener((DefaultTopologyService)provider));
 +
 +      // Write out the referenced provider config first
 +      File provCfgFile = createFile(sharedProvidersDir,
 +                                    "ambari-cluster-policy.xml",
-           "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
-                                     1L);
++                                    "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
++                                    System.currentTimeMillis());
 +      try {
 +        // Create the simple descriptor in the descriptors dir
-         File simpleDesc =
-                 createFile(descriptorsDir,
-                            "four.json",
-                     "org/apache/knox/gateway/topology/file/simple-topology-four.json",
-                            1L);
++        File simpleDesc = createFile(descriptorsDir,
++                                     "four.json",
++                                     "org/apache/knox/gateway/topology/file/simple-topology-four.json",
++                                     System.currentTimeMillis());
 +
 +        // Trigger the topology generation by noticing the simple descriptor
 +        dm.onFileChange(simpleDesc);
 +
 +        // Load the generated topology
 +        provider.reloadTopologies();
++        Collection<Topology> topologies = provider.getTopologies();
++        assertThat(topologies.size(), is(1));
++        Iterator<Topology> iterator = topologies.iterator();
++        Topology topology = iterator.next();
++        assertThat("four", is(topology.getName()));
++        int serviceCount = topology.getServices().size();
++        assertEquals("Expected the same number of services as are declared in the simple dscriptor.", 10, serviceCount);
++
++        // Overwrite the simple descriptor with a different set of services, and check that the changes are
++        // propagated to the associated topology
++        simpleDesc = createFile(descriptorsDir,
++                                "four.json",
++                                "org/apache/knox/gateway/topology/file/simple-descriptor-five.json",
++                                System.currentTimeMillis());
++        dm.onFileChange(simpleDesc);
++        provider.reloadTopologies();
++        topologies = provider.getTopologies();
++        topology = topologies.iterator().next();
++        assertNotEquals(serviceCount, topology.getServices().size());
++        assertEquals(6, topology.getServices().size());
++
++        // Delete the simple descriptor, and make sure that the associated topology file is deleted
++        simpleDesc.delete();
++        dm.onFileDelete(simpleDesc);
++        provider.reloadTopologies();
 +        topologies = provider.getTopologies();
-         assertThat(topologies.size(), is(2));
-         names = new HashSet<>(Arrays.asList("one", "four"));
-         iterator = topologies.iterator();
-         topology = iterator.next();
-         assertThat(names, hasItem(topology.getName()));
-         names.remove(topology.getName());
-         topology = iterator.next();
-         assertThat(names, hasItem(topology.getName()));
-         names.remove(topology.getName());
-         assertThat(names.size(), is(0));
++        assertTrue(topologies.isEmpty());
++
++        // Create another simple descriptor and generate its topology, then delete that topology
++        // file, and make sure that the associated simple descriptor is also deleted
++        simpleDesc = createFile(descriptorsDir,
++                                "deleteme.json",
++                                "org/apache/knox/gateway/topology/file/simple-descriptor-five.json",
++                                System.currentTimeMillis());
++        dm.onFileChange(simpleDesc);
++        provider.reloadTopologies();
++        topologies = provider.getTopologies();
++        assertFalse(topologies.isEmpty());
++        topology = topologies.iterator().next();
++        assertEquals("deleteme", topology.getName());
++        File topologyFile = new File(topologyDir, topology.getName() + ".xml");
++        assertTrue(topologyFile.exists());
++        topologyFile.delete();
++        provider.reloadTopologies();
++        assertFalse("Simple descriptor should have been deleted because the associated topology was.",
++                    simpleDesc.exists());
++
 +      } finally {
 +        provCfgFile.delete();
- 
 +      }
 +    } finally {
 +      FileUtils.deleteQuietly(dir);
 +    }
 +  }
 +
++  /**
++   * KNOX-1014
++   *
++   * Test the lifecycle relationship between provider configuration files, simple descriptors, and topology files.
++   *
++   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
++   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
++   */
++  @Test
++  public void testTopologiesUpdateFromProviderConfigChange() throws Exception {
++    File dir = createDir();
++    File topologyDir = new File(dir, "topologies");
++    topologyDir.mkdirs();
++
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
++    try {
++      TestTopologyListener topoListener = new TestTopologyListener();
++      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
++
++      TopologyService ts = new DefaultTopologyService();
++      Map<String, String> c = new HashMap<>();
++
++      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
++      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.replay(config);
++
++      ts.init(config, c);
++      ts.addTopologyChangeListener(topoListener);
++      ts.reloadTopologies();
++
++      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
++      dmField.setAccessible(true);
++      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
++
++      // Write out the referenced provider configs first
++      createFile(sharedProvidersDir,
++                 "provider-config-one.xml",
++                 "org/apache/knox/gateway/topology/file/provider-config-one.xml",
++                 System.currentTimeMillis());
++
++      // Create the simple descriptor, which depends on provider-config-one.xml
++      File simpleDesc = createFile(descriptorsDir,
++                                   "six.json",
++                                   "org/apache/knox/gateway/topology/file/simple-descriptor-six.json",
++                                   System.currentTimeMillis());
++
++      // "Notice" the simple descriptor change, and generate a topology based on it
++      dm.onFileChange(simpleDesc);
++
++      // Load the generated topology
++      ts.reloadTopologies();
++      Collection<Topology> topologies = ts.getTopologies();
++      assertThat(topologies.size(), is(1));
++      Iterator<Topology> iterator = topologies.iterator();
++      Topology topology = iterator.next();
++      assertFalse("The Shiro provider is disabled in provider-config-one.xml",
++                  topology.getProvider("authentication", "ShiroProvider").isEnabled());
++
++      // Overwrite the referenced provider configuration with a different ShiroProvider config, and check that the
++      // changes are propagated to the associated topology
++      File providerConfig = createFile(sharedProvidersDir,
++                                       "provider-config-one.xml",
++                                       "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
++                                       System.currentTimeMillis());
++
++      // "Notice" the simple descriptor change as a result of the referenced config change
++      dm.onFileChange(simpleDesc);
++
++      // Load the generated topology
++      ts.reloadTopologies();
++      topologies = ts.getTopologies();
++      assertFalse(topologies.isEmpty());
++      topology = topologies.iterator().next();
++      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
++              topology.getProvider("authentication", "ShiroProvider").isEnabled());
++
++      // Delete the provider configuration, and make sure that the associated topology file is unaffected.
++      // The topology file should not be affected because the simple descriptor handling will fail to resolve the
++      // referenced provider configuration.
++      providerConfig.delete();     // Delete the file
++      dm.onFileChange(simpleDesc); // The provider config deletion will trigger a descriptor change notification
++      ts.reloadTopologies();
++      topologies = ts.getTopologies();
++      assertFalse(topologies.isEmpty());
++      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
++              topology.getProvider("authentication", "ShiroProvider").isEnabled());
++
++    } finally {
++      FileUtils.deleteQuietly(dir);
++    }
++  }
++
++  /**
++   * KNOX-1039
++   */
++  @Test
++  public void testConfigurationCRUDAPI() throws Exception {
++    File dir = createDir();
++    File topologyDir = new File(dir, "topologies");
++    topologyDir.mkdirs();
++
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
++    try {
++      TestTopologyListener topoListener = new TestTopologyListener();
++      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
++
++      TopologyService ts = new DefaultTopologyService();
++      Map<String, String> c = new HashMap<>();
++
++      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
++      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.replay(config);
++
++      ts.init(config, c);
++      ts.addTopologyChangeListener(topoListener);
++      ts.reloadTopologies();
++
++      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
++      dmField.setAccessible(true);
++      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
++
++      final String simpleDescName  = "six.json";
++      final String provConfOne     = "provider-config-one.xml";
++      final String provConfTwo     = "ambari-cluster-policy.xml";
++
++      // "Deploy" the referenced provider configs first
++      boolean isDeployed =
++        ts.deployProviderConfiguration(provConfOne,
++                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/provider-config-one.xml").toURI())));
++      assertTrue(isDeployed);
++      File provConfOneFile = new File(sharedProvidersDir, provConfOne);
++      assertTrue(provConfOneFile.exists());
++
++      isDeployed =
++        ts.deployProviderConfiguration(provConfTwo,
++                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml").toURI())));
++      assertTrue(isDeployed);
++      File provConfTwoFile = new File(sharedProvidersDir, provConfTwo);
++      assertTrue(provConfTwoFile.exists());
++
++      // Validate the provider configurations known by the topology service
++      Collection<File> providerConfigurations = ts.getProviderConfigurations();
++      assertNotNull(providerConfigurations);
++      assertEquals(2, providerConfigurations.size());
++      assertTrue(providerConfigurations.contains(provConfOneFile));
++      assertTrue(providerConfigurations.contains(provConfTwoFile));
++
++      // "Deploy" the simple descriptor, which depends on provConfOne
++      isDeployed =
++        ts.deployDescriptor(simpleDescName,
++            FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/simple-descriptor-six.json").toURI())));
++      assertTrue(isDeployed);
++      File simpleDesc = new File(descriptorsDir, simpleDescName);
++      assertTrue(simpleDesc.exists());
++
++      // Validate the simple descriptors known by the topology service
++      Collection<File> descriptors = ts.getDescriptors();
++      assertNotNull(descriptors);
++      assertEquals(1, descriptors.size());
++      assertTrue(descriptors.contains(simpleDesc));
++
++      // "Notice" the simple descriptor, so the provider configuration dependency relationship is recorded
++      dm.onFileChange(simpleDesc);
++
++      // Attempt to delete the referenced provConfOne
++      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
++                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
++
++      // Overwrite the simple descriptor with content that changes the provider config reference to provConfTwo
++      isDeployed =
++        ts.deployDescriptor(simpleDescName,
++              FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/simple-descriptor-five.json").toURI())));
++      assertTrue(isDeployed);
++      assertTrue(simpleDesc.exists());
++      assertEquals(2, ts.getProviderConfigurations().size()); // Both provider configs should still exist
++
++      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
++      dm.onFileChange(simpleDesc);
++
++      // Attempt to delete the referenced provConfOne
++      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
++                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
++
++      // Re-validate the provider configurations known by the topology service
++      providerConfigurations = ts.getProviderConfigurations();
++      assertNotNull(providerConfigurations);
++      assertEquals(1, providerConfigurations.size());
++      assertFalse(providerConfigurations.contains(provConfOneFile));
++      assertTrue(providerConfigurations.contains(provConfTwoFile));
++
++      // Attempt to delete the referenced provConfTwo
++      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
++                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
++
++      // Delete the referencing simple descriptor
++      assertTrue(ts.deleteDescriptor(FilenameUtils.getBaseName(simpleDescName)));
++      assertFalse(simpleDesc.exists());
++
++      // Re-validate the simple descriptors known by the topology service
++      descriptors = ts.getDescriptors();
++      assertNotNull(descriptors);
++      assertTrue(descriptors.isEmpty());
++
++      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
++      dm.onFileDelete(simpleDesc);
++
++      // Attempt to delete the referenced provConfTwo
++      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
++                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
++
++      // Re-validate the provider configurations known by the topology service
++      providerConfigurations = ts.getProviderConfigurations();
++      assertNotNull(providerConfigurations);
++      assertTrue(providerConfigurations.isEmpty());
++
++    } finally {
++      FileUtils.deleteQuietly(dir);
++    }
++  }
++
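 +  /**
 +   * Force each observer registered with the given monitor to poll immediately,
 +   * rather than waiting for the monitor's polling interval to elapse.
 +   */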
 +  private void kickMonitor(FileAlterationMonitor monitor) {
 +    for (FileAlterationObserver observer : monitor.getObservers()) {
 +      observer.checkAndNotify();
 +    }
 +  }
 +
++
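 +  /*
 +   * Provider params are returned as a Map; verify that iterating that map's keys
 +   * yields the params in the same order in which they were added to the provider.
 +   */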
 +  @Test
 +  public void testProviderParamsOrderIsPreserved() {
 +
 +    Provider provider = new Provider();
 +    String names[] = {"ldapRealm=",
 +        "ldapContextFactory",
 +        "ldapRealm.contextFactory",
 +        "ldapGroupRealm",
 +        "ldapGroupRealm.contextFactory",
 +        "ldapGroupRealm.contextFactory.systemAuthenticationMechanism"
 +    };
 +
 +    for (String name : names) {
 +      Param param = new Param();
 +      param.setName(name);
 +      param.setValue(name);
 +      provider.addParam(param);
 +    }
 +    Map<String, String> params = provider.getParams();
 +    assertEquals(names.length, params.size());
 +    int i = 0;
 +    for (String key : params.keySet()) {
 +      assertEquals(names[i++], key);
 +    }
 +  }
 +
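 +  /**
 +   * Topology listener that records each batch of topology events it receives, so
 +   * tests can make assertions about the notifications the topology service delivers.
 +   */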
 +  private class TestTopologyListener implements TopologyListener {
 +
-     public ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
++    ArrayList<List<TopologyEvent>> events = new ArrayList<>();
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      this.events.add(events);
 +    }
 +
 +  }
 +
++
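++  /**
++   * Topology listener that forwards DELETED topology events to a delegate
++   * FileAlterationListener as file-deletion notifications for the corresponding
++   * generated topology files.
++   */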
++  private class TestTopologyDeleteListener implements TopologyListener {
++
++    private final FileAlterationListener delegate;
++
++    TestTopologyDeleteListener(FileAlterationListener delegate) {
++      this.delegate = delegate;
++    }
++
++    @Override
++    public void handleTopologyEvent(List<TopologyEvent> events) {
++      for (TopologyEvent event : events) {
++        if (event.getType().equals(TopologyEvent.Type.DELETED)) {
++          delegate.onFileDelete(new File(event.getTopology().getUri()));
++        }
++      }
++    }
++
++  }
++
 +}