Posted to commits@knox.apache.org by mo...@apache.org on 2017/11/02 18:48:22 UTC

[18/25] knox git commit: Merge branch 'master' into KNOX-998-Package_Restructuring

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
index 455b0fa,0000000..38653f4
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
@@@ -1,689 -1,0 +1,818 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.knox.gateway.services.topology.impl;
 +
 +
 +import org.apache.commons.digester3.Digester;
 +import org.apache.commons.digester3.binder.DigesterLoader;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.audit.api.Action;
 +import org.apache.knox.gateway.audit.api.ActionOutcome;
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.audit.api.Auditor;
 +import org.apache.knox.gateway.audit.api.ResourceType;
 +import org.apache.knox.gateway.audit.log4j.audit.AuditConstants;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.topology.TopologyMonitor;
 +import org.apache.knox.gateway.topology.TopologyProvider;
 +import org.apache.knox.gateway.topology.builder.TopologyBuilder;
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.topology.xml.AmbariFormatXmlTopologyRules;
 +import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
 +import org.eclipse.persistence.jaxb.JAXBContextProperties;
 +import org.xml.sax.SAXException;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
++import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import static org.apache.commons.digester3.binder.DigesterLoader.newLoader;
 +
 +
 +public class DefaultTopologyService
 +    extends FileAlterationListenerAdaptor
 +    implements TopologyService, TopologyMonitor, TopologyProvider, FileFilter, FileAlterationListener {
 +
 +  private static Auditor auditor = AuditServiceFactory.getAuditService().getAuditor(
 +    AuditConstants.DEFAULT_AUDITOR_NAME, AuditConstants.KNOX_SERVICE_NAME,
 +    AuditConstants.KNOX_COMPONENT_NAME);
 +
 +  private static final List<String> SUPPORTED_TOPOLOGY_FILE_EXTENSIONS = new ArrayList<String>();
 +  static {
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("xml");
 +    SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("conf");
 +  }
 +
 +  private static GatewayMessages log = MessagesFactory.get(GatewayMessages.class);
 +  private static DigesterLoader digesterLoader = newLoader(new KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules());
 +  private List<FileAlterationMonitor> monitors = new ArrayList<>();
 +  private File topologiesDirectory;
++  private File sharedProvidersDirectory;
 +  private File descriptorsDirectory;
 +
++  private DescriptorsMonitor descriptorsMonitor;
++
 +  private Set<TopologyListener> listeners;
 +  private volatile Map<File, Topology> topologies;
 +  private AliasService aliasService;
 +
 +
 +  private Topology loadTopology(File file) throws IOException, SAXException, URISyntaxException, InterruptedException {
 +    final long TIMEOUT = 250; //ms
 +    final long DELAY = 50; //ms
 +    log.loadingTopologyFile(file.getAbsolutePath());
 +    Topology topology;
 +    long start = System.currentTimeMillis();
 +    while (true) {
 +      try {
 +        topology = loadTopologyAttempt(file);
 +        break;
 +      } catch (IOException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      } catch (SAXException e) {
 +        if (System.currentTimeMillis() - start < TIMEOUT) {
 +          log.failedToLoadTopologyRetrying(file.getAbsolutePath(), Long.toString(DELAY), e);
 +          Thread.sleep(DELAY);
 +        } else {
 +          throw e;
 +        }
 +      }
 +    }
 +    return topology;
 +  }
 +
 +  private Topology loadTopologyAttempt(File file) throws IOException, SAXException, URISyntaxException {
 +    Topology topology;
 +    Digester digester = digesterLoader.newDigester();
 +    TopologyBuilder topologyBuilder = digester.parse(FileUtils.openInputStream(file));
 +    if (null == topologyBuilder) {
 +      return null;
 +    }
 +    topology = topologyBuilder.build();
 +    topology.setUri(file.toURI());
 +    topology.setName(FilenameUtils.removeExtension(file.getName()));
 +    topology.setTimestamp(file.lastModified());
 +    return topology;
 +  }
 +
 +  private void redeployTopology(Topology topology) {
 +    File topologyFile = new File(topology.getUri());
 +    try {
 +      TopologyValidator tv = new TopologyValidator(topology);
 +
 +      if(tv.validateTopology()) {
 +        throw new SAXException(tv.getErrorString());
 +      }
 +
 +      long start = System.currentTimeMillis();
 +      long limit = 1000L; // One second.
 +      long elapsed = 1;
 +      while (elapsed <= limit) {
 +        try {
 +          long origTimestamp = topologyFile.lastModified();
 +          long setTimestamp = Math.max(System.currentTimeMillis(), topologyFile.lastModified() + elapsed);
 +          if(topologyFile.setLastModified(setTimestamp)) {
 +            long newTimestamp = topologyFile.lastModified();
 +            if(newTimestamp > origTimestamp) {
 +              break;
 +            } else {
 +              Thread.sleep(10);
 +              elapsed = System.currentTimeMillis() - start;
 +              continue;
 +            }
 +          } else {
 +            auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +                ActionOutcome.FAILURE);
 +            log.failedToRedeployTopology(topology.getName());
 +            break;
 +          }
 +        } catch (InterruptedException e) {
 +          auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY,
 +              ActionOutcome.FAILURE);
 +          log.failedToRedeployTopology(topology.getName(), e);
 +          e.printStackTrace();
 +        }
 +      }
 +    } catch (SAXException e) {
 +      auditor.audit(Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToRedeployTopology(topology.getName(), e);
 +    }
 +  }
 +
 +  private List<TopologyEvent> createChangeEvents(
 +      Map<File, Topology> oldTopologies,
 +      Map<File, Topology> newTopologies) {
 +    ArrayList<TopologyEvent> events = new ArrayList<TopologyEvent>();
 +    // Go through the old topologies and find anything that was deleted.
 +    for (File file : oldTopologies.keySet()) {
 +      if (!newTopologies.containsKey(file)) {
 +        events.add(new TopologyEvent(TopologyEvent.Type.DELETED, oldTopologies.get(file)));
 +      }
 +    }
 +    // Go through the new topologies and figure out what was updated vs added.
 +    for (File file : newTopologies.keySet()) {
 +      if (oldTopologies.containsKey(file)) {
 +        Topology oldTopology = oldTopologies.get(file);
 +        Topology newTopology = newTopologies.get(file);
 +        if (newTopology.getTimestamp() > oldTopology.getTimestamp()) {
 +          events.add(new TopologyEvent(TopologyEvent.Type.UPDATED, newTopologies.get(file)));
 +        }
 +      } else {
 +        events.add(new TopologyEvent(TopologyEvent.Type.CREATED, newTopologies.get(file)));
 +      }
 +    }
 +    return events;
 +  }
 +
 +  private File calculateAbsoluteTopologiesDir(GatewayConfig config) {
-     String normalizedTopologyDir = FilenameUtils.normalize(config.getGatewayTopologyDir());
-     File topoDir = new File(normalizedTopologyDir);
++    File topoDir = new File(config.getGatewayTopologyDir());
 +    topoDir = topoDir.getAbsoluteFile();
 +    return topoDir;
 +  }
 +
 +  private File calculateAbsoluteConfigDir(GatewayConfig config) {
 +    File configDir = null;
 +
-     String path = FilenameUtils.normalize(config.getGatewayConfDir());
-     if (path != null) {
-       configDir = new File(config.getGatewayConfDir());
-     } else {
-       configDir = (new File(config.getGatewayTopologyDir())).getParentFile();
-     }
-     configDir = configDir.getAbsoluteFile();
++    String path = config.getGatewayConfDir();
++    configDir = (path != null) ? new File(path) : (new File(config.getGatewayTopologyDir())).getParentFile();
 +
-     return configDir;
++    return configDir.getAbsoluteFile();
 +  }
 +
 +  private void  initListener(FileAlterationMonitor  monitor,
 +                            File                   directory,
 +                            FileFilter             filter,
 +                            FileAlterationListener listener) {
 +    monitors.add(monitor);
 +    FileAlterationObserver observer = new FileAlterationObserver(directory, filter);
 +    observer.addListener(listener);
 +    monitor.addObserver(observer);
 +  }
 +
 +  private void initListener(File directory, FileFilter filter, FileAlterationListener listener) throws IOException, SAXException {
 +    // Increasing the monitoring interval to 5 seconds as profiling has shown
 +    // this is rather expensive in terms of generated garbage objects.
 +    initListener(new FileAlterationMonitor(5000L), directory, filter, listener);
 +  }
 +
 +  private Map<File, Topology> loadTopologies(File directory) {
 +    Map<File, Topology> map = new HashMap<>();
 +    if (directory.isDirectory() && directory.canRead()) {
-       for (File file : directory.listFiles(this)) {
-         try {
-           Topology loadTopology = loadTopology(file);
-           if (null != loadTopology) {
-             map.put(file, loadTopology);
-           } else {
++      File[] existingTopologies = directory.listFiles(this);
++      if (existingTopologies != null) {
++        for (File file : existingTopologies) {
++          try {
++            Topology loadTopology = loadTopology(file);
++            if (null != loadTopology) {
++              map.put(file, loadTopology);
++            } else {
++              auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
++                      ActionOutcome.FAILURE);
++              log.failedToLoadTopology(file.getAbsolutePath());
++            }
++          } catch (IOException e) {
++            // Maybe it makes sense to throw an exception
 +            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-               ActionOutcome.FAILURE);
-             log.failedToLoadTopology(file.getAbsolutePath());
++                    ActionOutcome.FAILURE);
++            log.failedToLoadTopology(file.getAbsolutePath(), e);
++          } catch (SAXException e) {
++            // Maybe it makes sense to throw an exception
++            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
++                    ActionOutcome.FAILURE);
++            log.failedToLoadTopology(file.getAbsolutePath(), e);
++          } catch (Exception e) {
++            // Maybe it makes sense to throw an exception
++            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
++                    ActionOutcome.FAILURE);
++            log.failedToLoadTopology(file.getAbsolutePath(), e);
 +          }
-         } catch (IOException e) {
-           // Maybe it makes sense to throw exception
-           auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-             ActionOutcome.FAILURE);
-           log.failedToLoadTopology(file.getAbsolutePath(), e);
-         } catch (SAXException e) {
-           // Maybe it makes sense to throw exception
-           auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-             ActionOutcome.FAILURE);
-           log.failedToLoadTopology(file.getAbsolutePath(), e);
-         } catch (Exception e) {
-           // Maybe it makes sense to throw exception
-           auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-             ActionOutcome.FAILURE);
-           log.failedToLoadTopology(file.getAbsolutePath(), e);
 +        }
 +      }
 +    }
 +    return map;
 +  }
 +
 +  public void setAliasService(AliasService as) {
 +    this.aliasService = as;
 +  }
 +
 +  public void deployTopology(Topology t){
 +
 +    try {
 +      File temp = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml.temp");
 +      Package topologyPkg = Topology.class.getPackage();
 +      String pkgName = topologyPkg.getName();
 +      String bindingFile = pkgName.replace(".", "/") + "/topology_binding-xml.xml";
 +
 +      Map<String, Object> properties = new HashMap<>(1);
 +      properties.put(JAXBContextProperties.OXM_METADATA_SOURCE, bindingFile);
 +      JAXBContext jc = JAXBContext.newInstance(pkgName, Topology.class.getClassLoader(), properties);
 +      Marshaller mr = jc.createMarshaller();
 +
 +      mr.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
 +      mr.marshal(t, temp);
 +
 +      File topology = new File(topologiesDirectory.getAbsolutePath() + "/" + t.getName() + ".xml");
 +      if(!temp.renameTo(topology)) {
 +        FileUtils.forceDelete(temp);
 +        throw new IOException("Could not rename temp file");
 +      }
 +
 +      // This code will check if the topology is valid, and retrieve the errors if it is not.
 +      TopologyValidator validator = new TopologyValidator( topology.getAbsolutePath() );
 +      if( !validator.validateTopology() ){
 +        throw new SAXException( validator.getErrorString() );
 +      }
 +
 +
 +    } catch (JAXBException e) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), e);
 +    } catch (IOException io) {
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), io);
 +    } catch (SAXException sx){
 +      auditor.audit(Action.DEPLOY, t.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +      log.failedToDeployTopology(t.getName(), sx);
 +    }
 +    reloadTopologies();
 +  }
 +
 +  public void redeployTopologies(String topologyName) {
 +
 +    for (Topology topology : getTopologies()) {
 +      if (topologyName == null || topologyName.equals(topology.getName())) {
 +        redeployTopology(topology);
 +      }
 +    }
 +
 +  }
 +
 +  public void reloadTopologies() {
 +    try {
 +      synchronized (this) {
 +        Map<File, Topology> oldTopologies = topologies;
 +        Map<File, Topology> newTopologies = loadTopologies(topologiesDirectory);
 +        List<TopologyEvent> events = createChangeEvents(oldTopologies, newTopologies);
 +        topologies = newTopologies;
 +        notifyChangeListeners(events);
 +      }
 +    } catch (Exception e) {
 +      // Maybe it makes sense to throw an exception
 +      log.failedToReloadTopologies(e);
 +    }
 +  }
 +
 +  public void deleteTopology(Topology t) {
 +    File topoDir = topologiesDirectory;
 +
 +    if(topoDir.isDirectory() && topoDir.canRead()) {
-       File[] results = topoDir.listFiles();
-       for (File f : results) {
++      for (File f : listFiles(topoDir)) {
 +        String fName = FilenameUtils.getBaseName(f.getName());
 +        if(fName.equals(t.getName())) {
 +          f.delete();
 +        }
 +      }
 +    }
 +    reloadTopologies();
 +  }
 +
 +  private void notifyChangeListeners(List<TopologyEvent> events) {
 +    for (TopologyListener listener : listeners) {
 +      try {
 +        listener.handleTopologyEvent(events);
 +      } catch (RuntimeException e) {
 +        auditor.audit(Action.LOAD, "Topology_Event", ResourceType.TOPOLOGY, ActionOutcome.FAILURE);
 +        log.failedToHandleTopologyEvents(e);
 +      }
 +    }
 +  }
 +
 +  public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
 +    File tFile = null;
 +    Map<String, List<String>> urls = new HashMap<>();
-     if(topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
-       for(File f : topologiesDirectory.listFiles()){
-         if(FilenameUtils.removeExtension(f.getName()).equals(t.getName())){
++    if (topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
++      for (File f : listFiles(topologiesDirectory)) {
++        if (FilenameUtils.removeExtension(f.getName()).equals(t.getName())) {
 +          tFile = f;
 +        }
 +      }
 +    }
 +    Set<ServiceDefinition> defs;
 +    if(tFile != null) {
 +      defs = ServiceDefinitionsLoader.getServiceDefinitions(new File(config.getGatewayServicesDir()));
 +
 +      for(ServiceDefinition def : defs) {
 +        urls.put(def.getRole(), def.getTestURLs());
 +      }
 +    }
 +    return urls;
 +  }
 +
 +  public Collection<Topology> getTopologies() {
 +    Map<File, Topology> map = topologies;
 +    return Collections.unmodifiableCollection(map.values());
 +  }
 +
 +  @Override
++  public boolean deployProviderConfiguration(String name, String content) {
++    return writeConfig(sharedProvidersDirectory, name, content);
++  }
++
++  @Override
++  public Collection<File> getProviderConfigurations() {
++    List<File> providerConfigs = new ArrayList<>();
++    for (File providerConfig : listFiles(sharedProvidersDirectory)) {
++      if (SharedProviderConfigMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(providerConfig.getName()))) {
++        providerConfigs.add(providerConfig);
++      }
++    }
++    return providerConfigs;
++  }
++
++  @Override
++  public boolean deleteProviderConfiguration(String name) {
++    boolean result = false;
++
++    File providerConfig = getExistingFile(sharedProvidersDirectory, name);
++    if (providerConfig != null) {
++      List<String> references = descriptorsMonitor.getReferencingDescriptors(providerConfig.getAbsolutePath());
++      if (references.isEmpty()) {
++        result = providerConfig.delete();
++      } else {
++        log.preventedDeletionOfSharedProviderConfiguration(providerConfig.getAbsolutePath());
++      }
++    } else {
++      result = true; // If it already does NOT exist, then the delete effectively succeeded
++    }
++
++    return result;
++  }
++
++  @Override
++  public boolean deployDescriptor(String name, String content) {
++    return writeConfig(descriptorsDirectory, name, content);
++  }
++
++  @Override
++  public Collection<File> getDescriptors() {
++    List<File> descriptors = new ArrayList<>();
++    for (File descriptor : listFiles(descriptorsDirectory)) {
++      if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
++        descriptors.add(descriptor);
++      }
++    }
++    return descriptors;
++  }
++
++  @Override
++  public boolean deleteDescriptor(String name) {
++    File descriptor = getExistingFile(descriptorsDirectory, name);
++    return (descriptor == null) || descriptor.delete();
++  }
++
++  @Override
 +  public void addTopologyChangeListener(TopologyListener listener) {
 +    listeners.add(listener);
 +  }
 +
 +  @Override
 +  public void startMonitor() throws Exception {
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.start();
 +    }
 +  }
 +
 +  @Override
 +  public void stopMonitor() throws Exception {
 +    for (FileAlterationMonitor monitor : monitors) {
 +      monitor.stop();
 +    }
 +  }
 +
 +  @Override
 +  public boolean accept(File file) {
 +    boolean accept = false;
 +    if (!file.isDirectory() && file.canRead()) {
 +      String extension = FilenameUtils.getExtension(file.getName());
 +      if (SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains(extension)) {
 +        accept = true;
 +      }
 +    }
 +    return accept;
 +  }
 +
 +  @Override
 +  public void onFileCreate(File file) {
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileDelete(java.io.File file) {
 +    // For full topology descriptors, we need to make sure to delete any corresponding simple descriptors to prevent
 +    // unintended subsequent generation of the topology descriptor
 +    for (String ext : DescriptorsMonitor.SUPPORTED_EXTENSIONS) {
 +      File simpleDesc =
 +              new File(descriptorsDirectory, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +      if (simpleDesc.exists()) {
++        log.deletingDescriptorForTopologyDeletion(simpleDesc.getName(), file.getName());
 +        simpleDesc.delete();
 +      }
 +    }
 +
 +    onFileChange(file);
 +  }
 +
 +  @Override
 +  public void onFileChange(File file) {
 +    reloadTopologies();
 +  }
 +
 +  @Override
 +  public void stop() {
 +
 +  }
 +
 +  @Override
 +  public void start() {
 +
 +  }
 +
 +  @Override
 +  public void init(GatewayConfig config, Map<String, String> options) throws ServiceLifecycleException {
 +
 +    try {
 +      listeners = new HashSet<>();
 +      topologies = new HashMap<>();
 +
 +      topologiesDirectory = calculateAbsoluteTopologiesDir(config);
 +
 +      File configDirectory = calculateAbsoluteConfigDir(config);
 +      descriptorsDirectory = new File(configDirectory, "descriptors");
-       File sharedProvidersDirectory = new File(configDirectory, "shared-providers");
++      sharedProvidersDirectory = new File(configDirectory, "shared-providers");
 +
 +      // Add support for conf/topologies
 +      initListener(topologiesDirectory, this, this);
 +
 +      // Add support for conf/descriptors
-       DescriptorsMonitor dm = new DescriptorsMonitor(topologiesDirectory, aliasService);
++      descriptorsMonitor = new DescriptorsMonitor(topologiesDirectory, aliasService);
 +      initListener(descriptorsDirectory,
-                    dm,
-                    dm);
++                   descriptorsMonitor,
++                   descriptorsMonitor);
++      log.monitoringDescriptorChangesInDirectory(descriptorsDirectory.getAbsolutePath());
 +
 +      // Add support for conf/shared-providers
-       SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(dm, descriptorsDirectory);
++      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(descriptorsMonitor, descriptorsDirectory);
 +      initListener(sharedProvidersDirectory, spm, spm);
++      log.monitoringProviderConfigChangesInDirectory(sharedProvidersDirectory.getAbsolutePath());
 +
 +      // For all the descriptors currently in the descriptors dir at start-up time, trigger topology generation.
 +      // This happens prior to the start-up loading of the topologies.
 +      String[] descriptorFilenames =  descriptorsDirectory.list();
 +      if (descriptorFilenames != null) {
 +          for (String descriptorFilename : descriptorFilenames) {
 +              if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
-                   dm.onFileChange(new File(descriptorsDirectory, descriptorFilename));
++                  descriptorsMonitor.onFileChange(new File(descriptorsDirectory, descriptorFilename));
 +              }
 +          }
 +      }
 +
 +    } catch (IOException | SAXException io) {
 +      throw new ServiceLifecycleException(io.getMessage());
 +    }
 +  }
 +
 +
 +  /**
++   * Utility method for listing the files in the specified directory.
++   * This method is "nicer" than File#listFiles() because it will not return null.
++   *
++   * @param directory The directory whose files should be returned.
++   *
++   * @return A List of the Files in the directory.
++   */
++  private static List<File> listFiles(File directory) {
++    List<File> result = null;
++    File[] files = directory.listFiles();
++    if (files != null) {
++      result = Arrays.asList(files);
++    } else {
++      result = Collections.emptyList();
++    }
++    return result;
++  }
++
++  /**
++   * Search for a file in the specified directory whose base name (filename without extension) matches the
++   * specified basename.
++   *
++   * @param directory The directory in which to search.
++   * @param basename  The basename of interest.
++   *
++   * @return The matching File, or null if no match is found.
++   */
++  private static File getExistingFile(File directory, String basename) {
++    File match = null;
++    for (File file : listFiles(directory)) {
++      if (FilenameUtils.getBaseName(file.getName()).equals(basename)) {
++        match = file;
++        break;
++      }
++    }
++    return match;
++  }
++
++  /**
++   * Write the specified content to a file.
++   *
++   * @param dest    The destination directory.
++   * @param name    The name of the file.
++   * @param content The contents of the file.
++   *
++   * @return true, if the write succeeds; otherwise, false.
++   */
++  private static boolean writeConfig(File dest, String name, String content) {
++    boolean result = false;
++
++    File destFile = new File(dest, name);
++    try {
++      FileUtils.writeStringToFile(destFile, content);
++      log.wroteConfigurationFile(destFile.getAbsolutePath());
++      result = true;
++    } catch (IOException e) {
++      log.failedToWriteConfigurationFile(destFile.getAbsolutePath(), e);
++    }
++
++    return result;
++  }
++
++
++  /**
 +   * Change handler for simple descriptors
 +   */
 +  public static class DescriptorsMonitor extends FileAlterationListenerAdaptor
 +                                          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<String>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("json");
 +      SUPPORTED_EXTENSIONS.add("yml");
 +      SUPPORTED_EXTENSIONS.add("yaml");
 +    }
 +
 +    private File topologiesDir;
 +
 +    private AliasService aliasService;
 +
 +    private Map<String, List<String>> providerConfigReferences = new HashMap<>();
 +
 +
 +    static boolean isDescriptorFile(String filename) {
 +      return SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(filename));
 +    }
 +
 +    public DescriptorsMonitor(File topologiesDir, AliasService aliasService) {
 +      this.topologiesDir  = topologiesDir;
 +      this.aliasService   = aliasService;
 +    }
 +
 +    List<String> getReferencingDescriptors(String providerConfigPath) {
-       List<String> result = providerConfigReferences.get(providerConfigPath);
++      List<String> result = providerConfigReferences.get(FilenameUtils.normalize(providerConfigPath));
 +      if (result == null) {
 +        result = Collections.emptyList();
 +      }
 +      return result;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      // For simple descriptors, we need to make sure to delete any corresponding full topology descriptors to trigger undeployment
 +      for (String ext : DefaultTopologyService.SUPPORTED_TOPOLOGY_FILE_EXTENSIONS) {
 +        File topologyFile =
 +                new File(topologiesDir, FilenameUtils.getBaseName(file.getName()) + "." + ext);
 +        if (topologyFile.exists()) {
++          log.deletingTopologyForDescriptorDeletion(topologyFile.getName(), file.getName());
 +          topologyFile.delete();
 +        }
 +      }
 +
 +      String normalizedFilePath = FilenameUtils.normalize(file.getAbsolutePath());
 +      String reference = null;
 +      for (Map.Entry<String, List<String>> entry : providerConfigReferences.entrySet()) {
 +        if (entry.getValue().contains(normalizedFilePath)) {
 +          reference = entry.getKey();
 +          break;
 +        }
 +      }
++
 +      if (reference != null) {
 +        providerConfigReferences.get(reference).remove(normalizedFilePath);
++        log.removedProviderConfigurationReference(normalizedFilePath, reference);
 +      }
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      try {
 +        // When a simple descriptor has been created or modified, generate the new topology descriptor
 +        Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
++        log.generatedTopologyForDescriptorChange(result.get("topology").getName(), file.getName());
 +
 +        // Add the provider config reference relationship for handling updates to the provider config
 +        String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
 +        if (!providerConfigReferences.containsKey(providerConfig)) {
 +          providerConfigReferences.put(providerConfig, new ArrayList<String>());
 +        }
 +        List<String> refs = providerConfigReferences.get(providerConfig);
 +        String descriptorName = FilenameUtils.normalize(file.getAbsolutePath());
 +        if (!refs.contains(descriptorName)) {
 +          // Need to check if descriptor had previously referenced another provider config, so it can be removed
 +          for (List<String> descs : providerConfigReferences.values()) {
 +            if (descs.contains(descriptorName)) {
 +              descs.remove(descriptorName);
 +            }
 +          }
 +
 +          // Add the current reference relationship
 +          refs.add(descriptorName);
++          log.addedProviderConfigurationReference(descriptorName, providerConfig);
 +        }
 +      } catch (Exception e) {
 +        log.simpleDescriptorHandlingError(file.getName(), e);
 +      }
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +  /**
 +   * Change handler for shared provider configurations
 +   */
 +  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor
 +          implements FileFilter {
 +
 +    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
 +    static {
 +      SUPPORTED_EXTENSIONS.add("xml");
 +    }
 +
 +    private DescriptorsMonitor descriptorsMonitor;
 +    private File descriptorsDir;
 +
 +
 +    SharedProviderConfigMonitor(DescriptorsMonitor descMonitor, File descriptorsDir) {
 +      this.descriptorsMonitor = descMonitor;
 +      this.descriptorsDir     = descriptorsDir;
 +    }
 +
 +    @Override
 +    public void onFileCreate(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileDelete(File file) {
 +      onFileChange(file);
 +    }
 +
 +    @Override
 +    public void onFileChange(File file) {
 +      // For shared provider configuration, we need to update any simple descriptors that reference it
 +      for (File descriptor : getReferencingDescriptors(file)) {
 +        descriptor.setLastModified(System.currentTimeMillis());
 +      }
 +    }
 +
 +    private List<File> getReferencingDescriptors(File sharedProviderConfig) {
 +      List<File> references = new ArrayList<>();
 +
-       for (File descriptor : descriptorsDir.listFiles()) {
++      for (File descriptor : listFiles(descriptorsDir)) {
 +        if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
 +          for (String reference : descriptorsMonitor.getReferencingDescriptors(FilenameUtils.normalize(sharedProviderConfig.getAbsolutePath()))) {
 +            references.add(new File(reference));
 +          }
 +        }
 +      }
 +
 +      return references;
 +    }
 +
 +    @Override
 +    public boolean accept(File file) {
 +      boolean accept = false;
 +      if (!file.isDirectory() && file.canRead()) {
 +        String extension = FilenameUtils.getExtension(file.getName());
 +        if (SUPPORTED_EXTENSIONS.contains(extension)) {
 +          accept = true;
 +        }
 +      }
 +      return accept;
 +    }
 +  }
 +
 +}
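
A minimal usage sketch of the shared-provider and descriptor management methods added above, assuming an initialized GatewayConfig and AliasService supplied by the running gateway (the class, file and alias names below are illustrative only, not part of this change):

    import java.util.Collections;

    import org.apache.knox.gateway.config.GatewayConfig;
    import org.apache.knox.gateway.services.security.AliasService;
    import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;

    public class ProviderConfigDeploymentSketch {

      // gatewayConfig and aliasService are assumed to come from the gateway's service
      // loading machinery; "sso-providers" and "sandbox" are example names.
      static void deploy(GatewayConfig gatewayConfig,
                         AliasService aliasService,
                         String providerConfigXml,
                         String simpleDescriptorJson) throws Exception {
        DefaultTopologyService topologyService = new DefaultTopologyService();
        topologyService.setAliasService(aliasService);
        topologyService.init(gatewayConfig, Collections.<String, String>emptyMap());
        topologyService.startMonitor();

        // Writes shared-providers/sso-providers.xml and descriptors/sandbox.json under the
        // gateway configuration directory; once the descriptors monitor sees the new simple
        // descriptor it generates the corresponding topology.
        topologyService.deployProviderConfiguration("sso-providers.xml", providerConfigXml);
        topologyService.deployDescriptor("sandbox.json", simpleDescriptorJson);

        // Deletion is refused (returns false) while any descriptor still references the config.
        boolean deleted = topologyService.deleteProviderConfiguration("sso-providers");
      }
    }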

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
index 1caa946,0000000..a1a2609
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
@@@ -1,94 -1,0 +1,105 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.builder;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +
 +public class BeanPropertyTopologyBuilder implements TopologyBuilder {
 +
 +    private String name;
++    private String defaultService;
 +    private List<Provider> providers;
 +    private List<Service> services;
 +    private List<Application> applications;
 +
 +    public BeanPropertyTopologyBuilder() {
 +        providers = new ArrayList<Provider>();
 +        services = new ArrayList<Service>();
 +        applications = new ArrayList<Application>();
 +    }
 +
 +    public BeanPropertyTopologyBuilder name(String name) {
 +        this.name = name;
 +        return this;
 +    }
 +
 +    public String name() {
 +        return name;
 +    }
 +
++    public BeanPropertyTopologyBuilder defaultService(String defaultService) {
++      this.defaultService = defaultService;
++      return this;
++    }
++
++    public String defaultService() {
++      return defaultService;
++    }
++
 +    public BeanPropertyTopologyBuilder addProvider(Provider provider) {
 +        providers.add(provider);
 +        return this;
 +    }
 +
 +    public List<Provider> providers() {
 +        return providers;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addService(Service service) {
 +        services.add(service);
 +        return this;
 +    }
 +
 +    public List<Service> services() {
 +        return services;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addApplication( Application application ) {
 +        applications.add(application);
 +        return this;
 +    }
 +
 +    public List<Application> applications() {
 +        return applications;
 +    }
 +
 +    public Topology build() {
 +        Topology topology = new Topology();
 +        topology.setName(name);
++        topology.setDefaultServicePath(defaultService);
 +
 +        for (Provider provider : providers) {
 +            topology.addProvider(provider);
 +        }
 +
 +        for (Service service : services) {
 +            topology.addService(service);
 +        }
 +
 +        for (Application application : applications) {
 +            topology.addApplication(application);
 +        }
 +
 +        return topology;
 +    }
 +}
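
The new defaultService property is carried straight from the builder into the Topology. A small sketch of driving the builder programmatically, with illustrative values only:

    import org.apache.knox.gateway.topology.Service;
    import org.apache.knox.gateway.topology.Topology;
    import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;

    public class TopologyBuilderSketch {

      // "sandbox" and WEBHDFS are example values; defaultService names the service path
      // used for requests that do not specify one.
      static Topology buildSandboxTopology() {
        Service webhdfs = new Service();
        webhdfs.setRole("WEBHDFS");
        webhdfs.setName("webhdfs");

        return new BeanPropertyTopologyBuilder()
            .name("sandbox")
            .defaultService("webhdfs")   // becomes Topology.setDefaultServicePath() in build()
            .addService(webhdfs)
            .build();
      }
    }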

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
----------------------------------------------------------------------
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
index 6b51ab8,0000000..81aedec
mode 100644,000000..100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
@@@ -1,93 -1,0 +1,95 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.xml;
 +
 +import org.apache.commons.digester3.Rule;
 +import org.apache.commons.digester3.binder.AbstractRulesModule;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Version;
 +import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;
 +import org.xml.sax.Attributes;
 +
 +public class KnoxFormatXmlTopologyRules extends AbstractRulesModule {
 +
 +  private static final String ROOT_TAG = "topology";
 +  private static final String NAME_TAG = "name";
 +  private static final String VERSION_TAG = "version";
++  private static final String DEFAULT_SERVICE_TAG = "path";
 +  private static final String APPLICATION_TAG = "application";
 +  private static final String SERVICE_TAG = "service";
 +  private static final String ROLE_TAG = "role";
 +  private static final String URL_TAG = "url";
 +  private static final String PROVIDER_TAG = "gateway/provider";
 +  private static final String ENABLED_TAG = "enabled";
 +  private static final String PARAM_TAG = "param";
 +  private static final String VALUE_TAG = "value";
 +
 +  private static final Rule paramRule = new ParamRule();
 +
 +  @Override
 +  protected void configure() {
 +    forPattern( ROOT_TAG ).createObject().ofType( BeanPropertyTopologyBuilder.class );
 +    forPattern( ROOT_TAG + "/" + NAME_TAG ).callMethod("name").usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + VERSION_TAG ).callMethod("version").usingElementBodyAsArgument();
++    forPattern( ROOT_TAG + "/" + DEFAULT_SERVICE_TAG ).callMethod("defaultService").usingElementBodyAsArgument();
 +
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG ).createObject().ofType( Application.class ).then().setNext( "addApplication" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG ).createObject().ofType( Service.class ).then().setNext( "addService" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG ).createObject().ofType( Provider.class ).then().setNext( "addProvider" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ENABLED_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +  }
 +
 +  private static class ParamRule extends Rule {
 +
 +    @Override
 +    public void begin( String namespace, String name, Attributes attributes ) {
 +      Param param = getDigester().peek();
 +      String paramName = attributes.getValue( "name" );
 +      if( paramName != null ) {
 +        param.setName( paramName );
 +        param.setValue( attributes.getValue( "value" ) );
 +      }
 +    }
 +
 +  }
 +
 +}
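
For illustration, a sketch of feeding a Knox-format topology that uses the new path element through these Digester rules (values are examples only; the real loading path is DefaultTopologyService.loadTopologyAttempt):

    import static org.apache.commons.digester3.binder.DigesterLoader.newLoader;

    import java.io.StringReader;

    import org.apache.commons.digester3.Digester;
    import org.apache.knox.gateway.topology.Topology;
    import org.apache.knox.gateway.topology.builder.TopologyBuilder;
    import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;

    public class TopologyPathElementSketch {

      static Topology parseWithDefaultServicePath() throws Exception {
        // Minimal Knox-format topology; <path> is the new element handled by DEFAULT_SERVICE_TAG.
        String xml =
            "<topology>"
          + "  <name>sandbox</name>"
          + "  <path>webhdfs</path>"
          + "  <service><role>WEBHDFS</role><url>http://localhost:50070/webhdfs</url></service>"
          + "</topology>";

        Digester digester = newLoader(new KnoxFormatXmlTopologyRules()).newDigester();
        TopologyBuilder builder = digester.parse(new StringReader(xml));
        return builder.build();   // getDefaultServicePath() now returns "webhdfs"
      }
    }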

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
index 178ff5e,0000000..ac22400
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/GatewayFilterTest.java
@@@ -1,171 -1,0 +1,220 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway;
 +
 +import org.apache.knox.gateway.audit.api.AuditServiceFactory;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.filter.AbstractGatewayFilter;
++import org.apache.knox.gateway.topology.Topology;
 +import org.apache.hadoop.test.category.FastTests;
 +import org.apache.hadoop.test.category.UnitTests;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +import javax.servlet.*;
 +import javax.servlet.http.HttpServletRequest;
 +import javax.servlet.http.HttpServletResponse;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.MatcherAssert.assertThat;
 +
 +/**
 + *
 + */
 +@Category( { UnitTests.class, FastTests.class } )
 +public class GatewayFilterTest {
 +
 +  @Before
 +  public void setup() {
 +    AuditServiceFactory.getAuditService().createContext();
 +  }
 +
 +  @After
 +  public void reset() {
 +    AuditServiceFactory.getAuditService().detachContext();
 +  }
 +
 +  @Test
 +  public void testNoFilters() throws ServletException, IOException {
 +
 +    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
 +    EasyMock.replay( config );
 +
 +    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
 +    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
 +    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
 +    EasyMock.expect( request.getPathInfo() ).andReturn( "source" ).anyTimes();
 +    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
 +    EasyMock.expect( context.getAttribute(
 +        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
 +    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
 +        "Custom-Forwarded-For").anyTimes();
 +    EasyMock.replay( request );
 +    EasyMock.replay( context );
 +    EasyMock.replay( gatewayConfig );
 +    
 +    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
 +    EasyMock.replay( response );
 +
 +    FilterChain chain = EasyMock.createNiceMock( FilterChain.class );
 +    EasyMock.replay( chain );
 +
 +    GatewayFilter gateway = new GatewayFilter();
 +    gateway.init( config );
 +    gateway.doFilter( request, response, chain );
 +    gateway.destroy();
 +  }
 +
 +  @Test
 +  public void testNoopFilter() throws ServletException, IOException, URISyntaxException {
 +
 +    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
 +    EasyMock.replay( config );
 +
 +    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
 +    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
 +    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
 +    EasyMock.expect( request.getPathInfo() ).andReturn( "source" ).anyTimes();
 +    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
 +    EasyMock.expect( context.getAttribute(
 +        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
 +    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
 +        "Custom-Forwarded-For").anyTimes();
 +    EasyMock.replay( request );
 +    EasyMock.replay( context );
 +    EasyMock.replay( gatewayConfig );
 +
 +    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
 +    EasyMock.replay( response );
 +
 +    FilterChain chain = EasyMock.createNiceMock( FilterChain.class );
 +    EasyMock.replay( chain );
 +
 +    Filter filter = EasyMock.createNiceMock( Filter.class );
 +    EasyMock.replay( filter );
 +
 +    GatewayFilter gateway = new GatewayFilter();
 +    gateway.addFilter( "path", "filter", filter, null, null );
 +    gateway.init( config );
 +    gateway.doFilter( request, response, chain );
 +    gateway.destroy();
 +
 +  }
 +
 +  public static class TestRoleFilter extends AbstractGatewayFilter {
 +
 +    public Object role;
++    public String defaultServicePath;
++    public String url;
 +
 +    @Override
 +    protected void doFilter( HttpServletRequest request, HttpServletResponse response, FilterChain chain ) throws IOException, ServletException {
 +      this.role = request.getAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE );
++      Topology topology = (Topology)request.getServletContext().getAttribute( "org.apache.knox.gateway.topology" );
++      if (topology != null) {
++        this.defaultServicePath = (String) topology.getDefaultServicePath();
++        url = new String(request.getRequestURL());
++      }
 +    }
 +
 +  }
 +
 +  @Test
 +  public void testTargetServiceRoleRequestAttribute() throws Exception {
 +
 +    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
 +    EasyMock.replay( config );
 +
 +    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
 +    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
 +    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
 +    EasyMock.expect( request.getPathInfo() ).andReturn( "test-path/test-resource" ).anyTimes();
 +    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
 +    EasyMock.expect( context.getAttribute(
 +        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
 +    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
 +        "Custom-Forwarded-For").anyTimes();
 +    request.setAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE, "test-role" );
 +    EasyMock.expectLastCall().anyTimes();
 +    EasyMock.expect( request.getAttribute( AbstractGatewayFilter.TARGET_SERVICE_ROLE ) ).andReturn( "test-role" ).anyTimes();
 +    EasyMock.replay( request );
 +    EasyMock.replay( context );
 +    EasyMock.replay( gatewayConfig );
 +
 +    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
 +    EasyMock.replay( response );
 +
 +    TestRoleFilter filter = new TestRoleFilter();
 +
 +    GatewayFilter gateway = new GatewayFilter();
 +    gateway.addFilter( "test-path/**", "test-filter", filter, null, "test-role" );
 +    gateway.init( config );
 +    gateway.doFilter( request, response );
 +    gateway.destroy();
 +
 +    assertThat( (String)filter.role, is( "test-role" ) );
 +
 +  }
 +
++  @Test
++  public void testDefaultServicePathTopologyRequestAttribute() throws Exception {
++
++    FilterConfig config = EasyMock.createNiceMock( FilterConfig.class );
++    EasyMock.replay( config );
++
++    Topology topology = EasyMock.createNiceMock( Topology.class );
++    topology.setDefaultServicePath("test-role/");
++    HttpServletRequest request = EasyMock.createNiceMock( HttpServletRequest.class );
++    ServletContext context = EasyMock.createNiceMock( ServletContext.class );
++    GatewayConfig gatewayConfig = EasyMock.createNiceMock( GatewayConfig.class );
++    EasyMock.expect( topology.getDefaultServicePath() ).andReturn( "test-role" ).anyTimes();
++    EasyMock.expect( request.getPathInfo() ).andReturn( "/test-path/test-resource" ).anyTimes();
++    EasyMock.expect( request.getServletContext() ).andReturn( context ).anyTimes();
++    EasyMock.expect( context.getAttribute(
++        GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE)).andReturn(gatewayConfig).anyTimes();
++    EasyMock.expect(gatewayConfig.getHeaderNameForRemoteAddress()).andReturn(
++        "Custom-Forwarded-For").anyTimes();
++    EasyMock.expect( request.getRequestURL() ).andReturn( new StringBuffer("http://host:8443/gateway/sandbox/test-path/test-resource/") ).anyTimes();
++
++    EasyMock.expect( context.getAttribute( "org.apache.hadoop.gateway.topology" ) ).andReturn( topology ).anyTimes();
++    EasyMock.replay( request );
++    EasyMock.replay( context );
++    EasyMock.replay( topology );
++    EasyMock.replay( gatewayConfig );
++
++    HttpServletResponse response = EasyMock.createNiceMock( HttpServletResponse.class );
++    EasyMock.replay( response );
++
++    TestRoleFilter filter = new TestRoleFilter();
++
++    GatewayFilter gateway = new GatewayFilter();
++    gateway.addFilter( "test-role/**/**", "test-filter", filter, null, "test-role" );
++    gateway.init( config );
++    gateway.doFilter( request, response );
++    gateway.destroy();
++
++    assertThat( (String)filter.defaultServicePath, is( "test-role" ) );
++    assertThat( (String)filter.url, is("http://host:8443/gateway/sandbox/test-role/test-path/test-resource"));
++
++  }
 +}

http://git-wip-us.apache.org/repos/asf/knox/blob/c754cc06/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
----------------------------------------------------------------------
diff --cc gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
index d28ad7f,0000000..95d6f9d
mode 100644,000000..100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/services/topology/DefaultTopologyServiceTest.java
@@@ -1,266 -1,0 +1,610 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.topology;
 +
 +import org.apache.commons.io.FileUtils;
++import org.apache.commons.io.FilenameUtils;
 +import org.apache.commons.io.IOUtils;
++import org.apache.commons.io.monitor.FileAlterationListener;
 +import org.apache.commons.io.monitor.FileAlterationMonitor;
 +import org.apache.commons.io.monitor.FileAlterationObserver;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.topology.impl.DefaultTopologyService;
++import org.apache.knox.gateway.topology.*;
 +import org.apache.hadoop.test.TestUtils;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.TopologyEvent;
 +import org.apache.knox.gateway.topology.TopologyListener;
 +import org.apache.knox.gateway.services.security.AliasService;
 +import org.easymock.EasyMock;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.OutputStream;
- import java.util.*;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collection;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Map;
++import java.util.Set;
 +
 +import static org.easymock.EasyMock.anyObject;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.Matchers.hasItem;
 +import static org.hamcrest.core.IsNull.notNullValue;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertNotEquals;
++import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +
 +public class DefaultTopologyServiceTest {
 +
 +  @Before
 +  public void setUp() throws Exception {
 +  }
 +
 +  @After
 +  public void tearDown() throws Exception {
 +  }
 +
 +  private File createDir() throws IOException {
 +    return TestUtils.createTempDir(this.getClass().getSimpleName() + "-");
 +  }
 +
 +  private File createFile(File parent, String name, String resource, long timestamp) throws IOException {
 +    File file = new File(parent, name);
 +    if (!file.exists()) {
 +      FileUtils.touch(file);
 +    }
 +    InputStream input = ClassLoader.getSystemResourceAsStream(resource);
 +    OutputStream output = FileUtils.openOutputStream(file);
 +    IOUtils.copy(input, output);
 +    //KNOX-685: output.flush();
 +    input.close();
 +    output.close();
 +    file.setLastModified(timestamp);
 +    assertTrue("Failed to create test file " + file.getAbsolutePath(), file.exists());
 +    assertTrue("Failed to populate test file " + file.getAbsolutePath(), file.length() > 0);
 +
 +    return file;
 +  }
 +
 +  @Test
 +  public void testGetTopologies() throws Exception {
 +
 +    File dir = createDir();
 +    File topologyDir = new File(dir, "topologies");
 +
-     File descriptorsDir = new File(dir, "descriptors");
-     descriptorsDir.mkdirs();
- 
-     File sharedProvidersDir = new File(dir, "shared-providers");
-     sharedProvidersDir.mkdirs();
- 
 +    long time = topologyDir.lastModified();
 +    try {
 +      createFile(topologyDir, "one.xml", "org/apache/knox/gateway/topology/file/topology-one.xml", time);
 +
 +      TestTopologyListener topoListener = new TestTopologyListener();
 +      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
 +
 +      TopologyService provider = new DefaultTopologyService();
 +      Map<String, String> c = new HashMap<>();
 +
 +      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
 +      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
-       EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(topologyDir.getParentFile().getAbsolutePath()).anyTimes();
 +      EasyMock.replay(config);
 +
 +      provider.init(config, c);
 +
 +      provider.addTopologyChangeListener(topoListener);
 +
 +      provider.reloadTopologies();
 +
 +      Collection<Topology> topologies = provider.getTopologies();
 +      assertThat(topologies, notNullValue());
 +      assertThat(topologies.size(), is(1));
 +      Topology topology = topologies.iterator().next();
 +      assertThat(topology.getName(), is("one"));
 +      assertThat(topology.getTimestamp(), is(time));
 +      assertThat(topoListener.events.size(), is(1));
 +      topoListener.events.clear();
 +
 +      // Add a file to the directory.
 +      File two = createFile(topologyDir, "two.xml",
 +          "org/apache/knox/gateway/topology/file/topology-two.xml", 1L);
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(2));
 +      Set<String> names = new HashSet<>(Arrays.asList("one", "two"));
 +      Iterator<Topology> iterator = topologies.iterator();
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      assertThat(names.size(), is(0));
 +      assertThat(topoListener.events.size(), is(1));
 +      List<TopologyEvent> events = topoListener.events.get(0);
 +      assertThat(events.size(), is(1));
 +      TopologyEvent event = events.get(0);
 +      assertThat(event.getType(), is(TopologyEvent.Type.CREATED));
 +      assertThat(event.getTopology(), notNullValue());
 +
 +      // Update a file in the directory.
 +      two = createFile(topologyDir, "two.xml",
 +          "org/apache/knox/gateway/topology/file/topology-three.xml", 2L);
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(2));
 +      names = new HashSet<>(Arrays.asList("one", "two"));
 +      iterator = topologies.iterator();
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      topology = iterator.next();
 +      assertThat(names, hasItem(topology.getName()));
 +      names.remove(topology.getName());
 +      assertThat(names.size(), is(0));
 +
 +      // Remove a file from the directory.
 +      two.delete();
 +      provider.reloadTopologies();
 +      topologies = provider.getTopologies();
 +      assertThat(topologies.size(), is(1));
 +      topology = topologies.iterator().next();
 +      assertThat(topology.getName(), is("one"));
 +      assertThat(topology.getTimestamp(), is(time));
 +
++    } finally {
++      FileUtils.deleteQuietly(dir);
++    }
++  }
++
++  /**
++   * KNOX-1014
++   *
++   * Test the lifecycle relationship between simple descriptors and topology files.
++   *
++   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
++   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
++   */
++  @Test
++  public void testSimpleDescriptorsTopologyGeneration() throws Exception {
++
++    File dir = createDir();
++    File topologyDir = new File(dir, "topologies");
++    topologyDir.mkdirs();
++
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
++    try {
++      TestTopologyListener topoListener = new TestTopologyListener();
++      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
++
++      TopologyService provider = new DefaultTopologyService();
++      Map<String, String> c = new HashMap<>();
++
++      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
++      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.replay(config);
++
++      provider.init(config, c);
++      provider.addTopologyChangeListener(topoListener);
++      provider.reloadTopologies();
++
++
 +      // Add a simple descriptor to the descriptors dir to verify topology generation and loading (KNOX-1006)
-       // N.B. This part of the test depends on the DummyServiceDiscovery extension being configured:
-       //         org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
 +      AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
 +      EasyMock.expect(aliasService.getPasswordFromAliasForGateway(anyObject(String.class))).andReturn(null).anyTimes();
 +      EasyMock.replay(aliasService);
 +      DefaultTopologyService.DescriptorsMonitor dm =
-                                           new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
++              new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
++
++      // Listener to simulate the topologies directory monitor, to notice when a topology has been deleted
++      provider.addTopologyChangeListener(new TestTopologyDeleteListener((DefaultTopologyService)provider));
 +
 +      // Write out the referenced provider config first
 +      File provCfgFile = createFile(sharedProvidersDir,
 +                                    "ambari-cluster-policy.xml",
-           "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
-                                     1L);
++                                    "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
++                                    System.currentTimeMillis());
 +      try {
 +        // Create the simple descriptor in the descriptors dir
-         File simpleDesc =
-                 createFile(descriptorsDir,
-                            "four.json",
-                     "org/apache/knox/gateway/topology/file/simple-topology-four.json",
-                            1L);
++        File simpleDesc = createFile(descriptorsDir,
++                                     "four.json",
++                                     "org/apache/knox/gateway/topology/file/simple-topology-four.json",
++                                     System.currentTimeMillis());
 +
 +        // Trigger the topology generation by noticing the simple descriptor
 +        dm.onFileChange(simpleDesc);
 +
 +        // Load the generated topology
 +        provider.reloadTopologies();
++        Collection<Topology> topologies = provider.getTopologies();
++        assertThat(topologies.size(), is(1));
++        Iterator<Topology> iterator = topologies.iterator();
++        Topology topology = iterator.next();
++        assertThat(topology.getName(), is("four"));
++        int serviceCount = topology.getServices().size();
++        assertEquals("Expected the same number of services as are declared in the simple descriptor.", 10, serviceCount);
++
++        // Overwrite the simple descriptor with a different set of services, and check that the changes are
++        // propagated to the associated topology
++        simpleDesc = createFile(descriptorsDir,
++                                "four.json",
++                                "org/apache/knox/gateway/topology/file/simple-descriptor-five.json",
++                                System.currentTimeMillis());
++        dm.onFileChange(simpleDesc);
++        provider.reloadTopologies();
++        topologies = provider.getTopologies();
++        topology = topologies.iterator().next();
++        assertNotEquals(serviceCount, topology.getServices().size());
++        assertEquals(6, topology.getServices().size());
++
++        // Delete the simple descriptor, and make sure that the associated topology file is deleted
++        simpleDesc.delete();
++        dm.onFileDelete(simpleDesc);
++        provider.reloadTopologies();
 +        topologies = provider.getTopologies();
-         assertThat(topologies.size(), is(2));
-         names = new HashSet<>(Arrays.asList("one", "four"));
-         iterator = topologies.iterator();
-         topology = iterator.next();
-         assertThat(names, hasItem(topology.getName()));
-         names.remove(topology.getName());
-         topology = iterator.next();
-         assertThat(names, hasItem(topology.getName()));
-         names.remove(topology.getName());
-         assertThat(names.size(), is(0));
++        assertTrue(topologies.isEmpty());
++
++        // Delete a topology file, and make sure that the associated simple descriptor is deleted
++        // First, deploy another descriptor so that a corresponding topology file gets generated
++        simpleDesc = createFile(descriptorsDir,
++                                "deleteme.json",
++                                "org/apache/knox/gateway/topology/file/simple-descriptor-five.json",
++                                System.currentTimeMillis());
++        dm.onFileChange(simpleDesc);
++        provider.reloadTopologies();
++        topologies = provider.getTopologies();
++        assertFalse(topologies.isEmpty());
++        topology = topologies.iterator().next();
++        assertEquals("deleteme", topology.getName());
++        File topologyFile = new File(topologyDir, topology.getName() + ".xml");
++        assertTrue(topologyFile.exists());
++        topologyFile.delete();
++        provider.reloadTopologies();
++        assertFalse("Simple descriptor should have been deleted because the associated topology was.",
++                    simpleDesc.exists());
++
 +      } finally {
 +        provCfgFile.delete();
- 
 +      }
 +    } finally {
 +      FileUtils.deleteQuietly(dir);
 +    }
 +  }
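++
++  /*
++   * For reference: the simple descriptors consumed above are small JSON files. A minimal sketch of
++   * the shape such a descriptor takes (field names follow the Knox simple descriptor format; the
++   * discovery values and service names below are illustrative, not the contents of
++   * simple-topology-four.json):
++   *
++   *   {
++   *     "discovery-type"      : "DUMMY",
++   *     "discovery-address"   : "http://c6401.ambari.apache.org:8080",
++   *     "provider-config-ref" : "ambari-cluster-policy",
++   *     "cluster"             : "Sandbox",
++   *     "services"            : [ { "name": "NAMENODE" }, { "name": "WEBHDFS" } ]
++   *   }
++   */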
 +
++  /**
++   * KNOX-1014
++   *
++   * Test the lifecycle relationship between provider configuration files, simple descriptors, and topology files.
++   *
++   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
++   *        org.apache.knox.gateway.topology.discovery.test.extension.DummyServiceDiscovery
++   */
++  @Test
++  public void testTopologiesUpdateFromProviderConfigChange() throws Exception {
++    File dir = createDir();
++    File topologyDir = new File(dir, "topologies");
++    topologyDir.mkdirs();
++
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
++    try {
++      TestTopologyListener topoListener = new TestTopologyListener();
++      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
++
++      TopologyService ts = new DefaultTopologyService();
++      Map<String, String> c = new HashMap<>();
++
++      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
++      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.replay(config);
++
++      ts.init(config, c);
++      ts.addTopologyChangeListener(topoListener);
++      ts.reloadTopologies();
++
++      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
++      dmField.setAccessible(true);
++      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
++
++      // Write out the referenced provider configs first
++      createFile(sharedProvidersDir,
++                 "provider-config-one.xml",
++                 "org/apache/knox/gateway/topology/file/provider-config-one.xml",
++                 System.currentTimeMillis());
++
++      // Create the simple descriptor, which depends on provider-config-one.xml
++      File simpleDesc = createFile(descriptorsDir,
++                                   "six.json",
++                                   "org/apache/knox/gateway/topology/file/simple-descriptor-six.json",
++                                   System.currentTimeMillis());
++
++      // "Notice" the simple descriptor change, and generate a topology based on it
++      dm.onFileChange(simpleDesc);
++
++      // Load the generated topology
++      ts.reloadTopologies();
++      Collection<Topology> topologies = ts.getTopologies();
++      assertThat(topologies.size(), is(1));
++      Iterator<Topology> iterator = topologies.iterator();
++      Topology topology = iterator.next();
++      assertFalse("The Shiro provider is disabled in provider-config-one.xml",
++                  topology.getProvider("authentication", "ShiroProvider").isEnabled());
++
++      // Overwrite the referenced provider configuration with a different ShiroProvider config, and check that the
++      // changes are propagated to the associated topology
++      File providerConfig = createFile(sharedProvidersDir,
++                                       "provider-config-one.xml",
++                                       "org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml",
++                                       System.currentTimeMillis());
++
++      // "Notice" the simple descriptor change as a result of the referenced config change
++      dm.onFileChange(simpleDesc);
++
++      // Load the generated topology
++      ts.reloadTopologies();
++      topologies = ts.getTopologies();
++      assertFalse(topologies.isEmpty());
++      topology = topologies.iterator().next();
++      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
++              topology.getProvider("authentication", "ShiroProvider").isEnabled());
++
++      // Delete the provider configuration, and make sure that the associated topology file is unaffected.
++      // The topology file should not be affected because the simple descriptor handling will fail to resolve the
++      // referenced provider configuration.
++      providerConfig.delete();     // Delete the file
++      dm.onFileChange(simpleDesc); // The provider config deletion will trigger a descriptor change notification
++      ts.reloadTopologies();
++      topologies = ts.getTopologies();
++      assertFalse(topologies.isEmpty());
++      topology = topologies.iterator().next();
++      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
++              topology.getProvider("authentication", "ShiroProvider").isEnabled());
++
++    } finally {
++      FileUtils.deleteQuietly(dir);
++    }
++  }
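++
++  /*
++   * For reference: the shared provider configurations referenced above are standalone XML files
++   * carrying the provider section of a topology. A minimal sketch of the fragment whose enabled
++   * flag this test toggles (illustrative; not the literal contents of provider-config-one.xml):
++   *
++   *   <gateway>
++   *     <provider>
++   *       <role>authentication</role>
++   *       <name>ShiroProvider</name>
++   *       <enabled>false</enabled>
++   *     </provider>
++   *   </gateway>
++   */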
++
++  /**
++   * KNOX-1039
++   */
++  @Test
++  public void testConfigurationCRUDAPI() throws Exception {
++    File dir = createDir();
++    File topologyDir = new File(dir, "topologies");
++    topologyDir.mkdirs();
++
++    File descriptorsDir = new File(dir, "descriptors");
++    descriptorsDir.mkdirs();
++
++    File sharedProvidersDir = new File(dir, "shared-providers");
++    sharedProvidersDir.mkdirs();
++
++    try {
++      TestTopologyListener topoListener = new TestTopologyListener();
++      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
++
++      TopologyService ts = new DefaultTopologyService();
++      Map<String, String> c = new HashMap<>();
++
++      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
++      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
++      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
++      EasyMock.replay(config);
++
++      ts.init(config, c);
++      ts.addTopologyChangeListener(topoListener);
++      ts.reloadTopologies();
++
++      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
++      dmField.setAccessible(true);
++      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
++
++      final String simpleDescName  = "six.json";
++      final String provConfOne     = "provider-config-one.xml";
++      final String provConfTwo     = "ambari-cluster-policy.xml";
++
++      // "Deploy" the referenced provider configs first
++      boolean isDeployed =
++        ts.deployProviderConfiguration(provConfOne,
++                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/provider-config-one.xml").toURI())));
++      assertTrue(isDeployed);
++      File provConfOneFile = new File(sharedProvidersDir, provConfOne);
++      assertTrue(provConfOneFile.exists());
++
++      isDeployed =
++        ts.deployProviderConfiguration(provConfTwo,
++                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/ambari-cluster-policy.xml").toURI())));
++      assertTrue(isDeployed);
++      File provConfTwoFile = new File(sharedProvidersDir, provConfTwo);
++      assertTrue(provConfTwoFile.exists());
++
++      // Validate the provider configurations known by the topology service
++      Collection<File> providerConfigurations = ts.getProviderConfigurations();
++      assertNotNull(providerConfigurations);
++      assertEquals(2, providerConfigurations.size());
++      assertTrue(providerConfigurations.contains(provConfOneFile));
++      assertTrue(providerConfigurations.contains(provConfTwoFile));
++
++      // "Deploy" the simple descriptor, which depends on provConfOne
++      isDeployed =
++        ts.deployDescriptor(simpleDescName,
++            FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/simple-descriptor-six.json").toURI())));
++      assertTrue(isDeployed);
++      File simpleDesc = new File(descriptorsDir, simpleDescName);
++      assertTrue(simpleDesc.exists());
++
++      // Validate the simple descriptors known by the topology service
++      Collection<File> descriptors = ts.getDescriptors();
++      assertNotNull(descriptors);
++      assertEquals(1, descriptors.size());
++      assertTrue(descriptors.contains(simpleDesc));
++
++      // "Notice" the simple descriptor, so the provider configuration dependency relationship is recorded
++      dm.onFileChange(simpleDesc);
++
++      // Attempt to delete the referenced provConfOne
++      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
++                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
++
++      // Overwrite the simple descriptor with content that changes the provider config reference to provConfTwo
++      isDeployed =
++        ts.deployDescriptor(simpleDescName,
++              FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/knox/gateway/topology/file/simple-descriptor-five.json").toURI())));
++      assertTrue(isDeployed);
++      assertTrue(simpleDesc.exists());
++      ts.getProviderConfigurations();
++
++      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
++      dm.onFileChange(simpleDesc);
++
++      // Attempt to delete the referenced provConfOne
++      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
++                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
++
++      // Re-validate the provider configurations known by the topology service
++      providerConfigurations = ts.getProviderConfigurations();
++      assertNotNull(providerConfigurations);
++      assertEquals(1, providerConfigurations.size());
++      assertFalse(providerConfigurations.contains(provConfOneFile));
++      assertTrue(providerConfigurations.contains(provConfTwoFile));
++
++      // Attempt to delete the referenced provConfTwo
++      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
++                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
++
++      // Delete the referencing simple descriptor
++      assertTrue(ts.deleteDescriptor(FilenameUtils.getBaseName(simpleDescName)));
++      assertFalse(simpleDesc.exists());
++
++      // Re-validate the simple descriptors known by the topology service
++      descriptors = ts.getDescriptors();
++      assertNotNull(descriptors);
++      assertTrue(descriptors.isEmpty());
++
++      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
++      dm.onFileDelete(simpleDesc);
++
++      // Attempt to delete the referenced provConfTwo
++      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
++                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
++
++      // Re-validate the provider configurations known by the topology service
++      providerConfigurations = ts.getProviderConfigurations();
++      assertNotNull(providerConfigurations);
++      assertTrue(providerConfigurations.isEmpty());
++
++    } finally {
++      FileUtils.deleteQuietly(dir);
++    }
++  }
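++
++  /*
++   * For reference: the delete guards asserted above depend on DescriptorsMonitor recording which
++   * descriptors reference which provider configuration (updated in onFileChange and onFileDelete).
++   * A rough sketch of that bookkeeping, assuming a map keyed by provider configuration name (the
++   * actual implementation may differ):
++   *
++   *   Map<String, List<String>> providerConfigReferences; // provider config name -> referencing descriptors
++   *   // deleteProviderConfiguration(name) refuses to delete while the list for that name is non-empty
++   */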
++
 +  private void kickMonitor(FileAlterationMonitor monitor) {
 +    for (FileAlterationObserver observer : monitor.getObservers()) {
 +      observer.checkAndNotify();
 +    }
 +  }
 +
++
 +  @Test
 +  public void testProviderParamsOrderIsPreserved() {
 +
 +    Provider provider = new Provider();
 +    String names[] = {"ldapRealm=",
 +        "ldapContextFactory",
 +        "ldapRealm.contextFactory",
 +        "ldapGroupRealm",
 +        "ldapGroupRealm.contextFactory",
 +        "ldapGroupRealm.contextFactory.systemAuthenticationMechanism"
 +    };
 +
 +    Param param = null;
 +    for (String name : names) {
 +      param = new Param();
 +      param.setName(name);
 +      param.setValue(name);
 +      provider.addParam(param);
 +
 +    }
 +    Map<String, String> params = provider.getParams();
 +    Set<String> keySet = params.keySet();
 +    Iterator<String> iter = keySet.iterator();
 +    int i = 0;
 +    while (iter.hasNext()) {
 +      assertTrue(iter.next().equals(names[i++]));
 +    }
 +
 +  }
 +
 +  private class TestTopologyListener implements TopologyListener {
 +
-     public ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
++    ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
 +
 +    @Override
 +    public void handleTopologyEvent(List<TopologyEvent> events) {
 +      this.events.add(events);
 +    }
 +
 +  }
 +
++
++  private class TestTopologyDeleteListener implements TopologyListener {
++
++    FileAlterationListener delegate;
++
++    TestTopologyDeleteListener(FileAlterationListener delegate) {
++      this.delegate = delegate;
++    }
++
++    @Override
++    public void handleTopologyEvent(List<TopologyEvent> events) {
++      for (TopologyEvent event : events) {
++        if (event.getType().equals(TopologyEvent.Type.DELETED)) {
++          delegate.onFileDelete(new File(event.getTopology().getUri()));
++        }
++      }
++    }
++
++  }
++
 +}
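
For reference, the directory layout these tests construct mirrors the gateway configuration
directory that DefaultTopologyService monitors. A condensed sketch of the common setup used by the
tests above (the sub-directory names are those the tests create under a temp dir, matching what the
mocked GatewayConfig reports):

    File dir = TestUtils.createTempDir("DefaultTopologyServiceTest-");
    new File(dir, "topologies").mkdirs();        // handwritten and generated topology XML
    new File(dir, "descriptors").mkdirs();       // simple descriptors (JSON)
    new File(dir, "shared-providers").mkdirs();  // shared provider configurations (XML)

    GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
    EasyMock.expect(config.getGatewayTopologyDir())
            .andReturn(new File(dir, "topologies").getAbsolutePath()).anyTimes();
    EasyMock.expect(config.getGatewayConfDir())
            .andReturn(dir.getAbsolutePath()).anyTimes();
    EasyMock.replay(config);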