You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@knox.apache.org by lm...@apache.org on 2017/08/01 18:56:33 UTC

svn commit: r1803686 [1/2] - in /knox: site/ site/books/knox-0-10-0/ site/books/knox-0-11-0/ site/books/knox-0-12-0/ site/books/knox-0-13-0/ site/books/knox-0-4-0/ site/books/knox-0-5-0/ site/books/knox-0-6-0/ site/books/knox-0-7-0/ site/books/knox-0-8...

Author: lmccay
Date: Tue Aug  1 18:56:32 2017
New Revision: 1803686

URL: http://svn.apache.org/viewvc?rev=1803686&view=rev
Log:
KNOX-988 - Update docs - Colm O hEigeartaigh via lmccay

Modified:
    knox/site/books/knox-0-10-0/deployment-overview.png
    knox/site/books/knox-0-10-0/deployment-provider.png
    knox/site/books/knox-0-10-0/deployment-service.png
    knox/site/books/knox-0-10-0/general_saml_flow.png
    knox/site/books/knox-0-10-0/runtime-overview.png
    knox/site/books/knox-0-10-0/runtime-request-processing.png
    knox/site/books/knox-0-11-0/deployment-overview.png
    knox/site/books/knox-0-11-0/deployment-provider.png
    knox/site/books/knox-0-11-0/deployment-service.png
    knox/site/books/knox-0-11-0/general_saml_flow.png
    knox/site/books/knox-0-11-0/runtime-overview.png
    knox/site/books/knox-0-11-0/runtime-request-processing.png
    knox/site/books/knox-0-12-0/deployment-overview.png
    knox/site/books/knox-0-12-0/deployment-provider.png
    knox/site/books/knox-0-12-0/deployment-service.png
    knox/site/books/knox-0-12-0/general_saml_flow.png
    knox/site/books/knox-0-12-0/runtime-overview.png
    knox/site/books/knox-0-12-0/runtime-request-processing.png
    knox/site/books/knox-0-13-0/deployment-overview.png
    knox/site/books/knox-0-13-0/deployment-provider.png
    knox/site/books/knox-0-13-0/deployment-service.png
    knox/site/books/knox-0-13-0/general_saml_flow.png
    knox/site/books/knox-0-13-0/runtime-overview.png
    knox/site/books/knox-0-13-0/runtime-request-processing.png
    knox/site/books/knox-0-13-0/user-guide.html
    knox/site/books/knox-0-4-0/deployment-overview.png
    knox/site/books/knox-0-4-0/deployment-provider.png
    knox/site/books/knox-0-4-0/deployment-service.png
    knox/site/books/knox-0-4-0/runtime-overview.png
    knox/site/books/knox-0-4-0/runtime-request-processing.png
    knox/site/books/knox-0-5-0/deployment-overview.png
    knox/site/books/knox-0-5-0/deployment-provider.png
    knox/site/books/knox-0-5-0/deployment-service.png
    knox/site/books/knox-0-5-0/runtime-overview.png
    knox/site/books/knox-0-5-0/runtime-request-processing.png
    knox/site/books/knox-0-6-0/deployment-overview.png
    knox/site/books/knox-0-6-0/deployment-provider.png
    knox/site/books/knox-0-6-0/deployment-service.png
    knox/site/books/knox-0-6-0/runtime-overview.png
    knox/site/books/knox-0-6-0/runtime-request-processing.png
    knox/site/books/knox-0-7-0/deployment-overview.png
    knox/site/books/knox-0-7-0/deployment-provider.png
    knox/site/books/knox-0-7-0/deployment-service.png
    knox/site/books/knox-0-7-0/general_saml_flow.png
    knox/site/books/knox-0-7-0/runtime-overview.png
    knox/site/books/knox-0-7-0/runtime-request-processing.png
    knox/site/books/knox-0-8-0/deployment-overview.png
    knox/site/books/knox-0-8-0/deployment-provider.png
    knox/site/books/knox-0-8-0/deployment-service.png
    knox/site/books/knox-0-8-0/general_saml_flow.png
    knox/site/books/knox-0-8-0/runtime-overview.png
    knox/site/books/knox-0-8-0/runtime-request-processing.png
    knox/site/books/knox-0-9-0/deployment-overview.png
    knox/site/books/knox-0-9-0/deployment-provider.png
    knox/site/books/knox-0-9-0/deployment-service.png
    knox/site/books/knox-0-9-0/general_saml_flow.png
    knox/site/books/knox-0-9-0/runtime-overview.png
    knox/site/books/knox-0-9-0/runtime-request-processing.png
    knox/site/books/knox-0-9-1/deployment-overview.png
    knox/site/books/knox-0-9-1/deployment-provider.png
    knox/site/books/knox-0-9-1/deployment-service.png
    knox/site/books/knox-0-9-1/general_saml_flow.png
    knox/site/books/knox-0-9-1/runtime-overview.png
    knox/site/books/knox-0-9-1/runtime-request-processing.png
    knox/site/index.html
    knox/site/issue-tracking.html
    knox/site/license.html
    knox/site/mail-lists.html
    knox/site/project-info.html
    knox/site/team-list.html
    knox/trunk/books/0.13.0/book_gateway-details.md
    knox/trunk/books/0.13.0/book_getting-started.md
    knox/trunk/books/0.13.0/book_service-details.md
    knox/trunk/books/0.13.0/book_ui_service_details.md
    knox/trunk/books/0.13.0/config.md
    knox/trunk/books/0.13.0/config_authn.md
    knox/trunk/books/0.13.0/config_id_assertion.md
    knox/trunk/books/0.13.0/config_webappsec_provider.md
    knox/trunk/books/0.13.0/service_hive.md
    knox/trunk/books/0.13.0/service_webhcat.md
    knox/trunk/books/0.13.0/service_webhdfs.md
    knox/trunk/books/0.13.0/websocket-support.md

Modified: knox/site/books/knox-0-10-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-10-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-10-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-10-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-10-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-10-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-10-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-10-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-10-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-10-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-10-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-10-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-11-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-11-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-11-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-11-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-11-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-11-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-11-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-11-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-11-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-11-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-11-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-11-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-12-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-12-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-12-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-12-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-12-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-12-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-12-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-12-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-12-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-12-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-12-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-12-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-13-0/user-guide.html
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-13-0/user-guide.html?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
--- knox/site/books/knox-0-13-0/user-guide.html (original)
+++ knox/site/books/knox-0-13-0/user-guide.html Tue Aug  1 18:56:32 2017
@@ -202,7 +202,7 @@ curl -i -k -u guest:guest-password -X GE
 <ol>
   <li>The gateway is <em>not</em> collocated with the Hadoop clusters themselves.</li>
   <li>The host names and IP addresses of the cluster services are accessible by the gateway where ever it happens to be running.</li>
-</ol><p>All of the instructions and samples provided here are tailored and tested to work &ldquo;out of the box&rdquo; against a <a href="http://hortonworks.com/products/hortonworks-sandbox">Hortonworks Sandbox 2.x VM</a>.</p><h4><a id="Apache+Knox+Directory+Layout">Apache Knox Directory Layout</a> <a href="#Apache+Knox+Directory+Layout"><img src="markbook-section-link.png"/></a></h4><p>Knox can be installed by expanding the zip/archive file.</p><p>The table below provides a brief explanation of the important files and directories within <code>{GATEWWAY_HOME}</code></p>
+</ol><p>All of the instructions and samples provided here are tailored and tested to work &ldquo;out of the box&rdquo; against a <a href="http://hortonworks.com/products/hortonworks-sandbox">Hortonworks Sandbox 2.x VM</a>.</p><h4><a id="Apache+Knox+Directory+Layout">Apache Knox Directory Layout</a> <a href="#Apache+Knox+Directory+Layout"><img src="markbook-section-link.png"/></a></h4><p>Knox can be installed by expanding the zip/archive file.</p><p>The table below provides a brief explanation of the important files and directories within <code>{GATEWAY_HOME}</code></p>
 <table>
   <thead>
     <tr>
@@ -417,7 +417,7 @@ curl -i -k -u guest:guest-password -X GE
   <li>How URLs are mapped between a gateway that services multiple Hadoop clusters and the clusters themselves</li>
   <li>How the gateway is configured through gateway-site.xml and cluster specific topology files</li>
   <li>How to configure the various policy enforcement provider features such as authentication, authorization, auditing, hostmapping, etc.</li>
-</ul><h3><a id="URL+Mapping">URL Mapping</a> <a href="#URL+Mapping"><img src="markbook-section-link.png"/></a></h3><p>The gateway functions much like a reverse proxy. As such, it maintains a mapping of URLs that are exposed externally by the gateway to URLs that are provided by the Hadoop cluster.</p><h4><a id="Default+Topology+URLs">Default Topology URLs</a> <a href="#Default+Topology+URLs"><img src="markbook-section-link.png"/></a></h4><p>In order to provide compatibility with the Hadoop java client and existing CLI tools, the Knox Gateway has provided a feature called the Default Topology. This refers to a topology deployment that will be able to route URLs without the additional context that the gateway uses for differentiating from one Hadoop cluster to another. This allows the URLs to match those used by existing clients for that may access webhdfs through the Hadoop file system abstraction.</p><p>When a topology file is deployed with a file name that matches the configured de
 fault topology name, a specialized mapping for URLs is installed for that particular topology. This allows the URLs that are expected by the existing Hadoop CLIs for webhdfs to be used in interacting with the specific Hadoop cluster that is represented by the default topology file.</p><p>The configuration for the default topology name is found in gateway-site.xml as a property called: &ldquo;default.app.topology.name&rdquo;.</p><p>The default value for this property is &ldquo;sandbox&rdquo;.</p><p>Therefore, when deploying the sandbox.xml topology, both of the following example URLs work for the same underlying Hadoop cluster:</p>
+</ul><h3><a id="URL+Mapping">URL Mapping</a> <a href="#URL+Mapping"><img src="markbook-section-link.png"/></a></h3><p>The gateway functions much like a reverse proxy. As such, it maintains a mapping of URLs that are exposed externally by the gateway to URLs that are provided by the Hadoop cluster.</p><h4><a id="Default+Topology+URLs">Default Topology URLs</a> <a href="#Default+Topology+URLs"><img src="markbook-section-link.png"/></a></h4><p>In order to provide compatibility with the Hadoop java client and existing CLI tools, the Knox Gateway has provided a feature called the Default Topology. This refers to a topology deployment that will be able to route URLs without the additional context that the gateway uses for differentiating from one Hadoop cluster to another. This allows the URLs to match those used by existing clients that may access webhdfs through the Hadoop file system abstraction.</p><p>When a topology file is deployed with a file name that matches the configured defaul
 t topology name, a specialized mapping for URLs is installed for that particular topology. This allows the URLs that are expected by the existing Hadoop CLIs for webhdfs to be used in interacting with the specific Hadoop cluster that is represented by the default topology file.</p><p>The configuration for the default topology name is found in gateway-site.xml as a property called: &ldquo;default.app.topology.name&rdquo;.</p><p>The default value for this property is &ldquo;sandbox&rdquo;.</p><p>Therefore, when deploying the sandbox.xml topology, both of the following example URLs work for the same underlying Hadoop cluster:</p>
 <pre><code>https://{gateway-host}:{gateway-port}/webhdfs
 https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs
 </code></pre><p>These default topology URLs exist for all of the services in the topology.</p><h4><a id="Fully+Qualified+URLs">Fully Qualified URLs</a> <a href="#Fully+Qualified+URLs"><img src="markbook-section-link.png"/></a></h4><p>Examples of mappings for the WebHDFS, WebHCat, Oozie and HBase are shown below. These mapping are generated from the combination of the gateway configuration file (i.e. <code>{GATEWAY_HOME}/conf/gateway-site.xml</code>) and the cluster topology descriptors (e.g. <code>{GATEWAY_HOME}/conf/topologies/{cluster-name}.xml</code>). The port numbers shown for the Cluster URLs represent the default ports for these services. The actual port number may be different for a given cluster.</p>
@@ -696,7 +696,7 @@ https://{gateway-host}:{gateway-port}/{g
     &lt;/param&gt;
 &lt;/provider&gt;
 </code></pre>
-<dl><dt>/topology/gateway/provider</dt><dd>Groups information for a specific provider.</dd><dt>/topology/gateway/provider/role</dt><dd>Defines the role of a particular provider. There are a number of pre-defined roles used by out-of-the-box provider plugins for the gateway. These roles are: authentication, identity-assertion, authentication, rewrite and hostmap</dd><dt>/topology/gateway/provider/name</dt><dd>Defines the name of the provider for which this configuration applies. There can be multiple provider implementations for a given role. Specifying the name is used identify which particular provider is being configured. Typically each topology descriptor should contain only one provider for each role but there are exceptions.</dd><dt>/topology/gateway/provider/enabled</dt><dd>Allows a particular provider to be enabled or disabled via <code>true</code> or <code>false</code> respectively. When a provider is disabled any filters associated with that provider are excluded from the p
 rocessing chain.</dd><dt>/topology/gateway/provider/param</dt><dd>These elements are used to supply provider configuration. There can be zero or more of these per provider.</dd><dt>/topology/gateway/provider/param/name</dt><dd>The name of a parameter to pass to the provider.</dd><dt>/topology/gateway/provider/param/value</dt><dd>The value of a parameter to pass to the provider.</dd>
+<dl><dt>/topology/gateway/provider</dt><dd>Groups information for a specific provider.</dd><dt>/topology/gateway/provider/role</dt><dd>Defines the role of a particular provider. There are a number of pre-defined roles used by out-of-the-box provider plugins for the gateway. These roles are: authentication, identity-assertion, rewrite and hostmap</dd><dt>/topology/gateway/provider/name</dt><dd>Defines the name of the provider for which this configuration applies. There can be multiple provider implementations for a given role. Specifying the name is used to identify which particular provider is being configured. Typically each topology descriptor should contain only one provider for each role but there are exceptions.</dd><dt>/topology/gateway/provider/enabled</dt><dd>Allows a particular provider to be enabled or disabled via <code>true</code> or <code>false</code> respectively. When a provider is disabled any filters associated with that provider are excluded from the processing cha
 in.</dd><dt>/topology/gateway/provider/param</dt><dd>These elements are used to supply provider configuration. There can be zero or more of these per provider.</dd><dt>/topology/gateway/provider/param/name</dt><dd>The name of a parameter to pass to the provider.</dd><dt>/topology/gateway/provider/param/value</dt><dd>The value of a parameter to pass to the provider.</dd>
 </dl><h5><a id="Service+Configuration">Service Configuration</a> <a href="#Service+Configuration"><img src="markbook-section-link.png"/></a></h5><p>Service configuration is used to specify the location of services within the Hadoop cluster. The general outline of a service element looks like this.</p>
 <pre><code>&lt;service&gt;
     &lt;role&gt;WEBHDFS&lt;/role&gt;
@@ -704,7 +704,7 @@ https://{gateway-host}:{gateway-port}/{g
 &lt;/service&gt;
 </code></pre>
 <dl><dt>/topology/service</dt><dd>Provider information about a particular service within the Hadoop cluster. Not all services are necessarily exposed as gateway endpoints.</dd><dt>/topology/service/role</dt><dd>Identifies the role of this service. Currently supported roles are: WEBHDFS, WEBHCAT, WEBHBASE, OOZIE, HIVE, NAMENODE, JOBTRACKER, RESOURCEMANAGER Additional service roles can be supported via plugins.</dd><dt>topology/service/url</dt><dd>The URL identifying the location of a particular service within the Hadoop cluster.</dd>
-</dl><h4><a id="Hostmap+Provider">Hostmap Provider</a> <a href="#Hostmap+Provider"><img src="markbook-section-link.png"/></a></h4><p>The purpose of the Hostmap provider is to handle situations where host are known by one name within the cluster and another name externally. This frequently occurs when virtual machines are used and in particular when using cloud hosting services. Currently, the Hostmap provider is configured as part of the topology file. The basic structure is shown below.</p>
+</dl><h4><a id="Hostmap+Provider">Hostmap Provider</a> <a href="#Hostmap+Provider"><img src="markbook-section-link.png"/></a></h4><p>The purpose of the Hostmap provider is to handle situations where hosts are known by one name within the cluster and another name externally. This frequently occurs when virtual machines are used and in particular when using cloud hosting services. Currently, the Hostmap provider is configured as part of the topology file. The basic structure is shown below.</p>
 <pre><code>&lt;topology&gt;
     &lt;gateway&gt;
         ...
@@ -765,8 +765,8 @@ ip-10-39-107-209.ec2.internal
     ...
 &lt;/topology&gt;
 </code></pre><h5><a id="Hostmap+Provider+Configuration">Hostmap Provider Configuration</a> <a href="#Hostmap+Provider+Configuration"><img src="markbook-section-link.png"/></a></h5><p>Details about each provider configuration element is enumerated below.</p>
-<dl><dt>topology/gateway/provider/role</dt><dd>The role for a Hostmap provider must always be <code>hostmap</code>.</dd><dt>topology/gateway/provider/name</dt><dd>The Hostmap provider supplied out-of-the-box is selected via the name <code>static</code>.</dd><dt>topology/gateway/provider/enabled</dt><dd>Host mapping can be enabled or disabled by providing <code>true</code> or <code>false</code>.</dd><dt>topology/gateway/provider/param</dt><dd>Host mapping is configured by providing parameters for each external to internal mapping.</dd><dt>topology/gateway/provider/param/name</dt><dd>The parameter names represent an external host names associated with the internal host names provided by the value element. This can be a comma separated list of host names that all represent the same physical host. When mapping from internal to external host name the first external host name in the list is used.</dd><dt>topology/gateway/provider/param/value</dt><dd>The parameter values represent the inte
 rnal host names associated with the external host names provider by the name element. This can be a comma separated list of host names that all represent the same physical host. When mapping from external to internal host names the first internal host name in the list is used.</dd>
-</dl><h4><a id="Logging">Logging</a> <a href="#Logging"><img src="markbook-section-link.png"/></a></h4><p>If necessary you can enable additional logging by editing the <code>log4j.properties</code> file in the <code>conf</code> directory. Changing the <code>rootLogger</code> value from <code>ERROR</code> to <code>DEBUG</code> will generate a large amount of debug logging. A number of useful, more fine loggers are also provided in the file.</p><h4><a id="Java+VM+Options">Java VM Options</a> <a href="#Java+VM+Options"><img src="markbook-section-link.png"/></a></h4><p>TODO - Java VM options doc.</p><h4><a id="Persisting+the+Master+Secret">Persisting the Master Secret</a> <a href="#Persisting+the+Master+Secret"><img src="markbook-section-link.png"/></a></h4><p>The master secret is required to start the server. This secret is used to access secured artifacts by the gateway instance. Keystore, trust stores and credential stores are all protected with the master secret.</p><p>You may persi
 st the master secret by supplying the <em>-persist-master</em> switch at startup. This will result in a warning indicating that persisting the secret is less secure than providing it at startup. We do make some provisions in order to protect the persisted password.</p><p>It is encrypted with AES 128 bit encryption and where possible the file permissions are set to only be accessible by the user that the gateway is running as.</p><p>After persisting the secret, ensure that the file at config/security/master has the appropriate permissions set for your environment. This is probably the most important layer of defense for master secret. Do not assume that the encryption if sufficient protection.</p><p>A specific user should be created to run the gateway this user will be the only user with permissions for the persisted master file.</p><p>See the Knox CLI section for descriptions of the command line utilities related to the master secret.</p><h4><a id="Management+of+Security+Artifacts">
 Management of Security Artifacts</a> <a href="#Management+of+Security+Artifacts"><img src="markbook-section-link.png"/></a></h4><p>There are a number of artifacts that are used by the gateway in ensuring the security of wire level communications, access to protected resources and the encryption of sensitive data. These artifacts can be managed from outside of the gateway instances or generated and populated by the gateway instance itself.</p><p>The following is a description of how this is coordinated with both standalone (development, demo, etc) gateway instances and instances as part of a cluster of gateways in mind.</p><p>Upon start of the gateway server we:</p>
+<dl><dt>topology/gateway/provider/role</dt><dd>The role for a Hostmap provider must always be <code>hostmap</code>.</dd><dt>topology/gateway/provider/name</dt><dd>The Hostmap provider supplied out-of-the-box is selected via the name <code>static</code>.</dd><dt>topology/gateway/provider/enabled</dt><dd>Host mapping can be enabled or disabled by providing <code>true</code> or <code>false</code>.</dd><dt>topology/gateway/provider/param</dt><dd>Host mapping is configured by providing parameters for each external to internal mapping.</dd><dt>topology/gateway/provider/param/name</dt><dd>The parameter names represent the external host names associated with the internal host names provided by the value element. This can be a comma separated list of host names that all represent the same physical host. When mapping from internal to external host name the first external host name in the list is used.</dd><dt>topology/gateway/provider/param/value</dt><dd>The parameter values represent the int
 ernal host names associated with the external host names provider by the name element. This can be a comma separated list of host names that all represent the same physical host. When mapping from external to internal host names the first internal host name in the list is used.</dd>
+</dl><h4><a id="Logging">Logging</a> <a href="#Logging"><img src="markbook-section-link.png"/></a></h4><p>If necessary you can enable additional logging by editing the <code>log4j.properties</code> file in the <code>conf</code> directory. Changing the <code>rootLogger</code> value from <code>ERROR</code> to <code>DEBUG</code> will generate a large amount of debug logging. A number of useful, more fine loggers are also provided in the file.</p><h4><a id="Java+VM+Options">Java VM Options</a> <a href="#Java+VM+Options"><img src="markbook-section-link.png"/></a></h4><p>TODO - Java VM options doc.</p><h4><a id="Persisting+the+Master+Secret">Persisting the Master Secret</a> <a href="#Persisting+the+Master+Secret"><img src="markbook-section-link.png"/></a></h4><p>The master secret is required to start the server. This secret is used to access secured artifacts by the gateway instance. Keystore, trust stores and credential stores are all protected with the master secret.</p><p>You may persi
 st the master secret by supplying the <em>-persist-master</em> switch at startup. This will result in a warning indicating that persisting the secret is less secure than providing it at startup. We do make some provisions in order to protect the persisted password.</p><p>It is encrypted with AES 128 bit encryption and where possible the file permissions are set to only be accessible by the user that the gateway is running as.</p><p>After persisting the secret, ensure that the file at data/security/master has the appropriate permissions set for your environment. This is probably the most important layer of defense for master secret. Do not assume that the encryption is sufficient protection.</p><p>A specific user should be created to run the gateway. This user will be the only user with permissions for the persisted master file.</p><p>See the Knox CLI section for descriptions of the command line utilities related to the master secret.</p><h4><a id="Management+of+Security+Artifacts">M
 anagement of Security Artifacts</a> <a href="#Management+of+Security+Artifacts"><img src="markbook-section-link.png"/></a></h4><p>There are a number of artifacts that are used by the gateway in ensuring the security of wire level communications, access to protected resources and the encryption of sensitive data. These artifacts can be managed from outside of the gateway instances or generated and populated by the gateway instance itself.</p><p>The following is a description of how this is coordinated with both standalone (development, demo, etc) gateway instances and instances as part of a cluster of gateways in mind.</p><p>Upon start of the gateway server we:</p>
 <ol>
   <li>Look for an identity store at <code>data/security/keystores/gateway.jks</code>.  The identity store contains the certificate and private key used to represent the identity of the server for SSL connections and signature creation.
   <ul>
@@ -782,7 +782,7 @@ ip-10-39-107-209.ec2.internal
 <ol>
   <li>Look for a credential store for the topology. For instance, we have a sample topology that gets deployed out of the box. We look for <code>data/security/keystores/sandbox-credentials.jceks</code>. This topology specific credential store is used for storing secrets/passwords that are used for encrypting sensitive data with topology specific keys.
   <ul>
-    <li>If no credential store is found for the topology being deployed then one is created for it.  Population of the aliases is delegated to the configured providers within the system that will require the use of a secret for a particular task.  They may programmatic set the value of the secret or choose to have the value for the specified alias generated through the AliasService.</li>
+    <li>If no credential store is found for the topology being deployed then one is created for it.  Population of the aliases is delegated to the configured providers within the system that will require the use of a secret for a particular task.  They may programmatically set the value of the secret or choose to have the value for the specified alias generated through the AliasService.</li>
     <li>If a credential store is found then we ensure that it can be loaded with the provided master secret and the configured providers have the opportunity to ensure that the aliases are populated and if not to populate them.</li>
   </ul></li>
 </ol><p>By leveraging the algorithm described above we can provide a window of opportunity for management of these artifacts in a number of ways.</p>
@@ -810,14 +810,14 @@ ip-10-39-107-209.ec2.internal
     -storepass {master-secret} -validity 360 -keysize 2048
 </code></pre><p>Keytool will prompt you for a number of elements that will comprise the distinguished name (DN) within your certificate. </p><p><em>NOTE:</em> When it prompts you for your First and Last name be sure to type in the hostname of the machine that your gateway instance will be running on. This is used by clients during hostname verification to ensure that the presented certificate matches the hostname that was used in the URL for the connection - so they need to match.</p><p><em>NOTE:</em> When it prompts for the key password just press enter to ensure that it is the same as the keystore password. Which, as was described earlier, must match the master secret for the gateway instance. Alternatively, you can set it to another passphrase - take note of it and set the gateway-identity-passphrase alias to that passphrase using the Knox CLI.</p><p>See the Knox CLI section for descriptions of the command line utilities related to the management of the keystores.</p><h5><a id="U
 sing+a+CA+Signed+Key+Pair">Using a CA Signed Key Pair</a> <a href="#Using+a+CA+Signed+Key+Pair"><img src="markbook-section-link.png"/></a></h5><p>For certain deployments a certificate key pair that is signed by a trusted certificate authority is required. There are a number of different ways in which these certificates are acquired and can be converted and imported into the Apache Knox keystore.</p><p>The following steps have been used to do this and are provided here for guidance in your installation. You may have to adjust according to your environment.</p><p>General steps:</p>
 <ol>
-  <li><p>Stop Knox gateway and back up all files in <code>{GATEWWAY_HOME}/data/security/keystores</code></p>
+  <li><p>Stop Knox gateway and back up all files in <code>{GATEWAY_HOME}/data/security/keystores</code></p>
   <pre><code>gateway.sh stop
 </code></pre></li>
   <li><p>Create a new master key for Knox and persist it. The master key will be referred to in following steps as <code>$master-key</code></p>
   <pre><code>knoxcli.sh create-master -force
 </code></pre></li>
   <li><p>Create identity keystore gateway.jks. cert in alias gateway-identity </p>
-  <pre><code>cd {GATEWWAY_HOME}/data/security/keystore  
+  <pre><code>cd {GATEWAY_HOME}/data/security/keystore  
 keytool -genkeypair -alias gateway-identity -keyalg RSA -keysize 1024 -dname &quot;CN=$fqdn_knox,OU=hdp,O=sdge&quot; -keypass $keypass -keystore gateway.jks -storepass $master-key -validity 300  
 </code></pre><p>NOTE: <code>$fqdn_knox</code> is the hostname of the Knox host. Some may choose <code>$keypass</code> to be the same as <code>$master-key</code>.</p></li>
   <li><p>Create credential store to store the <code>$keypass</code> in step 3. This creates <code>__gateway-credentials.jceks</code> file</p>
@@ -1133,7 +1133,7 @@ ldapRealm.userDnTemplate=uid={0},ou=peop
             &lt;value&gt;authcBasic&lt;/value&gt;
         &lt;/param&gt;
     &lt;/provider&gt;
-</code></pre><p>This happens to be the way that we are currently configuring Shiro for BASIC/LDAP authentication. This same config approach may be used to achieve other authentication mechanisms or variations on this one. We however have not tested additional uses for it for this release.</p><h4><a id="LDAP+Configuration">LDAP Configuration</a> <a href="#LDAP+Configuration"><img src="markbook-section-link.png"/></a></h4><p>This section discusses the LDAP configuration used above for the Shiro Provider. Some of these configuration elements will need to be customized to reflect your deployment environment.</p><p><strong>main.ldapRealm</strong> - this element indicates the fully qualified class name of the Shiro realm to be used in authenticating the user. The class name provided by default in the sample is the <code>org.apache.shiro.realm.ldap.JndiLdapRealm</code> this implementation provides us with the ability to authenticate but by default has authorization disabled. In order to pr
 ovide authorization - which is seen by Shiro as dependent on an LDAP schema that is specific to each organization - an extension of JndiLdapRealm is generally used to override and implement the doGetAuhtorizationInfo method. In this particular release we are providing a simple authorization provider that can be used along with the Shiro authentication provider.</p><p><strong>main.ldapRealm.userDnTemplate</strong> - in order to bind a simple username to an LDAP server that generally requires a full distinguished name (DN), we must provide the template into which the simple username will be inserted. This template allows for the creation of a DN by injecting the simple username into the common name (CN) portion of the DN. <strong>This element will need to be customized to reflect your deployment environment.</strong> The template provided in the sample is only an example and is valid only within the LDAP schema distributed with Knox and is represented by the users.ldif file in the <co
 de>{GATEWAY_HOME}/conf</code> directory.</p><p><strong>main.ldapRealm.contextFactory.url</strong> - this element is the URL that represents the host and port of LDAP server. It also includes the scheme of the protocol to use. This may be either ldap or ldaps depending on whether you are communicating with the LDAP over SSL (highly recommended). <strong>This element will need to be customized to reflect your deployment environment.</strong>.</p><p><strong>main.ldapRealm.contextFactory.authenticationMechanism</strong> - this element indicates the type of authentication that should be performed against the LDAP server. The current default value is <code>simple</code> which indicates a simple bind operation. This element should not need to be modified and no mechanism other than a simple bind has been tested for this particular release.</p><p><strong>urls./</strong>** - this element represents a single URL_Ant_Path_Expression and the value the Shiro filter chain to apply to it. This par
 ticular sample indicates that all paths into the application have the same Shiro filter chain applied. The paths are relative to the application context path. The use of the value <code>authcBasic</code> here indicates that BASIC authentication is expected for every path into the application. Adding an additional Shiro filter to that chain for validating that the request isSecure() and over SSL can be achieved by changing the value to <code>ssl, authcBasic</code>. It is not likely that you need to change this element for your environment.</p><h4><a id="Active+Directory+-+Special+Note">Active Directory - Special Note</a> <a href="#Active+Directory+-+Special+Note"><img src="markbook-section-link.png"/></a></h4><p>You would use LDAP configuration as documented above to authenticate against Active Directory as well.</p><p>Some Active Directory specific things to keep in mind:</p><p>Typical AD main.ldapRealm.userDnTemplate value looks slightly different, such as</p>
+</code></pre><p>This happens to be the way that we are currently configuring Shiro for BASIC/LDAP authentication. This same config approach may be used to achieve other authentication mechanisms or variations on this one. We however have not tested additional uses for it for this release.</p><h4><a id="LDAP+Configuration">LDAP Configuration</a> <a href="#LDAP+Configuration"><img src="markbook-section-link.png"/></a></h4><p>This section discusses the LDAP configuration used above for the Shiro Provider. Some of these configuration elements will need to be customized to reflect your deployment environment.</p><p><strong>main.ldapRealm</strong> - this element indicates the fully qualified class name of the Shiro realm to be used in authenticating the user. The class name provided by default in the sample is the <code>org.apache.shiro.realm.ldap.JndiLdapRealm</code> this implementation provides us with the ability to authenticate but by default has authorization disabled. In order to pr
 ovide authorization - which is seen by Shiro as dependent on an LDAP schema that is specific to each organization - an extension of JndiLdapRealm is generally used to override and implement the doGetAuthorizationInfo method. In this particular release we are providing a simple authorization provider that can be used along with the Shiro authentication provider.</p><p><strong>main.ldapRealm.userDnTemplate</strong> - in order to bind a simple username to an LDAP server that generally requires a full distinguished name (DN), we must provide the template into which the simple username will be inserted. This template allows for the creation of a DN by injecting the simple username into the common name (CN) portion of the DN. <strong>This element will need to be customized to reflect your deployment environment.</strong> The template provided in the sample is only an example and is valid only within the LDAP schema distributed with Knox and is represented by the users.ldif file in the <co
 de>{GATEWAY_HOME}/conf</code> directory.</p><p><strong>main.ldapRealm.contextFactory.url</strong> - this element is the URL that represents the host and port of LDAP server. It also includes the scheme of the protocol to use. This may be either ldap or ldaps depending on whether you are communicating with the LDAP over SSL (highly recommended). <strong>This element will need to be customized to reflect your deployment environment.</strong></p><p><strong>main.ldapRealm.contextFactory.authenticationMechanism</strong> - this element indicates the type of authentication that should be performed against the LDAP server. The current default value is <code>simple</code> which indicates a simple bind operation. This element should not need to be modified and no mechanism other than a simple bind has been tested for this particular release.</p><p><strong>urls./</strong>** - this element represents a single URL_Ant_Path_Expression and the value is the Shiro filter chain to apply to it. This par
 ticular sample indicates that all paths into the application have the same Shiro filter chain applied. The paths are relative to the application context path. The use of the value <code>authcBasic</code> here indicates that BASIC authentication is expected for every path into the application. Adding an additional Shiro filter to that chain for validating that the request isSecure() and over SSL can be achieved by changing the value to <code>ssl, authcBasic</code>. It is not likely that you need to change this element for your environment.</p><h4><a id="Active+Directory+-+Special+Note">Active Directory - Special Note</a> <a href="#Active+Directory+-+Special+Note"><img src="markbook-section-link.png"/></a></h4><p>You would use LDAP configuration as documented above to authenticate against Active Directory as well.</p><p>Some Active Directory specific things to keep in mind:</p><p>Typical AD main.ldapRealm.userDnTemplate value looks slightly different, such as</p>
 <pre><code>cn={0},cn=users,DC=lab,DC=sample,dc=com
 </code></pre><p>Please compare this with a typical Apache DS main.ldapRealm.userDnTemplate value and make note of the difference:</p>
 <pre><code>`uid={0},ou=people,dc=hadoop,dc=apache,dc=org`
@@ -1142,7 +1142,7 @@ ldapRealm.userDnTemplate=uid={0},ou=peop
   <li><strong>main.ldapRealm.contextFactory.url</strong> must be changed to have the <code>ldaps</code> protocol scheme and the port must be the SSL listener port on your LDAP server.</li>
   <li>Identity certificate (keypair) provisioned to LDAP server - your LDAP server specific documentation should indicate what is required for providing a cert or keypair to represent the LDAP server identity to connecting clients.</li>
   <li>Trusting the LDAP Server&rsquo;s public key - if the LDAP Server&rsquo;s identity certificate is issued by a well known and trusted certificate authority and is already represented in the JRE&rsquo;s cacerts truststore then you don&rsquo;t need to do anything for trusting the LDAP server&rsquo;s cert. If, however, the cert is selfsigned or issued by an untrusted authority you will need to either add it to the cacerts keystore or to another truststore that you may direct Knox to utilize through a system property.</li>
-</ol><h4><a id="Session+Configuration">Session Configuration</a> <a href="#Session+Configuration"><img src="markbook-section-link.png"/></a></h4><p>Knox maps each cluster topology to a web application and leverages standard JavaEE session management.</p><p>To configure session idle timeout for the topology, please specify value of parameter sessionTimeout for ShiroProvider in your topology file. If you do not specify the value for this parameter, it defaults to 30 minutes.</p><p>The definition would look like the following in the topoloogy file:</p>
+</ol><h4><a id="Session+Configuration">Session Configuration</a> <a href="#Session+Configuration"><img src="markbook-section-link.png"/></a></h4><p>Knox maps each cluster topology to a web application and leverages standard JavaEE session management.</p><p>To configure session idle timeout for the topology, please specify value of parameter sessionTimeout for ShiroProvider in your topology file. If you do not specify the value for this parameter, it defaults to 30 minutes.</p><p>The definition would look like the following in the topology file:</p>
 <pre><code>...
 &lt;provider&gt;
     &lt;role&gt;authentication&lt;/role&gt;
@@ -1947,7 +1947,7 @@ url -k --header &quot;SM_USER: nobody@ca
         &lt;enabled&gt;true&lt;/enabled&gt;
         &lt;&lt;param&gt; ... &lt;/param&gt;
     &lt;/provider&gt;
-</code></pre><h3><a id="Configuration">Configuration</a> <a href="#Configuration"><img src="markbook-section-link.png"/></a></h3><p>All the configuration for &lsquo;HadoopGroupProvider&rsquo; resides in the provider section in a gateway topology file. The &lsquo;hadoop.security.group.mapping&rsquo; property determines the implementation. Some of the valid implementation are as follows </p><h4><a id="org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback">org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</a> <a href="#org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback"><img src="markbook-section-link.png"/></a></h4><p>This is the default implementation and will be picked up if &lsquo;hadoop.security.group.mapping&rsquo; is not specified. This implementation will determine if the Java Native Interface (JNI) is available. If JNI is available, the implementation will use the API within Hadoop to resolve a list of groups for a user. If JNI is not av
 ailable then the shell implementation, org.apache.hadoop.security.ShellBasedUnixGroupsMapping, is used, which shells out with the &lsquo;bash -c groups&rsquo; command (for a Linux/Unix environment) or the &lsquo;net group&rsquo; command (for a Windows environment) to resolve a list of groups for a user.</p><h4><a id="org.apache.hadoop.security.LdapGroupsMapping">org.apache.hadoop.security.LdapGroupsMapping</a> <a href="#org.apache.hadoop.security.LdapGroupsMapping"><img src="markbook-section-link.png"/></a></h4><p>This implementation connects directly to an LDAP server to resolve the list of groups. However, this should only be used if the required groups reside exclusively in LDAP, and are not materialized on the Unix servers.</p><p>For more information on the implementation and properties refer to Hadoop Group Mapping.</p><h3><a id="Example">Example</a> <a href="#Example"><img src="markbook-section-link.png"/></a></h3><p>The following example snippet works with the demo ldap serve
 r that ships with Apache Knox. Replace the existing &lsquo;Default&rsquo; identity-assertion provider with the one below (HadoopGroupProvider).</p>
+</code></pre><h3><a id="Configuration">Configuration</a> <a href="#Configuration"><img src="markbook-section-link.png"/></a></h3><p>All the configuration for &lsquo;HadoopGroupProvider&rsquo; resides in the provider section in a gateway topology file. The &lsquo;hadoop.security.group.mapping&rsquo; property determines the implementation. Some of the valid implementations are as follows </p><h4><a id="org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback">org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</a> <a href="#org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback"><img src="markbook-section-link.png"/></a></h4><p>This is the default implementation and will be picked up if &lsquo;hadoop.security.group.mapping&rsquo; is not specified. This implementation will determine if the Java Native Interface (JNI) is available. If JNI is available, the implementation will use the API within Hadoop to resolve a list of groups for a user. If JNI is not a
 vailable then the shell implementation, org.apache.hadoop.security.ShellBasedUnixGroupsMapping, is used, which shells out with the &lsquo;bash -c groups&rsquo; command (for a Linux/Unix environment) or the &lsquo;net group&rsquo; command (for a Windows environment) to resolve a list of groups for a user.</p><h4><a id="org.apache.hadoop.security.LdapGroupsMapping">org.apache.hadoop.security.LdapGroupsMapping</a> <a href="#org.apache.hadoop.security.LdapGroupsMapping"><img src="markbook-section-link.png"/></a></h4><p>This implementation connects directly to an LDAP server to resolve the list of groups. However, this should only be used if the required groups reside exclusively in LDAP, and are not materialized on the Unix servers.</p><p>For more information on the implementation and properties refer to Hadoop Group Mapping.</p><h3><a id="Example">Example</a> <a href="#Example"><img src="markbook-section-link.png"/></a></h3><p>The following example snippet works with the demo ldap serv
 er that ships with Apache Knox. Replace the existing &lsquo;Default&rsquo; identity-assertion provider with the one below (HadoopGroupProvider).</p>
 <pre><code>    &lt;provider&gt;
         &lt;role&gt;identity-assertion&lt;/role&gt;
         &lt;name&gt;HadoopGroupProvider&lt;/name&gt;
@@ -2382,7 +2382,7 @@ APACHE_HOME/bin/apachectl -k stop
       <td>false</td>
     </tr>
   </tbody>
-</table><h5><a id="X-Frame-Options">X-Frame-Options</a> <a href="#X-Frame-Options"><img src="markbook-section-link.png"/></a></h5><p>Cross Frame Scripting and Clickjacking are attackes that can be prevented by controlling the ability for a third-party to embed an application or resource within a Frame, IFrame or Object html element. This can be done adding the X-Frame-Options HTTP header to responses.</p><h6><a id="Config">Config</a> <a href="#Config"><img src="markbook-section-link.png"/></a></h6>
+</table><h5><a id="X-Frame-Options">X-Frame-Options</a> <a href="#X-Frame-Options"><img src="markbook-section-link.png"/></a></h5><p>Cross Frame Scripting and Clickjacking are attacks that can be prevented by controlling the ability for a third-party to embed an application or resource within a Frame, IFrame or Object html element. This can be done adding the X-Frame-Options HTTP header to responses.</p><h6><a id="Config">Config</a> <a href="#Config"><img src="markbook-section-link.png"/></a></h6>
 <table>
   <thead>
     <tr>
@@ -3097,7 +3097,7 @@ APACHE_HOME/bin/apachectl -k stop
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
---></p><h2><a id="Websocket+Support">Websocket Support</a> <a href="#Websocket+Support"><img src="markbook-section-link.png"/></a></h2><h3><a id="Introduction">Introduction</a> <a href="#Introduction"><img src="markbook-section-link.png"/></a></h3><p>Websocket is a communication protocol that allows full duplex communication over single TCP connection. Knox provides out-of-the-box support for websocket protocol, currently only text messages are supported.</p><h3><a id="Configuration">Configuration</a> <a href="#Configuration"><img src="markbook-section-link.png"/></a></h3><p>By default websocket functionality is disabled, it can be easily enabled by changing the &lsquo;gateway.websocket.feature.enabled&rsquo; property to &lsquo;true&rsquo; in <KNOX-HOME>/conf/gateway-site.xml file. </p>
+--></p><h2><a id="Websocket+Support">Websocket Support</a> <a href="#Websocket+Support"><img src="markbook-section-link.png"/></a></h2><h3><a id="Introduction">Introduction</a> <a href="#Introduction"><img src="markbook-section-link.png"/></a></h3><p>Websocket is a communication protocol that allows full duplex communication over a single TCP connection. Knox provides out-of-the-box support for websocket protocol, currently only text messages are supported.</p><h3><a id="Configuration">Configuration</a> <a href="#Configuration"><img src="markbook-section-link.png"/></a></h3><p>By default websocket functionality is disabled, it can be easily enabled by changing the &lsquo;gateway.websocket.feature.enabled&rsquo; property to &lsquo;true&rsquo; in &lt;KNOX-HOME&gt;/conf/gateway-site.xml file. </p>
 <pre><code>  &lt;property&gt;
       &lt;name&gt;gateway.websocket.feature.enabled&lt;/name&gt;
       &lt;value&gt;true&lt;/value&gt;
@@ -3633,7 +3633,7 @@ dep/commons-codec-1.7.jar
     <li>JSON Path <a href="https://code.google.com/p/json-path/">API</a></li>
     <li>GPath <a href="http://groovy.codehaus.org/GPath">Overview</a></li>
   </ul></li>
-</ul><h2><a id="Service+Details">Service Details</a> <a href="#Service+Details"><img src="markbook-section-link.png"/></a></h2><p>In the sections that follow the integrations currently available out of the box with the gateway will be described. In general these sections will include examples that demonstrate how to access each of these services via the gateway. In many cases this will include both the use of <a href="http://curl.haxx.se/">cURL</a> as a REST API client as well as the use of the Knox Client DSL. You may notice that there are some minor differences between using the REST API of a given service via the gateway. In general this is necessary in order to achieve the goal of leaking internal Hadoop cluster details to the client.</p><p>Keep in mind that the gateway uses a plugin model for supporting Hadoop services. Check back with the <a href="http://knox.apache.org">Apache Knox</a> site for the latest news on plugin availability. You can also create your own custom plugin
  to extend the capabilities of the gateway.</p><p>These are the current Hadoop services with built-in support.</p>
+</ul><h2><a id="Service+Details">Service Details</a> <a href="#Service+Details"><img src="markbook-section-link.png"/></a></h2><p>In the sections that follow, the integrations currently available out of the box with the gateway will be described. In general these sections will include examples that demonstrate how to access each of these services via the gateway. In many cases this will include both the use of <a href="http://curl.haxx.se/">cURL</a> as a REST API client as well as the use of the Knox Client DSL. You may notice that there are some minor differences between using the REST API of a given service via the gateway. In general this is necessary in order to achieve the goal of not leaking internal Hadoop cluster details to the client.</p><p>Keep in mind that the gateway uses a plugin model for supporting Hadoop services. Check back with the <a href="http://knox.apache.org">Apache Knox</a> site for the latest news on plugin availability. You can also create your own custom p
 lugin to extend the capabilities of the gateway.</p><p>These are the current Hadoop services with built-in support.</p>
 <ul>
   <li><a href="#WebHDFS">WebHDFS</a></li>
   <li><a href="#WebHCat">WebHCat</a></li>
@@ -3652,7 +3652,7 @@ dep/commons-codec-1.7.jar
   <li>The <a href="http://curl.haxx.se/">cURL</a> command line HTTP client utility is installed and functional.</li>
   <li>A few examples optionally require the use of commands from a standard Groovy installation. These examples are optional but to try them you will need Groovy <a href="http://groovy.codehaus.org/Installing+Groovy">installed</a>.</li>
   <li>The default configuration for all of the samples is setup for use with Hortonworks&rsquo; <a href="http://hortonworks.com/products/hortonworks-sandbox">Sandbox</a> version 2.</li>
-</ul><h3><a id="Customization">Customization</a> <a href="#Customization"><img src="markbook-section-link.png"/></a></h3><p>Using these samples with other Hadoop installations will require changes to the steps describe here as well as changes to referenced sample scripts. This will also likely require changes to the gateway&rsquo;s default configuration. In particular host names, ports user names and password may need to be changed to match your environment. These changes may need to be made to gateway configuration and also the Groovy sample script files in the distribution. All of the values that may need to be customized in the sample scripts can be found together at the top of each of these files.</p><h3><a id="cURL">cURL</a> <a href="#cURL"><img src="markbook-section-link.png"/></a></h3><p>The cURL HTTP client command line utility is used extensively in the examples for each service. In particular this form of the cURL command line is used repeatedly.</p>
+</ul><h3><a id="Customization">Customization</a> <a href="#Customization"><img src="markbook-section-link.png"/></a></h3><p>Using these samples with other Hadoop installations will require changes to the steps described here as well as changes to referenced sample scripts. This will also likely require changes to the gateway&rsquo;s default configuration. In particular host names, ports, user names and passwords may need to be changed to match your environment. These changes may need to be made to gateway configuration and also the Groovy sample script files in the distribution. All of the values that may need to be customized in the sample scripts can be found together at the top of each of these files.</p><h3><a id="cURL">cURL</a> <a href="#cURL"><img src="markbook-section-link.png"/></a></h3><p>The cURL HTTP client command line utility is used extensively in the examples for each service. In particular this form of the cURL command line is used repeatedly.</p>
 <pre><code>curl -i -k -u guest:guest-password ...
 </code></pre><p>The option -i (aka &ndash;include) is used to output HTTP response header information. This will be important when the content of the HTTP Location header is required for subsequent requests.</p><p>The option -k (aka &ndash;insecure) is used to avoid any issues resulting from the use of demonstration SSL certificates.</p><p>The option -u (aka &ndash;user) is used to provide the credentials to be used when the client is challenged by the gateway.</p><p>Keep in mind that the samples do not use the cookie features of cURL for the sake of simplicity. Therefore each request via cURL will result in an authentication.</p><h3><a id="WebHDFS">WebHDFS</a> <a href="#WebHDFS"><img src="markbook-section-link.png"/></a></h3><p>REST API access to HDFS in a Hadoop cluster is provided by WebHDFS. The <a href="http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/WebHDFS.html">WebHDFS REST API</a> documentation is available online. WebHDFS must be enabled in the hdfs-si
 te.xml configuration file. In the sandbox this configuration file is located at <code>/etc/hadoop/conf/hdfs-site.xml</code>. Note the properties shown below as they are related to configuration required by the gateway. Some of these represent the default values and may not actually be present in hdfs-site.xml.</p>
 <pre><code>&lt;property&gt;
@@ -3680,7 +3680,7 @@ dep/commons-codec-1.7.jar
     &lt;role&gt;WEBHDFS&lt;/role&gt;
     &lt;url&gt;http://localhost:50070/webhdfs&lt;/url&gt;
 &lt;/service&gt;
-</code></pre><p>The URL provided for the role NAMENODE does not result in an endpoint being exposed by the gateway. This information is only required so that other URLs can be rewritten that reference the Name Node&rsquo;s RPC address. This prevents clients from needed to be aware of the internal cluster details.</p><p>By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox. This could alternatively be configured to use the HTTPS endpoint by provided the correct address.</p><h4><a id="WebHDFS+URL+Mapping">WebHDFS URL Mapping</a> <a href="#WebHDFS+URL+Mapping"><img src="markbook-section-link.png"/></a></h4><p>For Name Node URLs, the mapping of Knox Gateway accessible WebHDFS URLs to direct WebHDFS URLs is simple.</p>
+</code></pre><p>The URL provided for the role NAMENODE does not result in an endpoint being exposed by the gateway. This information is only required so that other URLs can be rewritten that reference the Name Node&rsquo;s RPC address. This prevents clients from needing to be aware of the internal cluster details.</p><p>By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox. This could alternatively be configured to use the HTTPS endpoint by providing the correct address.</p><h4><a id="WebHDFS+URL+Mapping">WebHDFS URL Mapping</a> <a href="#WebHDFS+URL+Mapping"><img src="markbook-section-link.png"/></a></h4><p>For Name Node URLs, the mapping of Knox Gateway accessible WebHDFS URLs to direct WebHDFS URLs is simple.</p>
 <table>
   <tbody>
     <tr>
@@ -3695,7 +3695,7 @@ dep/commons-codec-1.7.jar
 </table><p>However, there is a subtle difference to URLs that are returned by WebHDFS in the Location header of many requests. Direct WebHDFS requests may return Location headers that contain the address of a particular DataNode. The gateway will rewrite these URLs to ensure subsequent requests come back through the gateway and internal cluster details are protected.</p><p>A WebHDFS request to the NameNode to retrieve a file will return a URL of the form below in the Location header.</p>
 <pre><code>http://{datanode-host}:{data-node-port}/webhdfs/v1/{path}?...
 </code></pre><p>Note that this URL contains the network location of a DataNode. The gateway will rewrite this URL to look like the URL below.</p>
-<pre><code>https://{gateway-host}:{gateway-port}/{gateway-path}/{custer-name}/webhdfs/data/v1/{path}?_={encrypted-query-parameters}
+<pre><code>https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs/data/v1/{path}?_={encrypted-query-parameters}
 </code></pre><p>The <code>{encrypted-query-parameters}</code> will contain the <code>{datanode-host}</code> and <code>{datanode-port}</code> information. This information along with the original query parameters are encrypted so that the internal Hadoop details are protected.</p><h4><a id="WebHDFS+Examples">WebHDFS Examples</a> <a href="#WebHDFS+Examples"><img src="markbook-section-link.png"/></a></h4><p>The examples below upload a file, download the file and list the contents of the directory.</p><h5><a id="WebHDFS+via+client+DSL">WebHDFS via client DSL</a> <a href="#WebHDFS+via+client+DSL"><img src="markbook-section-link.png"/></a></h5><p>You can use the Groovy example scripts and interpreter provided with the distribution.</p>
 <pre><code>java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
 java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
@@ -3735,7 +3735,7 @@ Hdfs.rm( session ).file( &quot;/user/gue
 
 // Clean the session.
 session.shutdown()
-</code></pre><h5><a id="WebHDFS+via+cURL">WebHDFS via cURL</a> <a href="#WebHDFS+via+cURL"><img src="markbook-section-link.png"/></a></h5><p>Use can use cURL to directly invoke the REST APIs via the gateway.</p><h6><a id="Optionally+cleanup+the+sample+directory+in+case+a+previous+example+was+run+without+cleaning+up.">Optionally cleanup the sample directory in case a previous example was run without cleaning up.</a> <a href="#Optionally+cleanup+the+sample+directory+in+case+a+previous+example+was+run+without+cleaning+up."><img src="markbook-section-link.png"/></a></h6>
+</code></pre><h5><a id="WebHDFS+via+cURL">WebHDFS via cURL</a> <a href="#WebHDFS+via+cURL"><img src="markbook-section-link.png"/></a></h5><p>Users can use cURL to directly invoke the REST APIs via the gateway.</p><h6><a id="Optionally+cleanup+the+sample+directory+in+case+a+previous+example+was+run+without+cleaning+up.">Optionally cleanup the sample directory in case a previous example was run without cleaning up.</a> <a href="#Optionally+cleanup+the+sample+directory+in+case+a+previous+example+was+run+without+cleaning+up."><img src="markbook-section-link.png"/></a></h6>
 <pre><code>curl -i -k -u guest:guest-password -X DELETE \
     &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&amp;recursive=true&#39;
 </code></pre><h6><a id="Register+the+name+for+a+sample+file+README+in+/user/guest/example.">Register the name for a sample file README in /user/guest/example.</a> <a href="#Register+the+name+for+a+sample+file+README+in+/user/guest/example."><img src="markbook-section-link.png"/></a></h6>
@@ -3878,7 +3878,7 @@ session.shutdown()
     &lt;role&gt;WEBHCAT&lt;/role&gt;
     &lt;url&gt;http://localhost:50111/templeton&lt;/url&gt;
 &lt;/service&gt;
-</code></pre><p>The URLs provided for the role NAMENODE and JOBTRACKER do not result in an endpoint being exposed by the gateway. This information is only required so that other URLs can be rewritten that reference the appropriate RPC address for Hadoop services. This prevents clients from needed to be aware of the internal cluster details. Note that for Hadoop 2 the JOBTRACKER RPC endpoint is provided by the Resource Manager component.</p><p>By default the gateway is configured to use the HTTP endpoint for WebHCat in the Sandbox. This could alternatively be configured to use the HTTPS endpoint by provided the correct address.</p><h4><a id="WebHCat+URL+Mapping">WebHCat URL Mapping</a> <a href="#WebHCat+URL+Mapping"><img src="markbook-section-link.png"/></a></h4><p>For WebHCat URLs, the mapping of Knox Gateway accessible URLs to direct WebHCat URLs is simple.</p>
+</code></pre><p>The URLs provided for the role NAMENODE and JOBTRACKER do not result in an endpoint being exposed by the gateway. This information is only required so that other URLs can be rewritten that reference the appropriate RPC address for Hadoop services. This prevents clients from needing to be aware of the internal cluster details. Note that for Hadoop 2 the JOBTRACKER RPC endpoint is provided by the Resource Manager component.</p><p>By default the gateway is configured to use the HTTP endpoint for WebHCat in the Sandbox. This could alternatively be configured to use the HTTPS endpoint by providing the correct address.</p><h4><a id="WebHCat+URL+Mapping">WebHCat URL Mapping</a> <a href="#WebHCat+URL+Mapping"><img src="markbook-section-link.png"/></a></h4><p>For WebHCat URLs, the mapping of Knox Gateway accessible URLs to direct WebHCat URLs is simple.</p>
 <table>
   <tbody>
     <tr>
@@ -3890,7 +3890,7 @@ session.shutdown()
       <td><code>http://{webhcat-host}:{webhcat-port}/templeton}</code> </td>
     </tr>
   </tbody>
-</table><h4><a id="WebHCat+via+cURL">WebHCat via cURL</a> <a href="#WebHCat+via+cURL"><img src="markbook-section-link.png"/></a></h4><p>Use can use cURL to directly invoke the REST APIs via the gateway. For the full list of available REST calls look at the WebHCat documentation. This is a simple curl command to test the connection:</p>
+</table><h4><a id="WebHCat+via+cURL">WebHCat via cURL</a> <a href="#WebHCat+via+cURL"><img src="markbook-section-link.png"/></a></h4><p>Users can use cURL to directly invoke the REST APIs via the gateway. For the full list of available REST calls look at the WebHCat documentation. This is a simple curl command to test the connection:</p>
 <pre><code>curl -i -k -u guest:guest-password &#39;https://localhost:8443/gateway/sandbox/templeton/v1/status&#39;
 </code></pre><h4><a id="WebHCat+Example">WebHCat Example</a> <a href="#WebHCat+Example"><img src="markbook-section-link.png"/></a></h4><p>This example will submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL. There are several ways to do this depending upon your preference.</p><p>You can use the &ldquo;embedded&rdquo; Groovy interpreter provided with the distribution.</p>
 <pre><code>java -jar bin/shell.jar samples/ExampleWebHCatJob.groovy
@@ -4743,7 +4743,7 @@ session.shutdown(10, SECONDS)
       <li>hive-jdbc-0.14.0-standalone.jar;</li>
       <li>commons-logging-1.1.3.jar;</li>
     </ul></li>
-    <li>Connection URL has to be following: <code>jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive</code></li>
+    <li>Connection URL has to be the following: <code>jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive</code></li>
     <li>Look at <a href="https://cwiki.apache.org/confluence/display/Hive/GettingStarted#GettingStarted-DDLOperations">https://cwiki.apache.org/confluence/display/Hive/GettingStarted#GettingStarted-DDLOperations</a> for examples.  Hint: For testing it would be better to execute <code>set hive.security.authorization.enabled=false</code> as the first statement.  Hint: Good examples of Hive DDL/DML can be found here <a href="http://gettingstarted.hadooponazure.com/hw/hive.html">http://gettingstarted.hadooponazure.com/hw/hive.html</a></li>
   </ol></li>
 </ol><h5><a id="Customization">Customization</a> <a href="#Customization"><img src="markbook-section-link.png"/></a></h5><p>This example may need to be tailored to the execution environment. In particular host name, host port, user name, user password and context path may need to be changed to match your environment. In particular there is one example file in the distribution that may need to be customized. Take a moment to review this file. All of the values that may need to be customized can be found together at the top of the file.</p>
@@ -4828,7 +4828,7 @@ public class HiveJDBCSample {
     }
   }
 }
-</code></pre><h6><a id="Groovy">Groovy</a> <a href="#Groovy"><img src="markbook-section-link.png"/></a></h6><p>Make sure that <code>{GATEWAY_HOME/ext}</code> directory contains following libraries for successful execution:</p>
+</code></pre><h6><a id="Groovy">Groovy</a> <a href="#Groovy"><img src="markbook-section-link.png"/></a></h6><p>Make sure that <code>{GATEWAY_HOME/ext}</code> directory contains the following libraries for successful execution:</p>
 <ul>
   <li>hive-jdbc-0.14.0-standalone.jar;</li>
   <li>commons-logging-1.1.3.jar;</li>
@@ -5446,7 +5446,7 @@ DriverManager.getConnection(url, props);
 <ul>
   <li>In the first cURL request, the quotes are necessary around the URL or else a command line terminal will not include the <code>&amp;password</code> query parameter in the request.</li>
   <li>This API call does not require any credentials to receive a response from Knox, but expect to receive 401 responses from each of the services if none are provided.</li>
-</ul><h2><a id="UI+Service+Details">UI Service Details</a> <a href="#UI+Service+Details"><img src="markbook-section-link.png"/></a></h2><p>In the sections that follow the integrations for proxying various UIs currently available out of the box with the gateway will be described. These sections will include examples that demonstrate how to access each of these services via the gateway.</p><p>These are the current Hadoop services with built-in support for their UIs.</p>
+</ul><h2><a id="UI+Service+Details">UI Service Details</a> <a href="#UI+Service+Details"><img src="markbook-section-link.png"/></a></h2><p>In the sections that follow, the integrations for proxying various UIs currently available out of the box with the gateway will be described. These sections will include examples that demonstrate how to access each of these services via the gateway.</p><p>These are the current Hadoop services with built-in support for their UIs.</p>
 <ul>
   <li><a href="#Name+Node+UI">Name Node UI</a></li>
   <li><a href="#Job+History+UI">Job History UI</a></li>
@@ -5479,7 +5479,7 @@ DriverManager.getConnection(url, props);
     &lt;role&gt;WEBHDFS&lt;/role&gt;
     &lt;url&gt;http://sandbox.hortonworks.com:50070/webhdfs&lt;/url&gt;
 &lt;/service&gt;
-</code></pre><p>By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox. This could alternatively be configured to use the HTTPS endpoint by provided the correct address.</p><h4><a id="Name+Node+UI+URL+Mapping">Name Node UI URL Mapping</a> <a href="#Name+Node+UI+URL+Mapping"><img src="markbook-section-link.png"/></a></h4><p>For Name Node UI URLs, the mapping of Knox Gateway accessible HDFS UI URLs to direct HDFS UI URLs is:</p>
+</code></pre><p>By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox. This could alternatively be configured to use the HTTPS endpoint by providing the correct address.</p><h4><a id="Name+Node+UI+URL+Mapping">Name Node UI URL Mapping</a> <a href="#Name+Node+UI+URL+Mapping"><img src="markbook-section-link.png"/></a></h4><p>For Name Node UI URLs, the mapping of Knox Gateway accessible HDFS UI URLs to direct HDFS UI URLs is:</p>
 <table>
   <tbody>
     <tr>

Modified: knox/site/books/knox-0-4-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-4-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-4-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-4-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-4-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-4-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-4-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-4-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-4-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-4-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-5-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-5-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-5-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-5-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-5-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-5-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-5-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-5-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-5-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-5-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-6-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-6-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-6-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-6-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-6-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-6-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-6-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-6-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-6-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-6-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-7-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-7-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-7-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-7-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-7-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-7-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-7-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-7-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-7-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-7-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-7-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-7-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-8-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-8-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-8-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-8-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-8-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-8-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-8-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-8-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-8-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-8-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-8-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-8-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-0/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-0/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-0/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-0/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-0/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-0/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-0/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-0/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-0/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-0/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-0/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-0/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-1/deployment-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-1/deployment-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-1/deployment-provider.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-1/deployment-provider.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-1/deployment-service.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-1/deployment-service.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-1/general_saml_flow.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-1/general_saml_flow.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-1/runtime-overview.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-1/runtime-overview.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/books/knox-0-9-1/runtime-request-processing.png
URL: http://svn.apache.org/viewvc/knox/site/books/knox-0-9-1/runtime-request-processing.png?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
Binary files - no diff available.

Modified: knox/site/index.html
URL: http://svn.apache.org/viewvc/knox/site/index.html?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
--- knox/site/index.html (original)
+++ knox/site/index.html Tue Aug  1 18:56:32 2017
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2017-07-11
+ | Generated by Apache Maven Doxia at 2017-08-01
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20170711" />
+    <meta name="Date-Revision-yyyymmdd" content="20170801" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; REST API and Application Gateway for the Apache Hadoop Ecosystem</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2017-07-11</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2017-08-01</li> 
             
                             </ul>
       </div>

Modified: knox/site/issue-tracking.html
URL: http://svn.apache.org/viewvc/knox/site/issue-tracking.html?rev=1803686&r1=1803685&r2=1803686&view=diff
==============================================================================
--- knox/site/issue-tracking.html (original)
+++ knox/site/issue-tracking.html Tue Aug  1 18:56:32 2017
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2017-07-11
+ | Generated by Apache Maven Doxia at 2017-08-01
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20170711" />
+    <meta name="Date-Revision-yyyymmdd" content="20170801" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; Issue Tracking</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2017-07-11</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2017-08-01</li> 
             
                             </ul>
       </div>