Posted to commits@knox.apache.org by am...@apache.org on 2022/03/30 15:22:58 UTC

svn commit: r1899392 [11/11] - in /knox/trunk: ./ books/2.0.0/ books/2.0.0/dev-guide/ books/2.0.0/img/ books/2.0.0/img/adminui/ books/2.0.0/knoxshell-guide/

Added: knox/trunk/books/2.0.0/service_webhdfs.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/2.0.0/service_webhdfs.md?rev=1899392&view=auto
==============================================================================
--- knox/trunk/books/2.0.0/service_webhdfs.md (added)
+++ knox/trunk/books/2.0.0/service_webhdfs.md Wed Mar 30 15:22:57 2022
@@ -0,0 +1,346 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+### WebHDFS ###
+
+REST API access to HDFS in a Hadoop cluster is provided by WebHDFS or HttpFS.
+Both services provide the same API.
+The [WebHDFS REST API](http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/WebHDFS.html) documentation is available online.
+WebHDFS must be enabled in the `hdfs-site.xml` configuration file and exposes the API on each NameNode and DataNode.
+HttpFS, however, is a separate server that must be configured and started independently.
+In the sandbox the `hdfs-site.xml` configuration file is located at `/etc/hadoop/conf/hdfs-site.xml`.
+Note the properties shown below, as they relate to configuration required by the gateway.
+Some of these represent the default values and may not actually be present in `hdfs-site.xml`.
+
+    <property>
+        <name>dfs.webhdfs.enabled</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>sandbox.hortonworks.com:8020</value>
+    </property>
+    <property>
+        <name>dfs.namenode.http-address</name>
+        <value>sandbox.hortonworks.com:50070</value>
+    </property>
+    <property>
+        <name>dfs.namenode.https-address</name>
+        <value>sandbox.hortonworks.com:50470</value>
+    </property>
+
+The values above need to be reflected in each topology descriptor file deployed to the gateway.
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+Please also note that the default NameNode HTTP port changed from 50070 to 9870 in Hadoop 3.0.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://localhost:50070/webhdfs</url>
+    </service>
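+
+For a Hadoop 3.x cluster, where the default NameNode HTTP port is 9870, the WEBHDFS entry would instead look something like the following (host and port are illustrative):
+
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://localhost:9870/webhdfs</url>
+    </service>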
+
+The URL provided for the role NAMENODE does not result in an endpoint being exposed by the gateway.
+This information is only required so that other URLs that reference the NameNode's RPC address can be rewritten.
+This prevents clients from needing to be aware of the internal cluster details.
+
+By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox.
+This could alternatively be configured to use the HTTPS endpoint by providing the correct address.
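+For example, using the sandbox HTTPS address shown earlier, the service entry would look something like the following (a sketch; the host and port must match the cluster's `dfs.namenode.https-address`):
+
+    <service>
+        <role>WEBHDFS</role>
+        <url>https://sandbox.hortonworks.com:50470/webhdfs</url>
+    </service>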
+
+##### HDFS NameNode Federation
+
+NameNode federation introduces some additional complexity when determining to which URL(s) Knox should proxy HDFS-related requests.
+
+The HDFS *hdfs-site.xml* configuration includes additional properties that describe the available NameNode endpoints.
+
+| ------- | ---------------------------------------------------- | ---------------------- |
+| Property Name             | Description                        | Example Value          |
+| dfs.internal.nameservices | The list of defined namespaces     | ns1,ns2                |
+
+For each value enumerated by *dfs.internal.nameservices*, there is another property that specifies the associated NameNode identifiers.
+
+| ------- | ------------------------------------------------------------------ | ---------- |
+| Property Name        | Description                                           | Example Value |
+| dfs.ha.namenodes.ns1 | The NameNode identifiers associated with the ns1 namespace  | nn1,nn2 |
+| dfs.ha.namenodes.ns2 | The NameNode identifiers associated with the ns2 namespace  | nn3,nn4 |
+
+For each NameNode identifier enumerated by these properties, there are additional properties that specify the associated host addresses.
+
+| ------- | ---------------------------------------------------- | ---------------------- |
+| Property Name             | Description                        | Example Value          |
+| dfs.namenode.http-address.ns1.nn1  | The HTTP host address of nn1 NameNode in the ns1 namespace  | host1:50070 |
+| dfs.namenode.https-address.ns1.nn1 | The HTTPS host address of nn1 NameNode in the ns1 namespace | host1:50470 |
+| dfs.namenode.http-address.ns1.nn2  | The HTTP host address of nn2 NameNode in the ns1 namespace  | host2:50070 |
+| dfs.namenode.https-address.ns1.nn2 | The HTTPS host address of nn2 NameNode in the ns1 namespace | host2:50470 |
+| dfs.namenode.http-address.ns2.nn3  | The HTTP host address of nn3 NameNode in the ns2 namespace  | host3:50070 |
+| dfs.namenode.https-address.ns2.nn3 | The HTTPS host address of nn3 NameNode in the ns2 namespace | host3:50470 |
+| dfs.namenode.http-address.ns2.nn4  | The HTTP host address of nn4 NameNode in the ns2 namespace  | host4:50070 |
+| dfs.namenode.https-address.ns2.nn4 | The HTTPS host address of nn4 NameNode in the ns2 namespace | host4:50470 |
+
+So, if Knox should proxy the NameNodes associated with *ns1*, and the configuration does not dictate HTTPS, then the WEBHDFS service must
+contain URLs based on the values of *dfs.namenode.http-address.ns1.nn1* and *dfs.namenode.http-address.ns1.nn2*. Likewise, if Knox should
+proxy the NameNodes associated with *ns2*, the WEBHDFS service must contain URLs based on the values of *dfs.namenode.http-address.ns2.nn3*
+and *dfs.namenode.http-address.ns2.nn4*.
+
+Fortunately, for Ambari-managed clusters, [descriptors](#Simplified+Descriptor+Files) and service discovery can handle this complexity for administrators.
+In the descriptor, the service can be declared without any endpoints, and the desired nameservice can be specified to disambiguate which endpoint(s)
+should be proxied by way of a parameter named *discovery-nameservice*.
+
+    "services": [
+      {
+        "name": "WEBHDFS",
+        "params": {
+          "discovery-nameservice": "ns2"
+        }
+      },
+
+If no nameservice is specified, then the default nameservice will be applied. This default is derived from the value of the
+property named *fs.defaultFS* defined in the HDFS *core-site.xml* configuration.
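+
+For example, a *core-site.xml* entry like the following would make *ns1* the default nameservice (the value shown is illustrative):
+
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://ns1</value>
+    </property>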
+
+<br>
+
+#### WebHDFS URL Mapping ####
+
+For Name Node URLs, the mapping of Knox Gateway accessible WebHDFS URLs to direct WebHDFS URLs is simple.
+
+| ------- | ----------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs` |
+| Cluster | `http://{webhdfs-host}:50070/webhdfs`                                         |
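+
+For example, a directory listing request would map as follows, using the sandbox values from above:
+
+    Gateway: https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
+    Cluster: http://sandbox.hortonworks.com:50070/webhdfs/v1/tmp?op=LISTSTATUS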
+
+However, there is a subtle difference to URLs that are returned by WebHDFS in the Location header of many requests.
+Direct WebHDFS requests may return Location headers that contain the address of a particular DataNode.
+The gateway will rewrite these URLs to ensure subsequent requests come back through the gateway and internal cluster details are protected.
+
+A WebHDFS request to the NameNode to retrieve a file will return a URL of the form below in the Location header.
+
+    http://{datanode-host}:{datanode-port}/webhdfs/v1/{path}?...
+
+Note that this URL contains the network location of a DataNode.
+The gateway will rewrite this URL to look like the URL below.
+
+    https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs/data/v1/{path}?_={encrypted-query-parameters}
+
+The `{encrypted-query-parameters}` will contain the `{datanode-host}` and `{datanode-port}` information.
+This information along with the original query parameters are encrypted so that the internal Hadoop details are protected.
+
+#### WebHDFS Examples ####
+
+The examples below upload a file, download the file and list the contents of the directory.
+
+##### WebHDFS via client DSL
+
+You can use the Groovy example scripts and interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
+
+You can manually type the client DSL script into the KnoxShell interactive Groovy interpreter provided with the distribution.
+The command below starts the KnoxShell in interactive mode.
+
+    java -jar bin/shell.jar
+
+Each line below could be typed or copied into the interactive shell and executed.
+This is provided as an example to illustrate the use of the client DSL.
+
+    // Import the client DSL and a useful utility for working with JSON.
+    import org.apache.knox.gateway.shell.Hadoop
+    import org.apache.knox.gateway.shell.hdfs.Hdfs
+    import groovy.json.JsonSlurper
+
+    // Setup some basic config.
+    gateway = "https://localhost:8443/gateway/sandbox"
+    username = "guest"
+    password = "guest-password"
+
+    // Start the session.
+    session = Hadoop.login( gateway, username, password )
+
+    // Cleanup anything leftover from a previous run.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Upload the README to HDFS.
+    Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()
+
+    // Download the README from HDFS.
+    text = Hdfs.get( session ).from( "/user/guest/example/README" ).now().string
+    println text
+
+    // List the contents of the directory.
+    text = Hdfs.ls( session ).dir( "/user/guest/example" ).now().string
+    json = (new JsonSlurper()).parseText( text )
+    println json.FileStatuses.FileStatus.pathSuffix
+
+    // Cleanup the directory.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Close the session.
+    session.shutdown()
+
+
+##### WebHDFS via cURL
+
+Users can use cURL to directly invoke the REST APIs via the gateway.
+
+###### Optionally clean up the sample directory in case a previous example was run without cleaning up.
+
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+###### Register the name for a sample file README in /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=CREATE'
+
+###### Upload README to /user/guest/example.  Use the README in {GATEWAY_HOME}.
+
+    curl -i -k -u guest:guest-password -T README -X PUT \
+        '{Value of Location header from command above}'
+
+###### List the contents of the directory /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=LISTSTATUS'
+
+###### Request the content of the README file in /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=OPEN'
+
+###### Read the content of the file.
+
+    curl -i -k -u guest:guest-password -X GET \
+        '{Value of Location header from command above}'
+
+###### Optionally clean up the example directory.
+
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+
+##### WebHDFS client DSL
+
+###### get() - Get a file from HDFS (OPEN).
+
+* Request
+    * from( String name ) - The full name of the file in HDFS.
+    * file( String name ) - The name of a local file to create with the content.
+    If this isn't specified the file content must be read from the response.
+* Response
+    * BasicResponse
+    * If the file parameter is specified, the content will be streamed to the file.
+* Example
+    * `Hdfs.get( session ).from( "/user/guest/example/README" ).now().string`
+
+###### ls() - Query the contents of a directory (LISTSTATUS)
+
+* Request
+    * dir( String name ) - The full name of the directory in HDFS.
+* Response
+    * BasicResponse
+* Example
+    * `Hdfs.ls( session ).dir( "/user/guest/example" ).now().string`
+
+###### mkdir() - Create a directory in HDFS (MKDIRS)
+
+* Request
+    * dir( String name ) - The full name of the directory to create in HDFS.
+    * perm( String perm ) - The permissions for the directory (e.g. 644).  Optional: default="777"
+* Response
+    * EmptyResponse - Implicit close().
+* Example
+    * `Hdfs.mkdir( session ).dir( "/user/guest/example" ).now()`
+
+###### put() - Write a file into HDFS (CREATE)
+
+* Request
+    * text( String text ) - Text to upload to HDFS.  Takes precedence over file if both present.
+    * file( String name ) - The name of a local file to upload to HDFS.
+    * to( String name ) - The fully qualified name to create in HDFS.
+* Response
+    * EmptyResponse - Implicit close().
+* Example
+    * `Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()`
+
+###### rm() - Delete a file or directory (DELETE)
+
+* Request
+    * file( String name ) - The fully qualified file or directory name in HDFS.
+    * recursive( Boolean recursive ) - Delete directory and all of its contents if True.  Optional: default=False
+* Response
+    * BasicResponse - Implicit close().
+* Example
+    * `Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()`
+
+
+### WebHDFS HA ###
+
+Knox provides basic failover and retry functionality for REST API calls made to WebHDFS when HDFS HA has been 
+configured and enabled.
+
+To enable HA functionality for WebHDFS in Knox, the following configuration has to be added to the topology file.
+
+    <provider>
+       <role>ha</role>
+       <name>HaProvider</name>
+       <enabled>true</enabled>
+       <param>
+           <name>WEBHDFS</name>
+           <value>maxFailoverAttempts=3;failoverSleep=1000;maxRetryAttempts=300;retrySleep=1000;enabled=true</value>
+       </param>
+    </provider>
+    
+The role and name of the provider above must be as shown. The name in the 'param' section must match the role name of the 
+service being configured for HA, and the value in the 'param' section is the configuration for that particular
+service in HA mode. In this case the service role name is 'WEBHDFS'.
+
+The various configuration parameters are described below:
+     
+* maxFailoverAttempts - 
+This is the maximum number of times a failover will be attempted. The failover strategy at this time is very simplistic
+in that the next URL in the list of URLs provided for the service is used and the one that failed is put at the bottom 
+of the list. If the list is exhausted and the maximum number of attempts has not been reached, then the first URL that failed 
+will be tried again (the list will start again from the original top entry).
+
+* failoverSleep - 
+The amount of time in milliseconds that the process will wait or sleep before attempting to failover.
+
+* maxRetryAttempts - 
+This is the maximum number of times that a retry request will be attempted. Unlike failover, the retry is done on the 
+same URL that failed. This handles a special case in HDFS where the NameNode may be in safe mode. The expectation is that the node will
+come out of safe mode, so a retry is desirable here as opposed to a failover.
+
+* retrySleep - 
+The amount of time in milliseconds that the process will wait or sleep before a retry is issued.
+
+* enabled - 
+Flag to turn the particular service on or off for HA.
+
+For the service configuration itself, the additional URLs should be added to the list. The active 
+URL (at the time of configuration) should ideally be added to the top of the list.
+
+
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://{host1}:50070/webhdfs</url>
+        <url>http://{host2}:50070/webhdfs</url>
+    </service>
+
+

Added: knox/trunk/books/2.0.0/service_yarn.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/2.0.0/service_yarn.md?rev=1899392&view=auto
==============================================================================
--- knox/trunk/books/2.0.0/service_yarn.md (added)
+++ knox/trunk/books/2.0.0/service_yarn.md Wed Mar 30 15:22:57 2022
@@ -0,0 +1,126 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Yarn ###
+
+Knox provides gateway functionality for the REST APIs of the ResourceManager. The ResourceManager REST APIs allow the
+user to get information about the cluster - status on the cluster, metrics on the cluster, scheduler information,
+information about nodes in the cluster, and information about applications on the cluster. Also as of Hadoop version
+2.5.0, the user can submit a new application as well as kill it (or get state) using the 'Writable' APIs.
+
+The documentation for these APIs can be found here:
+
+http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html
+
+To enable this functionality, a topology file needs to have the following configuration:
+
+    <service>
+        <role>RESOURCEMANAGER</role>
+        <url>http://<hostname>:<port>/ws</url>
+    </service>
+
+The default resource manager http port is 8088. If it is configured to some other port, that configuration can be
+found in `yarn-site.xml` under the property `yarn.resourcemanager.webapp.address`.
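+
+For example, a `yarn-site.xml` entry such as the following sets this address (the host name shown is illustrative):
+
+    <property>
+        <name>yarn.resourcemanager.webapp.address</name>
+        <value>sandbox.hortonworks.com:8088</value>
+    </property>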
+
+#### Yarn URL Mapping ####
+
+For Yarn URLs, the mapping of Knox Gateway accessible URLs to direct Yarn URLs is the following.
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/resourcemanager` |
+| Cluster | `http://{yarn-host}:{yarn-port}/ws`                                    |
+
+
+#### Yarn Examples via cURL
+
+Some of the various calls that can be made are listed below, along with curl examples.
+
+    # 0. Getting cluster info
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster'
+    
+    # 1. Getting cluster metrics
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/metrics'
+    
+    # To get the same information in XML format
+    
+    curl -ikv -u guest:guest-password -H Accept:application/xml -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/metrics'
+    
+    # 2. Getting scheduler information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/scheduler'
+    
+    # 3. Getting all the applications listed and their information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps'
+    
+    # 4. Getting applications statistics
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/appstatistics'
+    
+    # Query parameters can also be used to filter the results, as shown below
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/appstatistics?states=accepted,running,finished&applicationTypes=mapreduce'
+    
+    # 5. To get a specific application (please note, replace the application id with a real value)
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}'
+    
+    # 6. To get the attempts made for a particular application
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/appattempts'
+    
+    # 7. To get information about the various nodes
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/nodes'
+    
+    # To get a specific node, use an id obtained in the response above (the node id is scrambled) and issue the following
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/nodes/{node_id}'
+    
+    # 8. To create a new Application
+    
+    curl -ikv -u guest:guest-password -X POST 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/new-application'
+    
+    # An application id is returned from the request above and this can be used to submit an application.
+    
+    # 9. To submit an application, put together a request containing the application id received in the above
+    #    response (please refer to the Yarn REST API documentation).
+    
+    curl -ikv -u guest:guest-password -T request.json -H Content-Type:application/json -X POST 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps'
+    
+    # Here the request is saved in a file called request.json
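+    
+    # A minimal illustrative request body might look like the following; the field names follow the
+    # ResourceManager REST API documentation, while the id, command and resource values are placeholders
+    
+    {
+      "application-id": "{application_id}",
+      "application-name": "example-app",
+      "am-container-spec": {
+        "commands": {
+          "command": "sleep 30"
+        }
+      },
+      "resource": {
+        "memory": 1024,
+        "vCores": 1
+      },
+      "application-type": "YARN"
+    }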
+    
+    # 10. To get application state
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/state'
+    
+    # 11. To kill an application that is running, issue the command below with the application id of the application that is to be killed.
+    #     The contents of the state-killed.json file are:
+    
+    {
+      "state":"KILLED"
+    }
+    
+    curl -ikv -u guest:guest-password -H Content-Type:application/json -X PUT -T state-killed.json 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/state'
+
+Note: Sensitive data such as nodeHttpAddress, nodeHostName and id will be hidden.
+

Added: knox/trunk/books/2.0.0/websocket-support.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/2.0.0/websocket-support.md?rev=1899392&view=auto
==============================================================================
--- knox/trunk/books/2.0.0/websocket-support.md (added)
+++ knox/trunk/books/2.0.0/websocket-support.md Wed Mar 30 15:22:57 2022
@@ -0,0 +1,76 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+## WebSocket Support ##
+
+### Introduction
+
+WebSocket is a communication protocol that allows full duplex communication over a single TCP connection.
+Knox provides out-of-the-box support for the WebSocket protocol; currently only text messages are supported.
+
+### Configuration ###
+
+By default WebSocket functionality is disabled. It can be enabled by changing the `gateway.websocket.feature.enabled` property to `true` in the `<KNOX-HOME>/conf/gateway-site.xml` file.
+
+      <property>
+          <name>gateway.websocket.feature.enabled</name>
+          <value>true</value>
+          <description>Enable/Disable websocket feature.</description>
+      </property>
+
+Service and rewrite rules need to be changed accordingly to match the appropriate WebSocket context.
+
+### Example ###
+
+In the following sample configuration we assume that the backend WebSocket URL is `ws://myhost:9999/ws`, and that the `gateway.websocket.feature.enabled` property is set to `true` as shown above.
+
+#### rewrite ####
+
+Example code snippet from `<KNOX-HOME>/data/services/{myservice}/{version}/rewrite.xml` where myservice = websocket and version = 0.6.0
+
+      <rules>
+        <rule dir="IN" name="WEBSOCKET/ws/inbound" pattern="*://*:*/**/ws">
+          <rewrite template="{$serviceUrl[WEBSOCKET]}/ws"/>
+        </rule>
+      </rules>
+
+#### service ####
+
+Example code snippet from `<KNOX-HOME>/data/services/{myservice}/{version}/service.xml` where myservice = websocket and version = 0.6.0
+
+      <service role="WEBSOCKET" name="websocket" version="0.6.0">
+        <policies>
+              <policy role="webappsec"/>
+              <policy role="authentication" name="Anonymous"/>
+              <policy role="rewrite"/>
+              <policy role="authorization"/>
+        </policies>
+        <routes>
+          <route path="/ws">
+              <rewrite apply="WEBSOCKET/ws/inbound" to="request.url"/>
+          </route>
+        </routes>
+      </service>
+
+#### topology ####
+
+Finally, update the topology file at `<KNOX-HOME>/conf/{topology}.xml`  with the backend service URL
+
+      <service>
+          <role>WEBSOCKET</role>
+          <url>ws://myhost:9999/ws</url>
+      </service>
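+
+With this in place, WebSocket clients connect through the gateway rather than to the backend directly. Assuming the default gateway path and a topology named `sandbox`, the proxied endpoint would look something like the following (a sketch; adjust host, port and topology name to your deployment):
+
+      wss://localhost:8443/gateway/sandbox/ws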

Added: knox/trunk/books/2.0.0/x-forwarded-headers.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/2.0.0/x-forwarded-headers.md?rev=1899392&view=auto
==============================================================================
--- knox/trunk/books/2.0.0/x-forwarded-headers.md (added)
+++ knox/trunk/books/2.0.0/x-forwarded-headers.md Wed Mar 30 15:22:57 2022
@@ -0,0 +1,76 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### X-Forwarded-* Headers Support ###
+Out of the box, Knox provides support for some `X-Forwarded-*` headers through the use of a Servlet Filter. Specifically, the
+headers handled/populated by Knox are:
+
+* X-Forwarded-For
+* X-Forwarded-Proto
+* X-Forwarded-Port
+* X-Forwarded-Host
+* X-Forwarded-Server
+* X-Forwarded-Context
+
+This functionality can be turned off via a configuration setting in the gateway-site.xml file and redeploying the
+affected topology/topologies.
+
+The setting (under the 'configuration' tag) is:
+
+    <property>
+        <name>gateway.xforwarded.enabled</name>
+        <value>false</value>
+    </property>
+
+If this setting is absent, the default behavior is that `X-Forwarded-*` header support is on; in other words,
+`gateway.xforwarded.enabled` defaults to `true`.
+
+
+#### Header population ####
+
+The following are the various rules for population of these headers:
+
+##### X-Forwarded-For #####
+
+This header represents a list of client IP addresses. If the header is already present in the incoming request, Knox appends its own
+value, the client's IP address as Knox sees it, to the end of the comma-separated list.
+
+##### X-Forwarded-Proto #####
+
+The protocol used in the client request. If this header is passed into Knox, its value is maintained; otherwise Knox will
+populate the header with the value 'https' if the request is a secure one, or 'http' otherwise.
+
+##### X-Forwarded-Port #####
+
+The port used in the client request. If this header is passed into Knox, its value is maintained; otherwise Knox will
+populate the header with the port on which the request was received by Knox.
+
+##### X-Forwarded-Host #####
+
+Represents the original host requested by the client in the Host HTTP request header. The value passed into Knox is maintained
+by Knox. If no value is present, Knox populates the header with the value of the HTTP Host header.
+
+##### X-Forwarded-Server #####
+
+The hostname of the server Knox is running on.
+
+##### X-Forwarded-Context #####
+
+This header value contains the context path of the request to Knox.
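+
+As an illustration, for a request made to `https://gateway.example.com:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS`, a backend service behind Knox might receive headers similar to the following (host names and addresses are hypothetical):
+
+    X-Forwarded-For: 192.168.1.100
+    X-Forwarded-Proto: https
+    X-Forwarded-Port: 8443
+    X-Forwarded-Host: gateway.example.com:8443
+    X-Forwarded-Server: knox.internal.example.com
+    X-Forwarded-Context: /gateway/sandbox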
+
+
+

Modified: knox/trunk/build.xml
URL: http://svn.apache.org/viewvc/knox/trunk/build.xml?rev=1899392&r1=1899391&r2=1899392&view=diff
==============================================================================
--- knox/trunk/build.xml (original)
+++ knox/trunk/build.xml Wed Mar 30 15:22:57 2022
@@ -36,6 +36,7 @@
     <property name="book-1-4-0-dir" value="${book-target}/${gateway-artifact}-1-4-0"/>
     <property name="book-1-5-0-dir" value="${book-target}/${gateway-artifact}-1-5-0"/>
 	<property name="book-1-6-0-dir" value="${book-target}/${gateway-artifact}-1-6-0"/>
+	<property name="book-2-0-0-dir" value="${book-target}/${gateway-artifact}-2-0-0"/>
 
     <property name="svn.release.path" value="https://dist.apache.org/repos/dist/release/incubator/${gateway-project}" />
     <property name="svn.staging.path" value="https://dist.apache.org/repos/dist/dev/incubator/${gateway-project}" />
@@ -84,7 +85,7 @@
     </target>
 
     <target name="books" depends="markbook,_books"/>
-    <target name="_books" depends="_book-0-12-0,_book-0-13-0,_book-0-14-0,_book-1-0-0,_book-1-1-0,_book-1-2-0,_book-1-3-0,_book-1-4-0,_book-1-5-0,_book-1-6-0"/>
+    <target name="_books" depends="_book-0-12-0,_book-0-13-0,_book-0-14-0,_book-1-0-0,_book-1-1-0,_book-1-2-0,_book-1-3-0,_book-1-4-0,_book-1-5-0,_book-1-6-0,_book-2-0-0"/>
     <target name="_book-0-12-0" depends="init">
         <delete dir="${book-target}/${gateway-artifact}-0-12-0" includes="**/*.html,**/*.css,**/*.png"/>
         <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
@@ -295,6 +296,31 @@
             <fileset dir="books/1.6.0/img/adminui"/>
         </copy>
     </target>
+    <target name="_book-2-0-0" depends="init">
+        <delete dir="${book-target}/${gateway-artifact}-2-0-0" includes="**/*.html,**/*.css,**/*.png"/>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/2.0.0/book.md"/>
+            <arg value="-o"/><arg value="${book-2-0-0-dir}/user-guide.html"/>
+        </java>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/2.0.0/dev-guide/book.md"/>
+            <arg value="-o"/><arg value="${book-2-0-0-dir}/dev-guide.html"/>
+        </java>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/2.0.0/dev-guide/knoxsso_integration.md"/>
+            <arg value="-o"/><arg value="${book-2-0-0-dir}/knoxsso_integration.html"/>
+        </java>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/2.0.0/knoxshell-guide/knoxshell_user_guide.md"/>
+            <arg value="-o"/><arg value="${book-2-0-0-dir}/knoxshell_user_guide.html"/>
+        </java>
+        <copy todir="${book-target}/${gateway-artifact}-2-0-0">
+            <fileset dir="books/static"/>
+        </copy>
+        <copy todir="${book-target}/${gateway-artifact}-2-0-0/adminui">
+            <fileset dir="books/2.0.0/img/adminui"/>
+        </copy>
+    </target>
 
     <target name="markbook" depends="init" description="Build and package markbook tool.">
         <exec executable="${mvn.cmd}">