Posted to commits@knox.apache.org by lm...@apache.org on 2016/01/17 05:57:07 UTC

svn commit: r1725059 [10/10] - in /knox: site/ site/books/knox-0-4-0/ site/books/knox-0-5-0/ site/books/knox-0-6-0/ site/books/knox-0-7-0/ site/books/knox-0-8-0/ trunk/ trunk/books/0.8.0/ trunk/books/0.8.0/dev-guide/

Added: knox/trunk/books/0.8.0/service_hive.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_hive.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_hive.md (added)
+++ knox/trunk/books/0.8.0/service_hive.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,325 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Hive ###
+
+The [Hive wiki pages](https://cwiki.apache.org/confluence/display/Hive/Home) describe Hive installation and configuration processes.
+In the Sandbox, the configuration file for Hive is located at `/etc/hive/hive-site.xml`.
+Hive Server has to be started in HTTP mode.
+Note the properties shown below as they are related to configuration required by the gateway.
+
+    <property>
+        <name>hive.server2.thrift.http.port</name>
+        <value>10001</value>
+        <description>Port number when in HTTP mode.</description>
+    </property>
+
+    <property>
+        <name>hive.server2.thrift.http.path</name>
+        <value>cliservice</value>
+        <description>Path component of URL endpoint when in HTTP mode.</description>
+    </property>
+
+    <property>
+        <name>hive.server2.transport.mode</name>
+        <value>http</value>
+        <description>Server transport mode. "binary" or "http".</description>
+    </property>
+
+    <property>
+        <name>hive.server2.allow.user.substitution</name>
+        <value>true</value>
+    </property>
+
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>HIVE</role>
+        <url>http://localhost:10001/cliservice</url>
+    </service>
+
+By default the gateway is configured to use the HTTP transport mode for Hive in the Sandbox.
+
+#### Hive JDBC URL Mapping ####
+
+| ------- | ------------------------------------------------------------------------------- |
+| Gateway | `jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive` |
+| Cluster | `http://{hive-host}:{hive-port}/{hive-path}` |
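+
+For example, with the default Sandbox topology and the sample values used later in this section (Knox truststore `/usr/lib/knox/data/security/keystores/gateway.jks`, truststore password `knoxsecret`, gateway path `gateway`, cluster name `sandbox`), the gateway side of this mapping would look like the URL below. These values are illustrative only and should be replaced with those of your environment.
+
+    jdbc:hive2://localhost:8443/;ssl=true;sslTrustStore=/usr/lib/knox/data/security/keystores/gateway.jks;trustStorePassword=knoxsecret;transportMode=http;httpPath=gateway/sandbox/hive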
+
+#### Hive Examples ####
+
+This guide provides detailed examples for how to do some basic interactions with Hive via the Apache Knox Gateway.
+
+##### Hive Setup #####
+
+1. Make sure you are running the correct version of Hive to ensure JDBC/Thrift/HTTP support.
+2. Make sure Hive Server is running on the correct port.
+3. Make sure Hive Server is running in HTTP mode.
+4. Client side (JDBC):
+     1. Hive JDBC in HTTP mode depends on the following minimal set of libraries to run successfully (they must be in the classpath):
+         * hive-jdbc-0.14.0-standalone.jar
+         * commons-logging-1.1.3.jar
+     2. The connection URL has to be the following: `jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive`
+     3. Look at https://cwiki.apache.org/confluence/display/Hive/GettingStarted#GettingStarted-DDLOperations for examples.
+       Hint: For testing it would be better to execute `set hive.security.authorization.enabled=false` as the first statement.
+       Hint: Good examples of Hive DDL/DML can be found at http://gettingstarted.hadooponazure.com/hw/hive.html
+
+##### Customization #####
+
+This example may need to be tailored to the execution environment.
+In particular the host name, host port, user name, user password and context path may need to be changed to match your environment.
+There is one example file in the distribution that may need to be customized.
+Take a moment to review this file.
+All of the values that may need to be customized can be found together at the top of the file.
+
+* samples/hive/java/jdbc/sandbox/HiveJDBCSample.java
+
+##### Client JDBC Example #####
+
+This sample demonstrates creating a new table, loading data into it from the file system local to the Hive server, and querying data from that table.
+
+###### Java ######
+
+    import java.sql.Connection;
+    import java.sql.DriverManager;
+    import java.sql.ResultSet;
+    import java.sql.SQLException;
+    import java.sql.Statement;
+
+    import java.util.logging.Level;
+    import java.util.logging.Logger;
+
+    public class HiveJDBCSample {
+
+      public static void main( String[] args ) {
+        Connection connection = null;
+        Statement statement = null;
+        ResultSet resultSet = null;
+
+        try {
+          String user = "guest";
+          String password = user + "-password";
+          String gatewayHost = "localhost";
+          int gatewayPort = 8443;
+          String trustStore = "/usr/lib/knox/data/security/keystores/gateway.jks";
+          String trustStorePassword = "knoxsecret";
+          String contextPath = "gateway/sandbox/hive";
+          String connectionString = String.format( "jdbc:hive2://%s:%d/;ssl=true;sslTrustStore=%s;trustStorePassword=%s?hive.server2.transport.mode=http;hive.server2.thrift.http.path=/%s", gatewayHost, gatewayPort, trustStore, trustStorePassword, contextPath );
+
+          // load Hive JDBC Driver
+          Class.forName( "org.apache.hive.jdbc.HiveDriver" );
+
+          // configure JDBC connection
+          connection = DriverManager.getConnection( connectionString, user, password );
+
+          statement = connection.createStatement();
+
+          // disable Hive authorization - it could be omitted if Hive authorization
+          // was configured properly
+          statement.execute( "set hive.security.authorization.enabled=false" );
+
+          // create sample table
+          statement.execute( "CREATE TABLE logs(column1 string, column2 string, column3 string, column4 string, column5 string, column6 string, column7 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '" );
+
+          // load data into Hive from file /tmp/log.txt which is placed on the local file system
+          statement.execute( "LOAD DATA LOCAL INPATH '/tmp/log.txt' OVERWRITE INTO TABLE logs" );
+
+          resultSet = statement.executeQuery( "SELECT * FROM logs" );
+
+          while ( resultSet.next() ) {
+            System.out.println( resultSet.getString( 1 ) + " --- " + resultSet.getString( 2 ) + " --- " + resultSet.getString( 3 ) + " --- " + resultSet.getString( 4 ) );
+          }
+        } catch ( ClassNotFoundException ex ) {
+          Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+        } catch ( SQLException ex ) {
+          Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+        } finally {
+          if ( resultSet != null ) {
+            try {
+              resultSet.close();
+            } catch ( SQLException ex ) {
+              Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+            }
+          }
+          if ( statement != null ) {
+            try {
+              statement.close();
+            } catch ( SQLException ex ) {
+              Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+            }
+          }
+          if ( connection != null ) {
+            try {
+              connection.close();
+            } catch ( SQLException ex ) {
+              Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+            }
+          }
+        }
+      }
+    }
+
+###### Groovy ######
+
+Make sure that the `{GATEWAY_HOME}/ext` directory contains the following libraries for successful execution:
+
+- hive-jdbc-0.14.0-standalone.jar
+- commons-logging-1.1.3.jar
+
+There are several ways to execute this sample depending upon your preference.
+
+You can use the Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/hive/groovy/jdbc/sandbox/HiveJDBCSample.groovy
+
+You can manually type in the KnoxShell DSL script into the interactive Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file below will need to be typed or copied into the interactive shell.
+
+    import java.sql.DriverManager
+
+    user = "guest";
+    password = user + "-password";
+    gatewayHost = "localhost";
+    gatewayPort = 8443;
+    trustStore = "/usr/lib/knox/data/security/keystores/gateway.jks";
+    trustStorePassword = "knoxsecret";
+    contextPath = "gateway/sandbox/hive";
+    connectionString = String.format( "jdbc:hive2://%s:%d/;ssl=true;sslTrustStore=%s;trustStorePassword=%s?hive.server2.transport.mode=http;hive.server2.thrift.http.path=/%s", gatewayHost, gatewayPort, trustStore, trustStorePassword, contextPath );
+
+    // Load Hive JDBC Driver
+    Class.forName( "org.apache.hive.jdbc.HiveDriver" );
+
+    // Configure JDBC connection
+    connection = DriverManager.getConnection( connectionString, user, password );
+
+    statement = connection.createStatement();
+
+    // Disable Hive authorization - This can be omitted if Hive authorization is configured properly
+    statement.execute( "set hive.security.authorization.enabled=false" );
+
+    // Create sample table
+    statement.execute( "CREATE TABLE logs(column1 string, column2 string, column3 string, column4 string, column5 string, column6 string, column7 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '" );
+
+    // Load data into Hive from file /tmp/log.txt which is placed on the local file system
+    statement.execute( "LOAD DATA LOCAL INPATH '/tmp/sample.log' OVERWRITE INTO TABLE logs" );
+
+    resultSet = statement.executeQuery( "SELECT * FROM logs" );
+
+    while ( resultSet.next() ) {
+      System.out.println( resultSet.getString( 1 ) + " --- " + resultSet.getString( 2 ) );
+    }
+
+    resultSet.close();
+    statement.close();
+    connection.close();
+
+The examples use a file `log.txt` with the following content:
+
+    2012-02-03 18:35:34 SampleClass6 [INFO] everything normal for id 577725851
+    2012-02-03 18:35:34 SampleClass4 [FATAL] system problem at id 1991281254
+    2012-02-03 18:35:34 SampleClass3 [DEBUG] detail for id 1304807656
+    2012-02-03 18:35:34 SampleClass3 [WARN] missing id 423340895
+    2012-02-03 18:35:34 SampleClass5 [TRACE] verbose detail for id 2082654978
+    2012-02-03 18:35:34 SampleClass0 [ERROR] incorrect id  1886438513
+    2012-02-03 18:35:34 SampleClass9 [TRACE] verbose detail for id 438634209
+    2012-02-03 18:35:34 SampleClass8 [DEBUG] detail for id 2074121310
+    2012-02-03 18:35:34 SampleClass0 [TRACE] verbose detail for id 1505582508
+    2012-02-03 18:35:34 SampleClass0 [TRACE] verbose detail for id 1903854437
+    2012-02-03 18:35:34 SampleClass7 [DEBUG] detail for id 915853141
+    2012-02-03 18:35:34 SampleClass3 [TRACE] verbose detail for id 303132401
+    2012-02-03 18:35:34 SampleClass6 [TRACE] verbose detail for id 151914369
+    2012-02-03 18:35:34 SampleClass2 [DEBUG] detail for id 146527742
+    ...
+
+Expected output:
+
+    2012-02-03 --- 18:35:34 --- SampleClass6 --- [INFO]
+    2012-02-03 --- 18:35:34 --- SampleClass4 --- [FATAL]
+    2012-02-03 --- 18:35:34 --- SampleClass3 --- [DEBUG]
+    2012-02-03 --- 18:35:34 --- SampleClass3 --- [WARN]
+    2012-02-03 --- 18:35:34 --- SampleClass5 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass0 --- [ERROR]
+    2012-02-03 --- 18:35:34 --- SampleClass9 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass8 --- [DEBUG]
+    2012-02-03 --- 18:35:34 --- SampleClass0 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass0 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass7 --- [DEBUG]
+    2012-02-03 --- 18:35:34 --- SampleClass3 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass6 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass2 --- [DEBUG]
+    ...
+
+### HiveServer2 HA ###
+
+Knox provides basic failover functionality for calls made to Hive Server when more than one HiveServer2 instance is
+installed in the cluster and registered with the same Zookeeper ensemble. The HA functionality in this case fetches the
+HiveServer2 URL information from a Zookeeper ensemble, so the user need only supply the necessary Zookeeper
+configuration and not the Hive connection URLs.
+
+To enable HA functionality for Hive in Knox the following configuration has to be added to the topology file.
+
+    <provider>
+        <role>ha</role>
+        <name>HaProvider</name>
+        <enabled>true</enabled>
+        <param>
+            <name>HIVE</name>
+            <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true;zookeeperEnsemble=machine1:2181,machine2:2181,machine3:2181;
+            zookeeperNamespace=hiveserver2</value>
+        </param>
+    </provider>
+
+The role and name of the provider above must be as shown. The name in the 'param' section must match that of the service
+role name that is being configured for HA and the value in the 'param' section is the configuration for that particular
+service in HA mode. In this case the name is 'HIVE'.
+
+The various configuration parameters are described below:
+
+* maxFailoverAttempts -
+This is the maximum number of times a failover will be attempted. The failover strategy at this time is very simplistic
+in that the next URL in the list of URLs provided for the service is used and the one that failed is put at the bottom
+of the list. If the list is exhausted and the maximum number of attempts has not been reached, the list is refreshed from
+Zookeeper and the first URL is tried again.
+
+* failoverSleep -
+The amount of time in milliseconds that the process will wait or sleep before attempting to failover.
+
+* enabled -
+Flag to turn the particular service on or off for HA.
+
+* zookeeperEnsemble -
+A comma-separated list of host names (or IP addresses) of the Zookeeper hosts that make up the ensemble with which the Hive
+servers register their information. This value can be obtained from Hive's config file `hive-site.xml` as the value of the
+parameter `hive.zookeeper.quorum`.
+
+* zookeeperNamespace -
+This is the namespace under which HiveServer2 information is registered in the Zookeeper ensemble. This value can be
+obtained from Hive's config file `hive-site.xml` as the value of the parameter `hive.server2.zookeeper.namespace`.
+
+
+For the service configuration itself, no URLs need to be added. For example:
+
+    <service>
+        <role>HIVE</role>
+    </service>
+
+Please note that there is no `<url>` tag specified here as the URLs for the Hive servers are obtained from Zookeeper.
+

Added: knox/trunk/books/0.8.0/service_oozie.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_oozie.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_oozie.md (added)
+++ knox/trunk/books/0.8.0/service_oozie.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,197 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Oozie ###
+
+
+Oozie is a Hadoop component that allows complex job workflows to be submitted and managed.
+Please refer to the latest [Oozie documentation](http://oozie.apache.org/docs/4.2.0/) for details.
+
+In order to make Oozie accessible via the gateway there are several important Hadoop configuration settings.
+These all relate to the network endpoint exposed by various Hadoop services.
+
+The HTTP endpoint at which Oozie is running can be found via the `oozie.base.url` property in the `oozie-site.xml` file.
+In a Sandbox installation this can typically be found in `/etc/oozie/conf/oozie-site.xml`.
+
+    <property>
+        <name>oozie.base.url</name>
+        <value>http://sandbox.hortonworks.com:11000/oozie</value>
+    </property>
+
+The RPC address at which the Resource Manager exposes the JOBTRACKER endpoint can be found via the `yarn.resourcemanager.address` property in the `yarn-site.xml` file.
+In a Sandbox installation this can typically be found in `/etc/hadoop/conf/yarn-site.xml`.
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>
+
+The RPC address at which the Name Node listens can be found via the `dfs.namenode.rpc-address` property in the `hdfs-site.xml` file.
+In a Sandbox installation this can typically be found in `/etc/hadoop/conf/hdfs-site.xml`.
+
+    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>sandbox.hortonworks.com:8020</value>
+    </property>
+
+If HDFS has been configured to be in High Availability mode (HA), then instead of the RPC address mentioned above for the Name Node, look up and use the logical name of the service found via `dfs.nameservices` in `hdfs-site.xml`. For example,
+
+    <property>
+        <name>dfs.nameservices</name>
+        <value>ha-service</value>
+    </property>
+
+Please note that only one of these, either the RPC endpoint or the HA service name, should be used as the NAMENODE HDFS URL in the gateway topology file.
+
+The information above must be provided to the gateway via a topology descriptor file.
+These topology descriptor files are placed in `{GATEWAY_HOME}/deployments`.
+An example that is setup for the default configuration of the Sandbox is `{GATEWAY_HOME}/deployments/sandbox.xml`.
+These values will need to be changed for non-default Sandbox or other Hadoop cluster configuration.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>JOBTRACKER</role>
+        <url>rpc://localhost:8050</url>
+    </service>
+    <service>
+        <role>OOZIE</role>
+        <url>http://localhost:11000/oozie</url>
+    </service>
+
+#### Oozie URL Mapping ####
+
+For Oozie URLs, the mapping of Knox Gateway accessible URLs to direct Oozie URLs is simple.
+
+| ------- | --------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/oozie` |
+| Cluster | `http://{oozie-host}:{oozie-port}/oozie`                                    |
+
+
+#### Oozie Request Changes ####
+
+TODO - In some cases the Oozie requests need to be slightly different when made through the gateway.
+These changes are required in order to protect the client from knowing the internal structure of the Hadoop cluster.
+
+
+#### Oozie Example via Client DSL ####
+
+This example will also submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL.
+However in this case the job will be submitted via an Oozie workflow.
+There are several ways to do this depending upon your preference.
+
+You can use the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleOozieWorkflow.groovy
+
+You can manually type in the KnoxShell DSL script into the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file `samples/ExampleOozieWorkflow.groovy` will need to be typed or copied into the interactive shell.
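+
+The sketch below is not the content of `samples/ExampleOozieWorkflow.groovy` itself, but a minimal illustration of the Workflow DSL calls described in the #[Oozie Client DSL] section further down. It assumes that the workflow application (workflow.xml, lib/ and input) has already been uploaded to HDFS, for example using the cURL steps in the next section, that `workflow-configuration.xml` is present in the working directory, and that the `Workflow` class lives in the `org.apache.hadoop.gateway.shell.workflow` package alongside the other KnoxShell classes (an assumption; check your distribution if the import fails). The host and credentials are the Sandbox defaults used throughout this guide.
+
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.workflow.Workflow
+    import groovy.json.JsonSlurper
+
+    gateway = "https://localhost:8443/gateway/sandbox"
+    session = Hadoop.login( gateway, "guest", "guest-password" )
+
+    // Submit the workflow.  The job ID is returned in the JSON body of the response;
+    // the "id" field name is the one used by the Oozie REST API.
+    text = Workflow.submit( session ).file( "workflow-configuration.xml" ).action( "start" ).now().string
+    jobId = (new JsonSlurper()).parseText( text ).id
+    println "Submitted job " + jobId
+
+    // Check the status of the job.
+    println Workflow.status( session ).jobId( jobId ).now().string
+
+    session.shutdown()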
+
+#### Oozie Example via cURL
+
+The example below illustrates the sequence of curl commands that could be used to run a "word count" map reduce job via an Oozie workflow.
+
+It utilizes the hadoop-examples.jar from a Hadoop install for running a simple word count job.
+A copy of that jar has been included in the samples directory for convenience.
+
+In addition a workflow definition file and a configuration file are required.
+These have not been included but are available for download.
+Download [workflow-definition.xml](workflow-definition.xml) and [workflow-configuration.xml](workflow-configuration.xml) and store them in the {GATEWAY_HOME} directory.
+Review the contents of workflow-configuration.xml to ensure that it matches your environment.
+
+Take care to follow the instructions below where replacement values are required.
+These replacement values are identified with { } markup.
+
+    # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+    # 1. Create the inode for workflow definition file in /user/guest/example
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/workflow.xml?op=CREATE'
+
+    # 2. Upload the workflow definition file.  This file can be found in {GATEWAY_HOME}/templates
+    curl -i -k -u guest:guest-password -T workflow-definition.xml -X PUT \
+        '{Value of Location header from command above}'
+
+    # 3. Create the inode for hadoop-examples.jar in /user/guest/example/lib
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/lib/hadoop-examples.jar?op=CREATE'
+
+    # 4. Upload hadoop-examples.jar to /user/guest/example/lib.  Use a hadoop-examples.jar from a Hadoop install.
+    curl -i -k -u guest:guest-password -T samples/hadoop-examples.jar -X PUT \
+        '{Value of Location header from command above}'
+
+    # 5. Create the inode for a sample input file readme.txt in /user/guest/example/input.
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/input/README?op=CREATE'
+
+    # 6. Upload readme.txt to /user/guest/example/input.  Use the readme.txt in {GATEWAY_HOME}.
+    # The sample below uses this README file found in {GATEWAY_HOME}.
+    curl -i -k -u guest:guest-password -T README -X PUT \
+        '{Value of Location header from command above}'
+
+    # 7. Submit the job via Oozie
+    # Take note of the Job ID in the JSON response as this will be used in the next step.
+    curl -i -k -u guest:guest-password -H Content-Type:application/xml -T workflow-configuration.xml \
+        -X POST 'https://localhost:8443/gateway/sandbox/oozie/v1/jobs?action=start'
+
+    # 8. Query the job status via Oozie.
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/oozie/v1/job/{Job ID from JSON body}'
+
+    # 9. List the contents of the output directory /user/guest/example/output
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/output?op=LISTSTATUS'
+
+    # 10. Optionally cleanup the test directory
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+### Oozie Client DSL ###
+
+#### submit() - Submit a workflow job.
+
+* Request
+    * text (String) - XML formatted workflow configuration string.
+    * file (String) - A filename containing XML formatted workflow configuration.
+    * action (String) - The initial action to take on the job.  Optional: Default is "start".
+* Response
+    * BasicResponse
+* Example
+    * `Workflow.submit(session).file(localFile).action("start").now()`
+
+#### status() - Query the status of a workflow job.
+
+* Request
+    * jobId (String) - The job ID to check. This is the ID received when the job was created.
+* Response
+    * BasicResponse
+* Example
+    * `Workflow.status(session).jobId(jobId).now().string`
+
+### Oozie HA ###
+
+Please look at #[Default Service HA support]
+
+
+

Added: knox/trunk/books/0.8.0/service_service_test.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_service_test.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_service_test.md (added)
+++ knox/trunk/books/0.8.0/service_service_test.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,221 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Service Test ###
+
+The gateway supports a Service Test API that can be used to test Knox's ability to connect to each of the different Hadoop services via a simple HTTP GET request. To access this API, add the following entry to the topology for which you wish to run the service test.
+
+	<service>
+		<role>SERVICE-TEST</role>
+	</service>
+
+After adding the above to a topology, you can make a cURL request with the following structure
+
+	curl -i -k -u guest:guest-password https://{gateway-host}:{gateway-port}/{gateway-path}/{topology-name}/service-test
+
+Below is an example response. The gateway is also capable of returning XML if specified in the request's "Accept" HTTP header.
+
+	{
+	    "serviceTestWrapper": {
+		   "Tests": {
+			  "ServiceTest": [
+				 {
+					"serviceName": "WEBHDFS",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/webhdfs/v1/?op=LISTSTATUS",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHCAT",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/status",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHCAT",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHCAT",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version/hive",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHCAT",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version/hadoop",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "OOZIE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/v1/admin/build-version",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "OOZIE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/v1/admin/status",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "OOZIE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/versions",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHBASE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/version",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHBASE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/version/cluster",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHBASE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/status/cluster",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "WEBHBASE",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "RESOURCEMANAGER",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/info",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "RESOURCEMANAGER",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/metrics",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "RESOURCEMANAGER",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/apps",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "FALCON",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/admin/stack",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "FALCON",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/admin/version",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "FALCON",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/serialize",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "FALCON",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/vertices/all",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "FALCON",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/edges/all",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "STORM",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/cluster/configuration",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "STORM",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/cluster/summary",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "STORM",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/supervisor/summary",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 },
+				 {
+					"serviceName": "STORM",
+					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/topology/summary",
+					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+					"httpCode": 200,
+					"message": "Request sucessful."
+				 }
+			  ]
+		   },
+		   "messages": {
+			  "message": [
+			  ]
+		   }
+	    }
+	}
+
+
+We can see that this service-test makes HTTP requests to each of the services through Knox using the specified topology. The test will only make calls to those services that have entries within the topology file.
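+
+The same call can also be made programmatically. The plain Groovy sketch below (it does not use the KnoxShell DSL) issues the GET request with HTTP Basic credentials and asks for an XML response via the "Accept" header. The host, credentials, truststore path and password are the Sandbox defaults used elsewhere in this guide and are assumptions to adjust for your environment.
+
+    // Trust the gateway's self-signed certificate.  If the certificate's hostname does not
+    // match the one in the URL, a custom HostnameVerifier may also be needed.
+    System.setProperty( "javax.net.ssl.trustStore", "/usr/lib/knox/data/security/keystores/gateway.jks" )
+    System.setProperty( "javax.net.ssl.trustStorePassword", "knoxsecret" )
+
+    url = new URL( "https://localhost:8443/gateway/sandbox/service-test" )
+    conn = url.openConnection()
+    auth = "guest:guest-password".bytes.encodeBase64().toString()
+    conn.setRequestProperty( "Authorization", "Basic " + auth )
+    conn.setRequestProperty( "Accept", "application/xml" )   // omit to get the default JSON response
+    println "HTTP " + conn.responseCode
+    println conn.inputStream.text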
+
+##### Adding and Changing test URLs
+
+URLs for each service are stored in `{GATEWAY_HOME}/data/services/{service-name}/{service-version}/service.xml`. Each `<testURL>` element represents a service resource that will be tested if the service is set up in the topology. You can add or remove these from the service.xml files. Just note that if you add URLs there is no guarantee of the order in which they will be tested. All default URLs have been tested and work on various clusters. If a new URL is added and doesn't respond as expected, it is up to the user to determine whether the URL is correct or not.
+
+##### Some important things to note:
+ - This API call does not require any credentials to receive a response from Knox, but expect to receive 401 responses from each of the services if none are provided.

Added: knox/trunk/books/0.8.0/service_storm.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_storm.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_storm.md (added)
+++ knox/trunk/books/0.8.0/service_storm.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,113 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Storm ###
+
+Storm is a distributed realtime computation system. Storm exposes REST APIs for UI functionality that can be used for
+retrieving metrics data and configuration information as well as management operations such as starting or stopping topologies.
+
+The documentation for these REST APIs can be found at https://github.com/apache/storm/blob/master/docs/documentation/ui-rest-api.md
+
+To enable this functionality, a topology file needs to have the following configuration:
+
+    <service>
+        <role>STORM</role>
+        <url>http://<hostname>:<port></url>
+    </service>
+
+The default UI daemon port is 8744. If it is configured to some other port, that configuration can be
+found in `storm.yaml` as the value for the property `ui.port`.
+
+In addition to the Storm service configuration above, a STORM-LOGVIEWER service must be configured if the
+log files are to be retrieved through Knox. The port for the logviewer can be found via the `logviewer.port`
+property, also in the `storm.yaml` file.
+
+    <service>
+        <role>STORM-LOGVIEWER</role>
+        <url>http://<hostname>:<port></url>
+    </service>
+
+
+#### Storm URL Mapping ####
+
+For Storm URLs, the mapping of Knox Gateway accessible URLs to direct Storm URLs is the following.
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/storm` |
+| Cluster | `http://{storm-host}:{storm-port}`                                      |
+
+For the log viewer the mapping is as follows
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/storm/logviewer` |
+| Cluster | `http://{storm-logviewer-host}:{storm-logviewer-port}`                                      |
+
+
+#### Storm Examples
+
+Some of the calls that can be made are listed below, along with curl examples.
+
+    # 0. Getting cluster configuration
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/cluster/configuration'
+    
+    # 1. Getting cluster summary information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/cluster/summary'
+
+    # 2. Getting supervisor summary information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/supervisor/summary'
+    
+    # 3. Getting topology summary information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/topology/summary'
+    
+    # 4. Getting specific topology information. Substitute {id} with the topology id.
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/topology/{id}'
+
+    # 5. To get component level information. Substitute {id} with the topology id and {component} with the component id e.g. 'spout'
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/topology/{id}/component/{component}'
+
+
+The following POST operations all require an 'x-csrf-token' header along with other information that can be stored in a cookie file,
+in particular the 'ring-session' and 'JSESSIONID' cookies.
+
+    # 6. To activate a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/activate
+
+    # 7. To de-activate a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/deactivate
+
+    # 8. To rebalance a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/rebalance/0
+
+    # 9. To kill a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/kill/0
+

Added: knox/trunk/books/0.8.0/service_webhcat.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_webhcat.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_webhcat.md (added)
+++ knox/trunk/books/0.8.0/service_webhcat.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,163 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### WebHCat ###
+
+WebHCat (also called _Templeton_) is a related but separate service from HiveServer2.
+As such it is installed and configured independently.
+The [WebHCat wiki pages](https://cwiki.apache.org/confluence/display/Hive/WebHCat) describe this process.
+In the Sandbox, the configuration file for WebHCat is located at `/etc/hadoop/hcatalog/webhcat-site.xml`.
+Note the properties shown below as they are related to configuration required by the gateway.
+
+    <property>
+        <name>templeton.port</name>
+        <value>50111</value>
+    </property>
+
+Also important is the configuration of the JOBTRACKER RPC endpoint.
+For Hadoop 2 this can be found in the `yarn-site.xml` file.
+In Sandbox this file can be found at `/etc/hadoop/conf/yarn-site.xml`.
+The property `yarn.resourcemanager.address` within that file is relevant for the gateway's configuration.
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>
+
+See #[WebHDFS] for details about locating the Hadoop configuration for the NAMENODE endpoint.
+
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>JOBTRACKER</role>
+        <url>rpc://localhost:8050</url>
+    </service>
+    <service>
+        <role>WEBHCAT</role>
+        <url>http://localhost:50111/templeton</url>
+    </service>
+
+The URLs provided for the roles NAMENODE and JOBTRACKER do not result in endpoints being exposed by the gateway.
+This information is only required so that other URLs that reference the appropriate RPC addresses for Hadoop services can be rewritten.
+This prevents clients from needing to be aware of the internal cluster details.
+Note that for Hadoop 2 the JOBTRACKER RPC endpoint is provided by the Resource Manager component.
+
+By default the gateway is configured to use the HTTP endpoint for WebHCat in the Sandbox.
+This could alternatively be configured to use the HTTPS endpoint by providing the correct address.
+
+#### WebHCat URL Mapping ####
+
+For WebHCat URLs, the mapping of Knox Gateway accessible URLs to direct WebHCat URLs is simple.
+
+| ------- | ------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/templeton` |
+| Cluster | `http://{webhcat-host}:{webhcat-port}/templeton`                                |
+
+
+#### WebHCat via cURL
+
+You can use cURL to directly invoke the REST APIs via the gateway. For the full list of available REST calls look at the WebHCat documentation. This is a simple curl command to test the connection:
+
+    curl -i -k -u guest:guest-password 'https://localhost:8443/gateway/sandbox/templeton/v1/status'
+
+
+#### WebHCat Example ####
+
+This example will submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL.
+There are several ways to do this depending upon your preference.
+
+You can use the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleWebHCatJob.groovy
+
+You can manually type in the KnoxShell DSL script into the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file `samples/ExampleWebHCatJob.groovy` would then need to be typed or copied into the interactive shell.
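+
+The sketch below is not the content of `samples/ExampleWebHCatJob.groovy` itself, but a minimal illustration of the Job DSL calls described in the #[WebHCat Client DSL] section that follows. It assumes that `samples/hadoop-examples.jar` and a text file `README` exist locally, and that the `Job` class lives in the `org.apache.hadoop.gateway.shell.job` package alongside the other KnoxShell classes (an assumption; check your distribution if the import fails). The host and credentials are the Sandbox defaults used throughout this guide.
+
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
+    import org.apache.hadoop.gateway.shell.job.Job
+
+    gateway = "https://localhost:8443/gateway/sandbox"
+    session = Hadoop.login( gateway, "guest", "guest-password" )
+
+    // Clean up anything left over from a previous run.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Stage the job jar and some input in HDFS.
+    Hdfs.put( session ).file( "samples/hadoop-examples.jar" ).to( "/user/guest/example/hadoop-examples.jar" ).now()
+    Hdfs.put( session ).file( "README" ).to( "/user/guest/example/input/README" ).now()
+
+    // Submit the WordCount job and capture its ID.
+    jobId = Job.submitJava( session ).jar( "/user/guest/example/hadoop-examples.jar" ).app( "wordcount" ).input( "/user/guest/example/input" ).output( "/user/guest/example/output" ).now().jobId
+    println "Submitted job " + jobId
+
+    // Check the status of the job.
+    println Job.queryStatus( session ).jobId( jobId ).now().string
+
+    session.shutdown()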
+
+#### WebHCat Client DSL ####
+
+##### submitJava() - Submit a Java MapReduce job.
+
+* Request
+    * jar (String) - The remote file name of the JAR containing the app to execute.
+    * app (String) - The app name to execute. For example _wordcount_, not the class name.
+    * input (String) - The remote directory name to use as input for the job.
+    * output (String) - The remote directory name to store output from the job.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+
+
+    Job.submitJava(session)
+        .jar(remoteJarName)
+        .app(appName)
+        .input(remoteInputDir)
+        .output(remoteOutputDir)
+        .now()
+        .jobId
+
+##### submitPig() - Submit a Pig job.
+
+* Request
+    * file (String) - The remote file name of the pig script.
+    * arg (String) - An argument to pass to the script.
+    * statusDir (String) - The remote directory to store status output.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+    * `Job.submitPig(session).file(remotePigFileName).arg("-v").statusDir(remoteStatusDir).now()`
+
+##### submitHive() - Submit a Hive job.
+
+* Request
+    * file (String) - The remote file name of the hive script.
+    * arg (String) - An argument to pass to the script.
+    * statusDir (String) - The remote directory to store status output.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+    * `Job.submitHive(session).file(remoteHiveFileName).arg("-v").statusDir(remoteStatusDir).now()`
+
+##### queryQueue() - Return a list of all job IDs registered to the user.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `Job.queryQueue(session).now().string`
+
+##### queryStatus() - Check the status of a job and get related job information given its job ID.
+
+* Request
+    * jobId (String) - The job ID to check. This is the ID received when the job was created.
+* Response
+    * BasicResponse
+* Example
+    * `Job.queryStatus(session).jobId(jobId).now().string`
+
+### WebHCat HA ###
+
+Please look at #[Default Service HA support]
+

Added: knox/trunk/books/0.8.0/service_webhdfs.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_webhdfs.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_webhdfs.md (added)
+++ knox/trunk/books/0.8.0/service_webhdfs.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,294 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### WebHDFS ###
+
+REST API access to HDFS in a Hadoop cluster is provided by WebHDFS.
+The [WebHDFS REST API](http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/WebHDFS.html) documentation is available online.
+WebHDFS must be enabled in the hdfs-site.xml configuration file.
+In the sandbox this configuration file is located at `/etc/hadoop/conf/hdfs-site.xml`.
+Note the properties shown below as they are related to configuration required by the gateway.
+Some of these represent the default values and may not actually be present in hdfs-site.xml.
+
+    <property>
+        <name>dfs.webhdfs.enabled</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>sandbox.hortonworks.com:8020</value>
+    </property>
+    <property>
+        <name>dfs.namenode.http-address</name>
+        <value>sandbox.hortonworks.com:50070</value>
+    </property>
+    <property>
+        <name>dfs.https.namenode.https-address</name>
+        <value>sandbox.hortonworks.com:50470</value>
+    </property>
+
+The values above need to be reflected in each topology descriptor file deployed to the gateway.
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://localhost:50070/webhdfs</url>
+    </service>
+
+The URL provided for the role NAMENODE does not result in an endpoint being exposed by the gateway.
+This information is only required so that other URLs that reference the Name Node's RPC address can be rewritten.
+This prevents clients from needing to be aware of the internal cluster details.
+
+By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox.
+This could alternatively be configured to use the HTTPS endpoint by providing the correct address.
+
+#### WebHDFS URL Mapping ####
+
+For Name Node URLs, the mapping of Knox Gateway accessible WebHDFS URLs to direct WebHDFS URLs is simple.
+
+| ------- | ----------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs` |
+| Cluster | `http://{webhdfs-host}:50070/webhdfs`                                         |
+
+However, there is a subtle difference to URLs that are returned by WebHDFS in the Location header of many requests.
+Direct WebHDFS requests may return Location headers that contain the address of a particular DataNode.
+The gateway will rewrite these URLs to ensure subsequent requests come back through the gateway and internal cluster details are protected.
+
+A WebHDFS request to the NameNode to retrieve a file will return a URL of the form below in the Location header.
+
+    http://{datanode-host}:{data-node-port}/webhdfs/v1/{path}?...
+
+Note that this URL contains the network location of a DataNode.
+The gateway will rewrite this URL to look like the URL below.
+
+    https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs/data/v1/{path}?_={encrypted-query-parameters}
+
+The `{encrypted-query-parameters}` will contain the `{datanode-host}` and `{datanode-port}` information.
+This information, along with the original query parameters, is encrypted so that the internal Hadoop details are protected.
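+
+The plain Groovy sketch below (it does not use the KnoxShell DSL, which handles this automatically) makes an OPEN request through the gateway without following redirects, so that the rewritten Location header can be inspected. It assumes the file `/user/guest/example/README` already exists in HDFS, and uses the Sandbox host, credentials and truststore defaults from elsewhere in this guide; fuller examples follow in the next section.
+
+    // Trust the gateway's self-signed certificate.
+    System.setProperty( "javax.net.ssl.trustStore", "/usr/lib/knox/data/security/keystores/gateway.jks" )
+    System.setProperty( "javax.net.ssl.trustStorePassword", "knoxsecret" )
+
+    url = new URL( "https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=OPEN" )
+    conn = url.openConnection()
+    conn.instanceFollowRedirects = false   // keep the redirect so the Location header is visible
+    conn.setRequestProperty( "Authorization", "Basic " + "guest:guest-password".bytes.encodeBase64().toString() )
+    println conn.responseCode                        // typically 307 Temporary Redirect
+    println conn.getHeaderField( "Location" )        // a gateway URL containing the encrypted query parameters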
+
+#### WebHDFS Examples ####
+
+The examples below upload a file, download the file and list the contents of the directory.
+
+##### WebHDFS via client DSL
+
+You can use the Groovy example scripts and interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
+
+You can manually type the client DSL script into the KnoxShell interactive Groovy interpreter provided with the distribution.
+The command below starts the KnoxShell in interactive mode.
+
+    java -jar bin/shell.jar
+
+Each line below could be typed or copied into the interactive shell and executed.
+This is provided as an example to illustrate the use of the client DSL.
+
+    // Import the client DSL and a useful utility for working with JSON.
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
+    import groovy.json.JsonSlurper
+
+    // Setup some basic config.
+    gateway = "https://localhost:8443/gateway/sandbox"
+    username = "guest"
+    password = "guest-password"
+
+    // Start the session.
+    session = Hadoop.login( gateway, username, password )
+
+    // Cleanup anything leftover from a previous run.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Upload the README to HDFS.
+    Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()
+
+    // Download the README from HDFS.
+    text = Hdfs.get( session ).from( "/user/guest/example/README" ).now().string
+    println text
+
+    // List the contents of the directory.
+    text = Hdfs.ls( session ).dir( "/user/guest/example" ).now().string
+    json = (new JsonSlurper()).parseText( text )
+    println json.FileStatuses.FileStatus.pathSuffix
+
+    // Cleanup the directory.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Clean the session.
+    session.shutdown()
+
+
+##### WebHDFS via cURL
+
+You can use cURL to directly invoke the REST APIs via the gateway.
+
+###### Optionally cleanup the sample directory in case a previous example was run without cleaning up.
+
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+###### Register the name for a sample file README in /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=CREATE'
+
+###### Upload README to /user/guest/example.  Use the README in {GATEWAY_HOME}.
+
+    curl -i -k -u guest:guest-password -T README -X PUT \
+        '{Value of Location header from command above}'
+
+###### List the contents of the directory /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=LISTSTATUS'
+
+###### Request the content of the README file in /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=OPEN'
+
+###### Read the content of the file.
+
+    curl -i -k -u guest:guest-password -X GET \
+        '{Value of Location header from command above}'
+
+###### Optionally clean up the example directory.
+
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+
+##### WebHDFS client DSL
+
+###### get() - Get a file from HDFS (OPEN).
+
+* Request
+    * from( String name ) - The full name of the file in HDFS.
+    * file( String name ) - The name of a local file to create with the content.
+    If this isn't specified, the file content must be read from the response.
+* Response
+    * BasicResponse
+    * If the file parameter is specified, the content will be streamed to that file.
+* Example
+    * `Hdfs.get( session ).from( "/user/guest/example/README" ).now().string`
+
+###### ls() - Query the contents of a directory (LISTSTATUS)
+
+* Request
+    * dir( String name ) - The full name of the directory in HDFS.
+* Response
+    * BasicResponse
+* Example
+    * `Hdfs.ls( session ).dir( "/user/guest/example" ).now().string`
+
+###### mkdir() - Create a directory in HDFS (MKDIRS)
+
+* Request
+    * dir( String name ) - The full name of the directory to create in HDFS.
+    * perm( String perm ) - The permissions for the directory (e.g. 644).  Optional: default="777"
+* Response
+    * EmptyResponse - Implicit close().
+* Example
+    * `Hdfs.mkdir( session ).dir( "/user/guest/example" ).now()`
+
+###### put() - Write a file into HDFS (CREATE)
+
+* Request
+    * text( String text ) - Text to upload to HDFS.  Takes precedence over file if both present.
+    * file( String name ) - The name of a local file to upload to HDFS.
+    * to( String name ) - The fully qualified name to create in HDFS.
+* Response
+    * EmptyResponse - Implicit close().
+* Example
+    * `Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()`
+
+###### rm() - Delete a file or directory (DELETE)
+
+* Request
+    * file( String name ) - The fully qualified file or directory name in HDFS.
+    * recursive( Boolean recursive ) - Delete directory and all of its contents if True.  Optional: default=False
+* Response
+    * BasicResponse - Implicit close().
+* Example
+    * `Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()`
+
+
+### WebHDFS HA ###
+
+Knox provides basic failover and retry functionality for REST API calls made to WebHDFS when HDFS HA has been 
+configured and enabled.
+
+To enable HA functionality for WebHDFS in Knox the following configuration has to be added to the topology file.
+
+    <provider>
+       <role>ha</role>
+       <name>HaProvider</name>
+       <enabled>true</enabled>
+       <param>
+           <name>WEBHDFS</name>
+           <value>maxFailoverAttempts=3;failoverSleep=1000;maxRetryAttempts=300;retrySleep=1000;enabled=true</value>
+       </param>
+    </provider>
+    
+The role and name of the provider above must be as shown. The name in the 'param' section must match the role name of the
+service being configured for HA, and the value in the 'param' section is the HA configuration for that particular service.
+In this case the service name is 'WEBHDFS'.
+
+The various configuration parameters are described below:
+     
+* maxFailoverAttempts - 
+The maximum number of times a failover will be attempted. The failover strategy at this time is very simplistic:
+the next URL in the list of URLs provided for the service is tried and the one that failed is put at the bottom
+of the list. If the list is exhausted and the maximum number of attempts has not been reached, the first URL that failed
+will be tried again (the list starts over from the original top entry).
+
+* failoverSleep - 
+The amount of time in milliseconds that the process will wait or sleep before attempting to failover.
+
+* maxRetryAttempts - 
+The maximum number of times a retry request will be attempted. Unlike failover, the retry is issued against the same URL
+that failed. This covers a special case in HDFS where the node is in safe mode. The expectation is that the node will
+come out of safe mode, so a retry is preferable here as opposed to a failover.
+
+* retrySleep - 
+The amount of time in milliseconds that the process will wait or sleep before a retry is issued.
+
+* enabled - 
+Flag to turn the particular service on or off for HA.
+
+For the service configuration itself, the additional URLs should be added to the list. The active
+URL (at the time of configuration) should ideally be placed at the top of the list.
+
+
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://{host1}:50070/webhdfs</url>
+        <url>http://{host2}:50070/webhdfs</url>
+    </service>
+    
+
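+With HA enabled, clients continue to call the gateway exactly as before; failover and retry against the backend URLs are
+handled inside Knox. As a rough sketch (reusing the sandbox topology and guest credentials from the cURL examples above),
+the same client-facing request works regardless of which NameNode is currently active:
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=LISTSTATUS'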
+
+
+
+

Added: knox/trunk/books/0.8.0/service_yarn.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/service_yarn.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/service_yarn.md (added)
+++ knox/trunk/books/0.8.0/service_yarn.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,124 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Yarn ###
+
+Knox provides gateway functionality for the REST APIs of the ResourceManager. The ResourceManager REST APIs allow the
+user to get information about the cluster: cluster status, cluster metrics, scheduler information,
+information about nodes in the cluster, and information about applications on the cluster. In addition, as of Hadoop version
+2.5.0, the user can submit a new application as well as kill it (or get its state) using the 'Writable' APIs.
+
+The documentation for these APIs can be found here:
+
+http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html
+
+To enable this functionality, a topology file needs to have the following configuration:
+
+    <service>
+        <role>RESOURCEMANAGER</role>
+        <url>http://<hostname>:<port>/ws</url>
+    </service>
+
+The default ResourceManager HTTP port is 8088. If it is configured to some other port, that configuration can be
+found in `yarn-site.xml` under the property `yarn.resourcemanager.webapp.address`.
+
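+A quick way to check the configured port on a cluster node is to look at `yarn-site.xml` directly. This is only a sketch;
+the location of the file (shown here as a typical `/etc/hadoop/conf` layout) varies between installations.
+
+    # Show the configured ResourceManager web address (host:port).
+    grep -A 1 'yarn.resourcemanager.webapp.address' /etc/hadoop/conf/yarn-site.xml
+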
+#### Yarn URL Mapping ####
+
+For Yarn URLs, the mapping of Knox Gateway accessible URLs to direct Yarn URLs is the following.
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/resourcemanager` |
+| Cluster | `http://{yarn-host}:{yarn-port}/ws`                                       |
+
+
+#### Yarn Examples via cURL ####
+
+Some of the calls that can be made are listed below, along with examples using cURL.
+
+    # 0. Getting cluster info
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster'
+    
+    # 1. Getting cluster metrics
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/metrics'
+    
+    To get the same information in an xml format
+    
+    curl -ikv -u guest:guest-password -H Accept:application/xml -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/metrics'
+    
+    # 2. Getting scheduler information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/scheduler'
+    
+    # 3. Getting all the applications listed and their information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps'
+    
+    # 4. Getting applications statistics
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/appstatistics'
+    
+    Also query params can be used as below to filter the results
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/appstatistics?states=accepted,running,finished&applicationTypes=mapreduce'
+    
+    # 5. To get a specific application (please note, replace the application id with a real value)
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}'
+    
+    # 6. To get the attempts made for a particular application
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/appattempts'
+    
+    # 7. To get information about the various nodes
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/nodes'
+    
+    Also to get a specific node, use an id obtained in the response from above (the node id is scrambled) and issue the following
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/nodes/{node_id}'
+    
+    # 8. To create a new Application
+    
+    curl -ikv -u guest:guest-password -X POST 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/new-application'
+    
+    An application id is returned from the request above and this can be used to submit an application.
+    
+    # 9. To submit an application, put together a request containing the application id received in the above response (please refer to Yarn REST
+    API documentation).
+    
+    curl -ikv -u guest:guest-password -T request.json -H Content-Type:application/json -X POST 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps'
+    
+    Here the request is saved in a file called request.json (a sketch of its possible contents appears after these examples)
+    
+    # 10. To get application state
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/state'
+    
+    # 11. To kill an application that is running, issue the command below with the application id of the application that is to be killed.
+    The application is killed by doing a PUT of a new state. The contents of the state-killed.json file are:
+    
+    {
+      "state":"KILLED"
+    }
+    
+    curl -ikv -u guest:guest-password -H Content-Type:application/json -X PUT -T state-killed.json 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/state'
+
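+For reference, below is a rough sketch of what the request.json file used in step 9 might contain. The field names follow
+the YARN ResourceManager REST API documentation linked above; the application id (taken from the new-application response),
+application name, command, and resource sizes are placeholders to adapt to your cluster.
+
+    {
+      "application-id": "{application_id}",
+      "application-name": "knox-example",
+      "am-container-spec": {
+        "commands": {
+          "command": "sleep 30"
+        }
+      },
+      "resource": {
+        "memory": 1024,
+        "vCores": 1
+      },
+      "application-type": "YARN"
+    }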

Added: knox/trunk/books/0.8.0/x-forwarded-headers.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.8.0/x-forwarded-headers.md?rev=1725059&view=auto
==============================================================================
--- knox/trunk/books/0.8.0/x-forwarded-headers.md (added)
+++ knox/trunk/books/0.8.0/x-forwarded-headers.md Sun Jan 17 04:57:06 2016
@@ -0,0 +1,76 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### X-Forwarded-* Headers Support ###
+Out of the box, Knox provides support for some X-Forwarded-* headers through the use of a Servlet Filter. Specifically, the
+headers handled/populated by Knox are:
+
+* X-Forwarded-For
+* X-Forwarded-Proto
+* X-Forwarded-Port
+* X-Forwarded-Host
+* X-Forwarded-Server
+* X-Forwarded-Context
+
+This functionality can be turned off by a configuration setting in the file gateway-site.xml and redeploying the
+necessary topology/topologies.
+
+The setting (under the 'configuration' tag) is:
+
+    <property>
+        <name>gateway.xforwarded.enabled</name>
+        <value>false</value>
+    </property>
+
+If this setting is absent, X-Forwarded-* header support is on by default; in other words,
+'gateway.xforwarded.enabled' defaults to 'true'.
+
+
+#### Header population ####
+
+The following are the various rules for population of these headers:
+
+##### X-Forwarded-For #####
+
+This header represents a list of client IP addresses. If the header is already present, Knox appends a comma-separated
+value to the list: the client's IP address as Knox sees it. This value is added to the end of the list.
+
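+As a rough illustration (reusing the sandbox topology and guest credentials from the examples earlier in this guide, with a
+placeholder forwarded address), a client can pass its own X-Forwarded-For value and the service behind Knox should receive
+that value with the client's IP address, as seen by Knox, appended to the end:
+
+    # Sketch: Knox appends the calling client's IP address to the forwarded list
+    # before dispatching the request to the backend service.
+    curl -ik -u guest:guest-password -H "X-Forwarded-For: 10.0.0.1" \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=LISTSTATUS'
+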
+##### X-Forwarded-Proto #####
+
+The protocol used in the client request. If this header is passed into Knox its value is maintained, otherwise Knox will
+populate the header with the value 'https' if the request is a secure one or 'http' otherwise.
+
+##### X-Forwarded-Port #####
+
+The port used in the client request. If this header is passed into Knox its value is maintained, otherwise Knox will
+populate the header with the port on which the request came into Knox.
+
+##### X-Forwarded-Host #####
+
+Represents the original host requested by the client in the Host HTTP request header. The value passed into Knox is maintained
+by Knox. If no value is present, Knox populates the header with the value of the HTTP Host header.
+
+##### X-Forwarded-Server #####
+
+The hostname of the server Knox is running on.
+
+##### X-Forwarded-Context #####
+
+This header value contains the context path of the request to Knox.
+
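+Putting these together, the following is an illustrative sketch of the header set a backend service might receive for a
+request made to the sandbox topology used throughout this guide; the client and Knox host values are placeholders and the
+actual values depend on the client and the deployment.
+
+    X-Forwarded-For: {client-ip}
+    X-Forwarded-Proto: https
+    X-Forwarded-Port: 8443
+    X-Forwarded-Host: localhost:8443
+    X-Forwarded-Server: {knox-host}
+    X-Forwarded-Context: /gateway/sandbox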
+
+

Modified: knox/trunk/build.xml
URL: http://svn.apache.org/viewvc/knox/trunk/build.xml?rev=1725059&r1=1725058&r2=1725059&view=diff
==============================================================================
--- knox/trunk/build.xml (original)
+++ knox/trunk/build.xml Sun Jan 17 04:57:06 2016
@@ -34,6 +34,7 @@
     <property name="book-0-5-0-file" value="${book-0-5-0-dir}/${gateway-artifact}-0-5-0.html"/>
     <property name="book-0-6-0-dir" value="${book-target}/${gateway-artifact}-0-6-0"/>
     <property name="book-0-7-0-dir" value="${book-target}/${gateway-artifact}-0-7-0"/>
+    <property name="book-0-8-0-dir" value="${book-target}/${gateway-artifact}-0-8-0"/>
 
     <property name="svn.release.path" value="https://dist.apache.org/repos/dist/release/incubator/${gateway-project}" />
     <property name="svn.staging.path" value="https://dist.apache.org/repos/dist/dev/incubator/${gateway-project}" />
@@ -82,7 +83,7 @@
     </target>
 
     <target name="books" depends="markbook,_books"/>
-    <target name="_books" depends="_book-0-3-0,_book-0-4-0,_book-0-5-0,_book-0-6-0,_book-0-7-0"/>
+    <target name="_books" depends="_book-0-3-0,_book-0-4-0,_book-0-5-0,_book-0-6-0,_book-0-7-0,_book-0-8-0"/>
     <target name="_book-0-3-0" depends="init">
         <delete dir="${book-target}/${gateway-artifact}-0-3-0" includes="**/*.html,**/*.css,**/*.png"/>
         <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
@@ -153,6 +154,24 @@
             <fileset dir="books/static"/>
         </copy>
     </target>
+    <target name="_book-0-8-0" depends="init">
+        <delete dir="${book-target}/${gateway-artifact}-0-8-0" includes="**/*.html,**/*.css,**/*.png"/>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/0.8.0/book.md"/>
+            <arg value="-o"/><arg value="${book-0-8-0-dir}/user-guide.html"/>
+        </java>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/0.8.0/dev-guide/book.md"/>
+            <arg value="-o"/><arg value="${book-0-8-0-dir}/dev-guide.html"/>
+        </java>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/0.8.0/dev-guide/knoxsso_integration.md"/>
+            <arg value="-o"/><arg value="${book-0-8-0-dir}/knoxsso_integration.html"/>
+        </java>
+        <copy todir="${book-target}/${gateway-artifact}-0-8-0">
+            <fileset dir="books/static"/>
+        </copy>
+    </target>
 
     <target name="markbook" depends="init" description="Build and package markbook tool.">
         <exec executable="${mvn.cmd}">