Posted to commits@knox.apache.org by km...@apache.org on 2016/01/15 16:24:46 UTC

svn commit: r1724836 [3/5] - in /knox: site/ site/books/knox-0-7-0/ trunk/books/0.7.0/

Modified: knox/site/issue-tracking.html
URL: http://svn.apache.org/viewvc/knox/site/issue-tracking.html?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/site/issue-tracking.html (original)
+++ knox/site/issue-tracking.html Fri Jan 15 15:24:45 2016
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2016-01-14
+ | Generated by Apache Maven Doxia at 2016-01-15
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20160114" />
+    <meta name="Date-Revision-yyyymmdd" content="20160115" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; Issue Tracking</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2016-01-14</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2016-01-15</li> 
             
                             </ul>
       </div>

Modified: knox/site/license.html
URL: http://svn.apache.org/viewvc/knox/site/license.html?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/site/license.html (original)
+++ knox/site/license.html Fri Jan 15 15:24:45 2016
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2016-01-14
+ | Generated by Apache Maven Doxia at 2016-01-15
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20160114" />
+    <meta name="Date-Revision-yyyymmdd" content="20160115" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; Project License</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2016-01-14</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2016-01-15</li> 
             
                             </ul>
       </div>

Modified: knox/site/mail-lists.html
URL: http://svn.apache.org/viewvc/knox/site/mail-lists.html?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/site/mail-lists.html (original)
+++ knox/site/mail-lists.html Fri Jan 15 15:24:45 2016
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2016-01-14
+ | Generated by Apache Maven Doxia at 2016-01-15
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20160114" />
+    <meta name="Date-Revision-yyyymmdd" content="20160115" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; Project Mailing Lists</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2016-01-14</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2016-01-15</li> 
             
                             </ul>
       </div>

Modified: knox/site/project-info.html
URL: http://svn.apache.org/viewvc/knox/site/project-info.html?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/site/project-info.html (original)
+++ knox/site/project-info.html Fri Jan 15 15:24:45 2016
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2016-01-14
+ | Generated by Apache Maven Doxia at 2016-01-15
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20160114" />
+    <meta name="Date-Revision-yyyymmdd" content="20160115" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; Project Information</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2016-01-14</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2016-01-15</li> 
             
                             </ul>
       </div>

Modified: knox/site/team-list.html
URL: http://svn.apache.org/viewvc/knox/site/team-list.html?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/site/team-list.html (original)
+++ knox/site/team-list.html Fri Jan 15 15:24:45 2016
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at 2016-01-14
+ | Generated by Apache Maven Doxia at 2016-01-15
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20160114" />
+    <meta name="Date-Revision-yyyymmdd" content="20160115" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Knox Gateway &#x2013; Team list</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -58,7 +58,7 @@
               
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2016-01-14</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2016-01-15</li> 
             
                             </ul>
       </div>

Modified: knox/trunk/books/0.7.0/admin_api.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/admin_api.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/admin_api.md (original)
+++ knox/trunk/books/0.7.0/admin_api.md Fri Jan 15 15:24:45 2016
@@ -34,9 +34,9 @@ Please note that to access that admin AP
 ###### Operations  
 
  * ###### HTTP GET
- 	1. #[Server Version]  
- 	2. #[Topology Collection]  
- 	3. #[Topology]    
+  1. #[Server Version]  
+  2. #[Topology Collection]  
+  3. #[Topology]    
  * ###### HTTP PUT   
  * ###### HTTP DELETE
 
@@ -53,20 +53,20 @@ Calls to Knox and returns the gateway's
 ###### Example cURL Request  
 
 `curl -u admin:admin-password -i -k https://{gateway-host}:{gateway-port}/{gateway-path}/admin/api/v1/version`
-	
+
 ###### Response
 
-        <ServerVersion>
-           <version>0.7.0</version>
-           <hash>{version-hash}</hash>
-        </ServerVersion>
+    <ServerVersion>
+        <version>0.7.0</version>
+        <hash>{version-hash}</hash>
+    </ServerVersion>
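+
+The topology requests below show that the admin API can return JSON when an explicit Accept header is supplied; the same approach should work for the version resource (a sketch using the same placeholders; the JSON field names are not reproduced here):
+
+    curl -u admin:admin-password -i -k -H Accept:application/json https://{gateway-host}:{gateway-port}/{gateway-path}/admin/api/v1/version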
 
 ##### Topology Collection  
 
 ###### Description  
 
 Calls to Knox and returns an array of JSON objects that represent the list of deployed topologies currently inside of the gateway.  
-	
+
 ###### Example Request URL  
 
 `https://{gateway-host}:{gateway-port}/{gateway-path}/admin/api/{api-version}/topologies`  
@@ -77,21 +77,21 @@ Calls to Knox and return an array of JSO
 
 ###### Response
 
-	[  
-		{  
-			"href":"https://localhost:8443/gateway/admin/api/v1/topologies/_default",
-			"name":"_default",
-			"timestamp":"1405633120000",
-			"uri":"https://localhost:8443/gateway/_default"
-		},
-		{  
-			"href":"https://localhost:8443/gateway/admin/api/v1/topologies/admin",
-			"name":"admin",
-			"timestamp":"1406672646000",
-			"uri":"https://localhost:8443/gateway/admin"
-		}
-	]  
-	
+    [  
+      {  
+        "href":"https://localhost:8443/gateway/admin/api/v1/topologies/_default",
+        "name":"_default",
+        "timestamp":"1405633120000",
+        "uri":"https://localhost:8443/gateway/_default"
+      },
+      {  
+        "href":"https://localhost:8443/gateway/admin/api/v1/topologies/admin",
+        "name":"admin",
+        "timestamp":"1406672646000",
+        "uri":"https://localhost:8443/gateway/admin"
+      }
+    ]  
+  
 
 ##### Topology  
 
@@ -106,54 +106,49 @@ Calls to Knox and return a JSON object t
 ###### Example cURL Request  
 
 `curl -u admin:admin-password -i -k -H Accept:application/json https://{gateway-host}:{gateway-port}/{gateway-path}/admin/api/v1/topologies/{topology-name}`
-	
+  
 ###### Response  
 
-	{
-		"name": "admin",
-		"providers": [{
-			"enabled": true,
-			"name": "ShiroProvider",
-			"params": {
-				"sessionTimeout": "30",
-				"main.ldapRealm": "org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm",
-				"main.ldapRealm.userDnTemplate": "uid={0},ou=people,dc=hadoop,dc=apache,dc=org",
-				"main.ldapRealm.contextFactory.url": "ldap://localhost:33389",
-				"main.ldapRealm.contextFactory.authenticationMechanism": "simple",
-				"urls./**": "authcBasic"
-			},
-			"role": "authentication"
-		}, {
-			"enabled": true,
-			"name": "AclsAuthz",
-			"params": {
-				"knox.acl": "admin;*;*"
-			},
-			"role": "authorization"
-		}, {
-			"enabled": true,
-			"name": "Default",
-			"params": {},
-			"role": "identity-assertion"
-		}, {
-			"enabled": true,
-			"name": "static",
-			"params": {
-				"localhost": "sandbox,sandbox.hortonworks.com"
-			},
-			"role": "hostmap"
-		}],
-		"services": [{
-			"name": null,
-			"params": {},
-			"role": "KNOX",
-			"url": null
-		}],
-		"timestamp": 1406672646000,
-		"uri": "https://localhost:8443/gateway/admin"
-	}
-
-
-
-
-
+    {
+      "name": "admin",
+      "providers": [{
+        "enabled": true,
+        "name": "ShiroProvider",
+        "params": {
+          "sessionTimeout": "30",
+          "main.ldapRealm": "org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm",
+          "main.ldapRealm.userDnTemplate": "uid={0},ou=people,dc=hadoop,dc=apache,dc=org",
+          "main.ldapRealm.contextFactory.url": "ldap://localhost:33389",
+          "main.ldapRealm.contextFactory.authenticationMechanism": "simple",
+          "urls./**": "authcBasic"
+        },
+        "role": "authentication"
+      }, {
+        "enabled": true,
+        "name": "AclsAuthz",
+        "params": {
+          "knox.acl": "admin;*;*"
+        },
+        "role": "authorization"
+      }, {
+        "enabled": true,
+        "name": "Default",
+        "params": {},
+        "role": "identity-assertion"
+      }, {
+        "enabled": true,
+        "name": "static",
+        "params": {
+          "localhost": "sandbox,sandbox.hortonworks.com"
+        },
+        "role": "hostmap"
+      }],
+      "services": [{
+        "name": null,
+        "params": {},
+        "role": "KNOX",
+        "url": null
+      }],
+      "timestamp": 1406672646000,
+      "uri": "https://localhost:8443/gateway/admin"
+    }

Modified: knox/trunk/books/0.7.0/book.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book.md (original)
+++ knox/trunk/books/0.7.0/book.md Fri Jan 15 15:24:45 2016
@@ -40,7 +40,7 @@
     * #[Authentication]
     * #[Advanced LDAP Authentication]
     * #[LDAP Authentication Caching]
-    * #[LDAPGroupLookup]
+    * #[LDAP Group Lookup]
     * #[Identity Assertion]
     * #[Authorization]
     * #[Secure Clusters]
@@ -48,6 +48,7 @@
     * #[Web App Security Provider]
     * #[Preauthenticated SSO Provider]
     * #[KnoxSSO Setup and Configuration]
+    * #[Mutual Authentication with SSL]
     * #[Audit]
 * #[Client Details]
 * #[Service Details]
@@ -58,6 +59,7 @@
     * #[Hive]
     * #[Yarn]
     * #[Storm]
+    * #[Default Service HA support]
 * #[UI Service Details]
 * #[Limitations]
 * #[Troubleshooting]

Modified: knox/trunk/books/0.7.0/book_client-details.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_client-details.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_client-details.md (original)
+++ knox/trunk/books/0.7.0/book_client-details.md Fri Jan 15 15:24:45 2016
@@ -19,7 +19,7 @@
 
 Hadoop requires a client that can be used to interact remotely with the services provided by the Hadoop cluster.
 This will also be true when using the Apache Knox Gateway to provide perimeter security and centralized access for these services.
-The two primary existing clients for Hadoop are the CLI (i.e. Command Line Interface, hadoop) and HUE (i.e. Hadoop User Environment).
+The two primary existing clients for Hadoop are the CLI (i.e. Command Line Interface, hadoop) and [Hue](http://gethue.com/) (i.e. Hadoop User Experience).
 For several reasons however, neither of these clients can _currently_ be used to access Hadoop services via the Apache Knox Gateway.
 
 This led to thinking about a very simple client that could help people use and evaluate the gateway.
@@ -37,11 +37,11 @@ The list below outlines the general requ
 The result is a very simple DSL ([Domain Specific Language](http://en.wikipedia.org/wiki/Domain-specific_language)) of sorts that is used via [Groovy](http://groovy.codehaus.org) scripts.
 Here is an example of a command that copies a file from the local file system to HDFS.
 
-_Note: The variables session, localFile and remoteFile are assumed to be defined._
+_Note: The variables `session`, `localFile` and `remoteFile` are assumed to be defined._
 
-    Hdfs.put( session ).file( localFile ).to( remoteFile ).now()
+    Hdfs.put(session).file(localFile).to(remoteFile).now()
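+
+A fuller sketch showing where those variables might come from, using the class names from the distribution's sample scripts (the gateway URL, credentials and paths below are illustrative assumptions):
+
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
+
+    // Open a session against the gateway (example URL and demo credentials).
+    session = Hadoop.login("https://localhost:8443/gateway/sandbox", "guest", "guest-password")
+    localFile = "README"
+    remoteFile = "/tmp/example/README"
+
+    // Copy the local file into HDFS and block until the call completes.
+    Hdfs.put(session).file(localFile).to(remoteFile).now()
+
+    // Shut the session down to release its thread pool.
+    session.shutdown()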
 
-*This work is very early in development but is also very useful in its current state.*
+*This work is in very early development but is already very useful in its current state.*
 *We are very interested in receiving feedback about how to improve this feature and the DSL in particular.*
 
 A note of thanks to [REST-assured](https://code.google.com/p/rest-assured/) which provides a [Fluent interface](http://en.wikipedia.org/wiki/Fluent_interface) style DSL for testing REST services.
@@ -52,10 +52,10 @@ It served as the initial inspiration for
 
 This document assumes a few things about your environment in order to simplify the examples.
 
-* The JVM is executable as simply java.
+* The JVM is executable as simply `java`.
 * The Apache Knox Gateway is installed and functional.
-* The example commands are executed within the context of the GATEWAY_HOME current directory.
-The GATEWAY_HOME directory is the directory within the Apache Knox Gateway installation that contains the README file and the bin, conf and deployments directories.
+* The example commands are executed within the context of the `GATEWAY_HOME` current directory.
+The `GATEWAY_HOME` directory is the directory within the Apache Knox Gateway installation that contains the README file and the bin, conf and deployments directories.
 * A few examples require the use of commands from a standard Groovy installation.  These examples are optional but to try them you will need Groovy [installed](http://groovy.codehaus.org/Installing+Groovy).
 
 
@@ -65,7 +65,7 @@ The DSL requires a shell to interpret th
 The shell can either be used interactively or to execute a script file.
 To simplify use, the distribution contains an embedded version of the Groovy shell.
 
-The shell can be run interactively.  Use the command `exit` to exit.
+The shell can be run interactively. Use the command `exit` to exit.
 
     java -jar bin/shell.jar
 
@@ -96,12 +96,12 @@ Below is a very simple example of an int
 The `knox:000>` in the example above is the prompt from the embedded Groovy console.
 If your output doesn't look like this you may need to set the verbosity and show-last-result preferences as described above in the Usage section.
 
-If you relieve an error `HTTP/1.1 403 Forbidden` it may be because that file already exists.
+If you receive an error `HTTP/1.1 403 Forbidden` it may be because that file already exists.
 Try deleting it with the following command and then try again.
 
     knox:000> Hdfs.rm(session).file("/tmp/example/README").now()
 
-Without using some other tool to browse HDFS it is hard to tell that that this command did anything.
+Without using some other tool to browse HDFS it is hard to tell that this command did anything.
 Execute this to get a bit more feedback.
 
     knox:000> println "Status=" + Hdfs.put( session ).file( "README" ).to( "/tmp/example/README2" ).now().statusCode
@@ -127,11 +127,11 @@ The example below shows the use of JsonS
     [README, README2]
 
 *In the future, "built-in" methods to slurp JSON and XML may be added to make this a bit easier.*
-*This would allow for this type if single line interaction.*
+*This would allow for the following type of single line interaction:*
 
     println Hdfs.ls(session).dir("/tmp").now().json().FileStatuses.FileStatus.pathSuffix
 
-Shell session should always be ended with shutting down the session.
+Shell sessions should always be ended by shutting down the session.
 The examples above do not touch on it but the DSL supports the simple execution of commands asynchronously.
 The shutdown command attempts to ensure that all asynchronous commands have completed before exiting the shell.
 
@@ -169,7 +169,7 @@ Without this an error would result the s
 ### Futures ###
 
 The DSL supports the ability to invoke commands asynchronously via the later() invocation method.
-The object returned from the later() method is a java.util.concurrent.Future parametrized with the response type of the command.
+The object returned from the later() method is a java.util.concurrent.Future parameterized with the response type of the command.
 This is an example of how to asynchronously put a file to HDFS.
 
     future = Hdfs.put(session).file("README").to("/tmp/example/README").later()
@@ -199,7 +199,7 @@ These blocks of code are executed once t
     Hdfs.put(session).file("README").to("/tmp/example/README").later(){ println it.statusCode }
 
 In this example the put() command is executed on a separate thread and once complete the `println it.statusCode` block is executed on that thread.
-The it variable is automatically populated by Groovy and is a reference to the result that is returned from the future or now() method.
+The `it` variable is automatically populated by Groovy and is a reference to the result that is returned from the future or `now()` method.
 The future example above can be rewritten to illustrate the use of closures.
 
     readmeFuture = Hdfs.put(session).file("README").to("/tmp/example/README").later() { println it.statusCode }
@@ -220,15 +220,14 @@ This construct encapsulates the client s
 In particular it will simplify the management of any tokens that need to be presented with each command invocation.
 It also manages a thread pool that is used by all asynchronous commands which is why it is important to call one of the shutdown methods.
 
-The syntax associated with this is expected to change we expect that credentials will not need to be provided to the gateway.
-Rather it is expected that some form of access token will be used to initialize the session.
+The syntax associated with this is expected to change. We expect that credentials will not need to be provided to the gateway. Rather it is expected that some form of access token will be used to initialize the session.
 
 
 #### Services ####
 
 Services are the primary extension point for adding new suites of commands.
-The current built in examples are: Hdfs, Job and Workflow.
-The desire for extensibility is the reason for the slightly awkward Hdfs.ls(session) syntax.
+The current built-in examples are: Hdfs, Job and Workflow.
+The desire for extensibility is the reason for the slightly awkward `Hdfs.ls(session)` syntax.
 Certainly something more like `session.hdfs().ls()` would have been preferred but this would prevent adding new commands easily.
 At a minimum it would result in extension commands with a different syntax from the "built-in" commands.
 
@@ -294,7 +293,7 @@ The close() method may be called after r
 
 ### Services ###
 
-The built-in supported client DLS for each Hadoop service can be found in the #[Service Details] section.
+The built-in supported client DSL for each Hadoop service can be found in the #[Service Details] section.
 
 
 ### Extension ###
@@ -320,14 +319,14 @@ Therefore to extend the shell you should
 The `lib` directory is reserved for JARs that may be delivered with the product.
 
 Below are samples for the service and command classes that would need to be written to add new commands to the shell.
-These happen to be Groovy source files but could with very minor changes be Java files.
-The easiest way to add these to the shell is to compile them directory into the `ext` directory.
+These happen to be Groovy source files but could, with very minor changes, be Java files.
+The easiest way to add these to the shell is to compile them directly into the `ext` directory.
 *Note: This command depends upon having the Groovy compiler installed and available on the execution path.*
 
     groovy -d ext -cp bin/shell.jar samples/SampleService.groovy \
         samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
 
-These source files are available in the samples directory of the distribution but these are included here for convenience.
+These source files are available in the samples directory of the distribution but are included here for convenience.
 
 
 #### Sample Service (Groovy)
@@ -447,7 +446,7 @@ These source files are available in the
 The shell included in the distribution is basically an unmodified packaging of the Groovy shell.
 The distribution does however provide a wrapper that makes it very easy to setup the class path for the shell.
 In fact the JARs required to execute the DSL are included on the class path by default.
-Therefore these command are functionally equivalent if you have Groovy [installed][15].
+Therefore these commands are functionally equivalent if you have Groovy installed.
 See below for a description of the JARs from the `lib` and `dep` directories that are required by the DSL.
 
     java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
@@ -479,26 +478,26 @@ Alternatively, you can use the Groovy Co
 
 The JARs currently required by the client DSL are
 
-    lib/gateway-shell-${gateway-version}.jar
-    dep/httpclient-4.2.3.jar
-    dep/httpcore-4.2.2.jar
-    dep/commons-lang3-3.1.jar
+    lib/gateway-shell-{GATEWAY_VERSION}.jar
+    dep/httpclient-4.3.6.jar
+    dep/httpcore-4.3.3.jar
+    dep/commons-lang3-3.4.jar
     dep/commons-codec-1.7.jar
 
 So on Linux/MacOS you would need this command
 
-    groovy -cp lib/gateway-shell-0.4.0.jar:dep/httpclient-4.2.5.jar:dep/httpcore-4.2.4.jar:dep/commons-lang3-3.1.jar:dep/commons-codec-1.7.jar samples/ExampleWebHdfsPutGet.groovy
+    groovy -cp lib/gateway-shell-0.7.0.jar:dep/httpclient-4.3.6.jar:dep/httpcore-4.3.3.jar:dep/commons-lang3-3.4.jar:dep/commons-codec-1.7.jar samples/ExampleWebHdfsPutGet.groovy
 
 and on Windows you would need this command
 
-    groovy -cp lib/gateway-shell-0.4.0.jar;dep/httpclient-4.2.5.jar;dep/httpcore-4.2.4.jar;dep/commons-lang3-3.1.jar;dep/commons-codec-1.7.jar samples/ExampleWebHdfsPutGet.groovy
+    groovy -cp lib/gateway-shell-0.7.0.jar;dep/httpclient-4.3.6.jar;dep/httpcore-4.3.3.jar;dep/commons-lang3-3.4.jar;dep/commons-codec-1.7.jar samples/ExampleWebHdfsPutGet.groovy
 
 The exact list of required JARs is likely to change from release to release so it is recommended that you utilize the wrapper `bin/shell.jar`.
 
+In addition, because the DSL can be used via standard Groovy, the Groovy integrations in many popular IDEs (e.g. IntelliJ, Eclipse) can also be used.
+In addition because the DSL can be used via standard Groovy, the Groovy integrations in many popular IDEs (e.g. IntelliJ, Eclipse) can also be used.
 This makes it particularly nice to develop and execute scripts to interact with Hadoop.
 The code-completion features in modern IDEs in particular provide immense value.
-All that is required is to add the gateway-shell-0.4.0.jar to the projects class path.
+All that is required is to add the `gateway-shell-{GATEWAY_VERSION}.jar` to the project's class path.
 
 There are a variety of Groovy tools that make it very easy to work with the standard interchange formats (i.e. JSON and XML).
 In Groovy the creation of XML or JSON is typically done via a "builder" and parsing done via a "slurper".

Modified: knox/trunk/books/0.7.0/book_gateway-details.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_gateway-details.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_gateway-details.md (original)
+++ knox/trunk/books/0.7.0/book_gateway-details.md Fri Jan 15 15:24:45 2016
@@ -39,15 +39,15 @@ The default value for this property is "
 
 Therefore, when deploying the sandbox.xml topology, both of the following example URLs work for the same underlying Hadoop cluster:
 
-	https://{gateway-host}:{gateway-port}/webhdfs
-	https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs
+    https://{gateway-host}:{gateway-port}/webhdfs
+    https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs
 
 These default topology URLs exist for all of the services in the topology.
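+
+For example, with WebHDFS included in the default topology, a directory listing can be requested without the gateway path or cluster name in the URL (host, port and credentials are placeholders):
+
+    curl -i -k -u guest:guest-password "https://{gateway-host}:{gateway-port}/webhdfs/v1/?op=LISTSTATUS"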
 
 #### Fully Qualified URLs #####
-Examples of mappings for the WebHDFS, WebHCat, Oozie and Stargate/HBase are shown below.
+Examples of mappings for WebHDFS, WebHCat, Oozie and HBase are shown below.
 These mappings are generated from the combination of the gateway configuration file (i.e. `{GATEWAY_HOME}/conf/gateway-site.xml`) and the cluster topology descriptors (e.g. `{GATEWAY_HOME}/conf/topologies/{cluster-name}.xml`).
-The port numbers show for the Cluster URLs represent the default ports for these services.
+The port numbers shown for the Cluster URLs represent the default ports for these services.
 The actual port number may be different for a given cluster.
 
 * WebHDFS
@@ -59,11 +59,11 @@ The actual port number may be different
 * Oozie
     * Gateway: `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/oozie`
     * Cluster: `http://{oozie-host}:11000/oozie`
-* Stargate (HBase)
+* HBase
     * Gateway: `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/hbase`
-    * Cluster: `http://{hbase-host}:60080`
+    * Cluster: `http://{hbase-host}:8080`
 * Hive JDBC
-    * Gateway:         jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password}?hive.server2.transport.mode=http;hive.server2.thrift.http.path={gateway-path}/{cluster-name}/hive
+    * Gateway: `jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive`
     * Cluster: `http://{hive-host}:10001/cliservice`
 
 The values for `{gateway-host}`, `{gateway-port}`, `{gateway-path}` are provided via the gateway configuration file (i.e. `{GATEWAY_HOME}/conf/gateway-site.xml`).
@@ -72,9 +72,12 @@ The value for `{cluster-name}` is derive
 
 The value for `{webhdfs-host}`, `{webhcat-host}`, `{oozie-host}`, `{hbase-host}` and `{hive-host}` are provided via the cluster topology descriptor (e.g. `{GATEWAY_HOME}/conf/topologies/{cluster-name}.xml`).
 
-Note: The ports 50070, 50111, 11000, 60080 (default 8080) and 10001 are the defaults for WebHDFS, WebHCat, Oozie, Stargate/HBase and Hive respectively.
+Note: The ports 50070, 50111, 11000, 8080 and 10001 are the defaults for WebHDFS, WebHCat, Oozie, HBase and Hive respectively.
 Their values can also be provided via the cluster topology descriptor if your Hadoop cluster uses different ports.
 
+Note: The HBase REST API uses port 8080 by default. This often clashes with other running services.
+In the Hortonworks Sandbox, Ambari might be running on this port, so you might have to change it to a different port (e.g. 60080).
+
 <<config.md>>
 <<knox_cli.md>>
 <<admin_api.md>>

Modified: knox/trunk/books/0.7.0/book_getting-started.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_getting-started.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_getting-started.md (original)
+++ knox/trunk/books/0.7.0/book_getting-started.md Fri Jan 15 15:24:45 2016
@@ -23,7 +23,7 @@ This section provides everything you nee
 
 An existing Hadoop 2.x cluster is required for Knox 0.7.0 to sit in front of and protect.
 It is possible to use a Hadoop cluster deployed on EC2 but this will require additional configuration not covered here.
-It is also possible to protect access to a services of a Hadoop cluster that is secured with kerberos.
+It is also possible to protect access to the services of a Hadoop cluster that is secured with Kerberos.
 This too requires additional configuration that is described in other sections of this guide.
 See #[Supported Services] for details on what is supported for this release.
 
@@ -76,7 +76,7 @@ This table enumerates the versions of va
 | WebHDFS              | 2.4.0      | ![y]        | ![y]   |![y]|
 | WebHCat/Templeton    | 0.13.0     | ![y]        | ![y]   |![y]|
 | Oozie                | 4.0.0      | ![y]        | ![y]   |![y]|
-| HBase/Stargate       | 0.98.0     | ![y]        | ![y]   |![y]|
+| HBase                | 0.98.0     | ![y]        | ![y]   |![y]|
 | Hive (via WebHCat)   | 0.13.0     | ![y]        | ![y]   |![y]|
 | Hive (via JDBC/ODBC) | 0.13.0     | ![y]        | ![y]   |![y]|
 | Yarn ResourceManager | 2.5.0      | ![y]        | ![y]   |![n]|

Modified: knox/trunk/books/0.7.0/book_knox-samples.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_knox-samples.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_knox-samples.md (original)
+++ knox/trunk/books/0.7.0/book_knox-samples.md Fri Jan 15 15:24:45 2016
@@ -31,7 +31,7 @@ The samples were initially written with
 * The Knox Demo LDAP server is running on localhost and port 33389 which is the default port for the ApacheDS LDAP server.
 * That the LDAP directory in use has a set of demo users provisioned with the convention of username and username-password as the password. Most of the samples have some variation of this pattern with "guest" and "guest-password".
 * That the Knox Gateway instance is running on the same machine which you will be running the samples from - therefore "localhost" and that the default port of "8443" is being used.
-* Finally, that there is a properly provisioned sandbox.xml topology in the {GATEWAY_HOME}/conf/topologies directory that is configured to point to the actual host and ports of running service components.
+* Finally, that there is a properly provisioned sandbox.xml topology in the `{GATEWAY_HOME}/conf/topologies` directory that is configured to point to the actual host and ports of running service components.
 
 #### Steps for Demo Single Node Clusters ####
 
@@ -40,7 +40,7 @@ There should be little to do if anything
 However, the following items will be worth ensuring before you start:
 
 1. The sandbox.xml topology is configured properly for the deployed services
-2. That there is an LDAP server running with guest/guest-password user available in the directory
+2. That there is an LDAP server running with a guest/guest-password user available in the directory
 
 #### Steps for Ambari Deployed Knox Gateway ####
 
@@ -53,17 +53,17 @@ The Knox samples can however be made to
 3. The default.xml topology file can be copied to sandbox.xml in order to satisfy the topology name assumption in the samples.
 4. Be sure to use an actual Java JRE to run the sample with something like:
 
-	  /usr/jdk64/jdk1.7.0_67/bin/java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
+    /usr/jdk64/jdk1.7.0_67/bin/java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
 
 #### Steps for a Manually Installed Knox Gateway ####
 
 For manually installed Knox instances, there is really no way for the installer to know how to configure the topology file for you.
 
-Essentially, these steps are identical to the Ambari deployed instance except that #3 should be replaced with the configuration of the ootb sandbox.xml to point the configuration at the proper hosts and ports.
+Essentially, these steps are identical to the Ambari deployed instance except that #3 should be replaced with configuring the out-of-the-box sandbox.xml to point at the proper hosts and ports.
 
 1. You need to have ssh access to the environment in order for the localhost assumption within the samples to be valid.
 2. The Knox Demo LDAP Server is started - you can start it from Ambari
-3. Change the hosts and ports within the {GATEWAY_HOME}/conf/topologies/sandbox.xml to reflect your actual cluster service locations.
+3. Change the hosts and ports within the `{GATEWAY_HOME}/conf/topologies/sandbox.xml` to reflect your actual cluster service locations.
 4. Be sure to use an actual Java JRE to run the sample with something like:
 
-	  /usr/jdk64/jdk1.7.0_67/bin/java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
\ No newline at end of file
+    /usr/jdk64/jdk1.7.0_67/bin/java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy

Modified: knox/trunk/books/0.7.0/book_limitations.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_limitations.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_limitations.md (original)
+++ knox/trunk/books/0.7.0/book_limitations.md Fri Jan 15 15:24:45 2016
@@ -25,7 +25,7 @@ The exception involves POST or PUT reque
 In this one case there is currently a 4Kb payload size limit for the first request made to the Hadoop cluster.
 This is a result of how the gateway negotiates a trust relationship between itself and the cluster via SPNego.
 There is an undocumented configuration setting to modify this limit's value if required.
-In the future this will be made more easily configuration and at that time it will be documented.
+In the future this will be made more easily configurable and at that time it will be documented.
 
 ### Group Membership Propagation ###
 

Modified: knox/trunk/books/0.7.0/book_service-details.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_service-details.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_service-details.md (original)
+++ knox/trunk/books/0.7.0/book_service-details.md Fri Jan 15 15:24:45 2016
@@ -41,10 +41,10 @@ These are the current Hadoop services wi
 
 This document assumes a few things about your environment in order to simplify the examples.
 
-* The JVM is executable as simply java.
+* The JVM is executable as simply `java`.
 * The Apache Knox Gateway is installed and functional.
-* The example commands are executed within the context of the GATEWAY_HOME current directory.
-The GATEWAY_HOME directory is the directory within the Apache Knox Gateway installation that contains the README file and the bin, conf and deployments directories.
+* The example commands are executed within the context of the `GATEWAY_HOME` current directory.
+The `GATEWAY_HOME` directory is the directory within the Apache Knox Gateway installation that contains the README file and the bin, conf and deployments directories.
 * The [cURL][curl] command line HTTP client utility is installed and functional.
 * A few examples optionally require the use of commands from a standard Groovy installation.
 These examples are optional but to try them you will need Groovy [installed](http://groovy.codehaus.org/Installing+Groovy).
@@ -54,7 +54,7 @@ These examples are optional but to try t
 
 Using these samples with other Hadoop installations will require changes to the steps described here as well as changes to referenced sample scripts.
 This will also likely require changes to the gateway's default configuration.
-In particular host names, ports user names and password may need to be changes to match your environment.
+In particular, host names, ports, user names and passwords may need to be changed to match your environment.
 These changes may need to be made to gateway configuration and also the Groovy sample script files in the distribution.
 All of the values that may need to be customized in the sample scripts can be found together at the top of each of these files.
 
@@ -68,7 +68,7 @@ In particular this form of the cURL comm
 The option -i (aka --include) is used to output HTTP response header information.
 This will be important when the content of the HTTP Location header is required for subsequent requests.
 
-The option -k (aka --insecure) is used to avoid any issues resulting the use of demonstration SSL certificates.
+The option -k (aka --insecure) is used to avoid any issues resulting from the use of demonstration SSL certificates.
 
 The option -u (aka --user) is used to provide the credentials to be used when the client is challenged by the gateway.
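+
+Putting these options together, a typical request (host, port, path, cluster name and credentials are placeholders) looks like this:
+
+    curl -i -k -u guest:guest-password "https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs/v1/?op=LISTSTATUS"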
 
@@ -89,208 +89,208 @@ Therefore each request via cURL will res
 
 The gateway supports a Service Test API that can be used to test Knox's ability to connect to each of the different Hadoop services via a simple HTTP GET request. To be able to access this API one must add the following lines to the topology for which you wish to run the service test.
 
-	<service>
-		<role>SERVICE-TEST</role>
-	</service>
-	
+    <service>
+      <role>SERVICE-TEST</role>
+    </service>
+  
 After adding the above to a topology, you can make a cURL request with the following structure
 
-	curl -i -k "https://{gateway-hostname}:{gateway-port}/gateway/path/{topology-name}/service-test?username=guest&password=guest-password"
+    curl -i -k "https://{gateway-hostname}:{gateway-port}/gateway/path/{topology-name}/service-test?username=guest&password=guest-password"
 
 An alternate method of providing credentials:
-	
-	curl -i -k -u guest:guest-password https://{gateway-hostname}:{gateway-port}/gateway/path/{topology-name}/service-test
-	
+  
+    curl -i -k -u guest:guest-password https://{gateway-hostname}:{gateway-port}/gateway/path/{topology-name}/service-test
+  
 Below is an example response. The gateway is also capable of returning XML if specified in the request's "Accept" HTTP header.
-	
-	{
-	    "serviceTestWrapper": {
-		   "Tests": {
-			  "ServiceTest": [
-				 {
-					"serviceName": "WEBHDFS",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/webhdfs/v1/?op=LISTSTATUS",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHCAT",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/status",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHCAT",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHCAT",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version/hive",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHCAT",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version/hadoop",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "OOZIE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/v1/admin/build-version",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "OOZIE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/v1/admin/status",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "OOZIE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/versions",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHBASE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/version",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHBASE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/version/cluster",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHBASE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/status/cluster",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "WEBHBASE",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "RESOURCEMANAGER",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/info",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "RESOURCEMANAGER",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/metrics",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "RESOURCEMANAGER",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/apps",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "FALCON",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/admin/stack",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "FALCON",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/admin/version",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "FALCON",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/serialize",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "FALCON",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/vertices/all",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "FALCON",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/edges/all",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "STORM",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/cluster/configuration",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "STORM",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/cluster/summary",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "STORM",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/supervisor/summary",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 },
-				 {
-					"serviceName": "STORM",
-					"requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/topology/summary",
-					"responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
-					"httpCode": 200,
-					"message": "Request sucessful."
-				 }
-			  ]
-		   },
-		   "messages": {
-			  "message": [
-
-			  ]
-		   }
-	    }
-	}
+  
+    {
+        "serviceTestWrapper": {
+         "Tests": {
+          "ServiceTest": [
+           {
+            "serviceName": "WEBHDFS",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/webhdfs/v1/?op=LISTSTATUS",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHCAT",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/status",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHCAT",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHCAT",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version/hive",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHCAT",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/templeton/v1/version/hadoop",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "OOZIE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/v1/admin/build-version",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "OOZIE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/v1/admin/status",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "OOZIE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/oozie/versions",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHBASE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/version",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHBASE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/version/cluster",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHBASE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase/status/cluster",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "WEBHBASE",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/hbase",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "RESOURCEMANAGER",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/info",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "RESOURCEMANAGER",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/metrics",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "RESOURCEMANAGER",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/resourcemanager/v1/{topology-name}/apps",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "FALCON",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/admin/stack",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "FALCON",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/admin/version",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "FALCON",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/serialize",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "FALCON",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/vertices/all",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "FALCON",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/falcon/api/metadata/lineage/edges/all",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "STORM",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/cluster/configuration",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "STORM",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/cluster/summary",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "STORM",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/supervisor/summary",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           },
+           {
+            "serviceName": "STORM",
+            "requestURL": "http://{gateway-host}:{gateway-port}/gateway/{topology-name}/storm/api/v1/topology/summary",
+            "responseContent": "Content-Length:0,Content-Type: application/json;charset=utf-8",
+            "httpCode": 200,
+            "message": "Request sucessful."
+           }
+          ]
+         },
+         "messages": {
+          "message": [
+    
+          ]
+         }
+        }
+    }
 
-	
+  
 We can see that this service-test makes HTTP requests to each of the services through Knox using the specified topology. The test will only make calls to those services that have entries within the topology file.
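+
+As noted above, the same report can be returned as XML by setting the Accept header on the request (a sketch using the same placeholders):
+
+    curl -i -k -u guest:guest-password -H Accept:application/xml https://{gateway-hostname}:{gateway-port}/gateway/path/{topology-name}/service-test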
 
 ##### Adding and Changing test URLs
 
-URLs for each service are stored in `{GATEWAY_HOME}/data/services/{service-name}/{service-version}/service.xml`. Each `<testURL>` element represents a service resource that will be tested if the service is set up in the topology. You can add or remove these from teh service.xml files. Just note if you add URL's there is no guarantee in the order they will be tested. All default URLs have been tested and work on various clusters. If a new URL is added and doesn't respond in a way the user expects then it is up to the user to determine whether the URL is correct or not.
+URLs for each service are stored in `{GATEWAY_HOME}/data/services/{service-name}/{service-version}/service.xml`. Each `<testURL>` element represents a service resource that will be tested if the service is set up in the topology. You can add or remove these from the `service.xml` file. Just note that if you add URLs there is no guarantee of the order in which they will be tested. All default URLs have been tested and work on various clusters. If a new URL is added and doesn't respond in the way the user expects, it is up to the user to determine whether the URL is correct.
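+
+As an illustration, a service definition with test URLs might look like the following. The role, version, routes, and paths here are only an example of the shape; check an existing `service.xml` under `{GATEWAY_HOME}/data/services` for the exact structure your Knox version uses.
+
+    <service role="WEBHDFS" name="webhdfs" version="2.4.0">
+        <routes>
+            <route path="/webhdfs/v1/?**"/>
+        </routes>
+        <testURLs>
+            <testURL>/webhdfs/v1/?op=LISTSTATUS</testURL>
+            <testURL>/webhdfs/v1/?op=GETHOMEDIRECTORY</testURL>
+        </testURLs>
+    </service>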
 
 ##### Some important things to note:
  - In the first cURL request, the quotes are necessary around the URL or else a command line terminal will not include the `&password` query parameter in the request.

Modified: knox/trunk/books/0.7.0/book_troubleshooting.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_troubleshooting.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_troubleshooting.md (original)
+++ knox/trunk/books/0.7.0/book_troubleshooting.md Fri Jan 15 15:24:45 2016
@@ -31,7 +31,7 @@ If you want to capture that output you w
 
 #### bin/gateway.sh start ####
 
-When the gateway is run this way the diagnostic output is written to /var/log/knox/knox.out and /var/log/knox/knox.err.
+When the gateway is run this way the diagnostic output is written to `{GATEWAY_HOME}/log/knox.out` and `{GATEWAY_HOME}/log/knox.err`.
 Typically only knox.out will have content.
 
 
@@ -76,17 +76,17 @@ If the gateway cannot contact one of the
 
     13/11/18 18:49:45 WARN hadoop.gateway: Connection exception dispatching request: http://localhost:50070/webhdfs/v1/?user.name=guest&op=LISTSTATUS org.apache.http.conn.HttpHostConnectException: Connection to http://localhost:50070 refused
     org.apache.http.conn.HttpHostConnectException: Connection to http://localhost:50070 refused
-    	at org.apache.http.impl.conn.DefaultClientConnectionOperator.openConnection(DefaultClientConnectionOperator.java:190)
-    	at org.apache.http.impl.conn.ManagedClientConnectionImpl.open(ManagedClientConnectionImpl.java:294)
-    	at org.apache.http.impl.client.DefaultRequestDirector.tryConnect(DefaultRequestDirector.java:645)
-    	at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:480)
-    	at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:906)
-    	at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:805)
-    	at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:784)
-    	at org.apache.hadoop.gateway.dispatch.HttpClientDispatch.executeRequest(HttpClientDispatch.java:99)
+      at org.apache.http.impl.conn.DefaultClientConnectionOperator.openConnection(DefaultClientConnectionOperator.java:190)
+      at org.apache.http.impl.conn.ManagedClientConnectionImpl.open(ManagedClientConnectionImpl.java:294)
+      at org.apache.http.impl.client.DefaultRequestDirector.tryConnect(DefaultRequestDirector.java:645)
+      at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:480)
+      at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:906)
+      at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:805)
+      at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:784)
+      at org.apache.hadoop.gateway.dispatch.HttpClientDispatch.executeRequest(HttpClientDispatch.java:99)
 
 The resulting behavior on the client will differ by client.
-For the client DSL executing the {GATEWAY_HOME}/samples/ExampleWebHdfsLs.groovy the output will look look like this.
+For the client DSL executing the `{GATEWAY_HOME}/samples/ExampleWebHdfsLs.groovy` the output will look like this.
 
     Caught: org.apache.hadoop.gateway.shell.HadoopException: org.apache.hadoop.gateway.shell.ErrorResponse: HTTP/1.1 500 Server Error
     org.apache.hadoop.gateway.shell.HadoopException: org.apache.hadoop.gateway.shell.ErrorResponse: HTTP/1.1 500 Server Error
@@ -111,14 +111,14 @@ When executing commands requests via cUR
 
 Resolving this will require ensuring that the Hadoop services are running and that connection information is correct.
 Basic Hadoop connectivity can be evaluated using cURL as described elsewhere.
-Otherwise the Hadoop cluster connection information is configured in the cluster's topology file (e.g. {GATEWAY_HOME}/deployments/sandbox.xml).
+Otherwise the Hadoop cluster connection information is configured in the cluster's topology file (e.g. `{GATEWAY_HOME}/deployments/sandbox.xml`).
 
 ### HTTP vs HTTPS protocol issues ###
 When Knox is configured to accept requests over SSL and is presented with a request over plain HTTP, the client is presented with an error such as seen in the following:
 
-	curl -i -k -u guest:guest-password -X GET 'http://localhost:8443/gateway/sandbox/webhdfs/v1/?op=LISTSTATUS'
-	the following error is returned
-	curl: (52) Empty reply from server
+    curl -i -k -u guest:guest-password -X GET 'http://localhost:8443/gateway/sandbox/webhdfs/v1/?op=LISTSTATUS'
+    the following error is returned
+    curl: (52) Empty reply from server
 
 This is the default behavior for the Jetty SSL listener. While the credentials to the default authentication provider continue to be username and password, we do not want to encourage sending these in clear text. Since preemptively sending BASIC credentials is a common pattern with REST APIs, it would be unwise to redirect from HTTP to an HTTPS listener, as that would allow clear text passwords on the initial request.
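+
+Switching the scheme to `https` in the same request avoids the empty reply. A sketch using the Sandbox topology and demo credentials:
+
+    curl -i -k -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/webhdfs/v1/?op=LISTSTATUS'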
 
@@ -165,11 +165,11 @@ The client will likely see something alo
     Content-Length: 0
     Server: Jetty(8.1.12.v20130726)
 
-#### Using ldapsearch to verify ldap connectivtiy and credentials
+#### Using ldapsearch to verify LDAP connectivity and credentials
 
-If your authentication to knox fails and you believe your are using correct creedentilas, you could try to verify the connectivity and credentials using ldapsearch, assuming you are using ldap directory for authentication.
+If your authentication to Knox fails and you believe you are using correct credentials, you can verify the connectivity and credentials using ldapsearch, assuming you are using an LDAP directory for authentication.
 
-Assuming you are using the default values that came out of box with knox, your ldapsearch command would be like the following
+Assuming you are using the default values that came out of the box with Knox, your ldapsearch command would look like the following
 
     ldapsearch -h localhost -p 33389 -D "uid=guest,ou=people,dc=hadoop,dc=apache,dc=org" -w guest-password -b "uid=guest,ou=people,dc=hadoop,dc=apache,dc=org" "objectclass=*"
 
@@ -216,7 +216,7 @@ If for example host mapping is disabled
 
     13/11/18 19:11:35 WARN hadoop.gateway: Connection exception dispatching request: http://sandbox.hortonworks.com:50075/webhdfs/v1/user/guest/example/README?op=CREATE&namenoderpcaddress=sandbox.hortonworks.com:8020&user.name=guest&overwrite=false java.net.UnknownHostException: sandbox.hortonworks.com
     java.net.UnknownHostException: sandbox.hortonworks.com
-    	at java.net.Inet6AddressImpl.lookupAllHostAddr(Native Method)
+      at java.net.Inet6AddressImpl.lookupAllHostAddr(Native Method)
 
 On the other hand, if you are migrating from the Sandbox based configuration to a cluster you have deployed, you may see a similar error.
 However in this case you may need to disable host mapping.
@@ -254,9 +254,9 @@ user 'hdfs' can create such a directory
 
 ### Job Submission Issues - OS Accounts ###
 
-If the hadoop cluster is not secured with Kerberos, the user submitting a job need not have an OS account on the hadoop nodemanagers.
+If the Hadoop cluster is not secured with Kerberos, the user submitting a job need not have an OS account on the Hadoop NodeManagers.
 
-If the hadoop cluster is secured with Kerberos, the user submitting the job should have an OS account on hadoop nodemanagers.
+If the Hadoop cluster is secured with Kerberos, the user submitting the job should have an OS account on the Hadoop NodeManagers.
 
 In either case, if the user does not have such an OS account, their file permissions are based on user ownership of files or the "other" permission in "ugo" POSIX permissions.
 The user does not get any file permissions as a member of any group if you are using the default hadoop.security.group.mapping.
@@ -265,7 +265,7 @@ TODO: add sample error message from runn
 
 ### HBase Issues ###
 
-If you experience problems running the HBase samples with the Sandbox VM it may be necessary to restart HBase and Stargate.
+If you experience problems running the HBase samples with the Sandbox VM, it may be necessary to restart HBase and the HBase REST API.
 This can sometimes occur when the Sandbox VM is restarted from a saved state.
 If the client hangs after emitting the last line in the sample output below you are most likely affected.
 
@@ -274,12 +274,12 @@ If the client hangs after emitting the l
     Status : {...}
     Creating table 'test_table'...
 
-HBase and Stargate can be restred using the following commands on the Hadoop Sandbox VM.
+HBase and the HBase REST API can be restarted using the following commands on the Hadoop Sandbox VM.
 You will need to ssh into the VM in order to run these commands.
 
     sudo -u hbase /usr/lib/hbase/bin/hbase-daemon.sh stop master
     sudo -u hbase /usr/lib/hbase/bin/hbase-daemon.sh start master
-    sudo -u hbase /usr/lib/hbase/bin/hbase-daemon.sh restart rest -p 60080
+    sudo -u hbase /usr/lib/hbase/bin/hbase-daemon.sh restart rest
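+
+After the restart, a quick way to confirm that the REST API is responding again is to request its version through Knox. This is only a sketch and assumes the Sandbox topology and the demo credentials:
+
+    curl -ik -u guest:guest-password -H "Accept: application/json" -X GET 'https://localhost:8443/gateway/sandbox/hbase/version'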
 
 
 ### SSL Certificate Issues ###
@@ -303,11 +303,11 @@ Curl will present you with the follow me
 ### SPNego Authentication Issues ###
 
 Calls from Knox to a secure Hadoop cluster fail with SPNego authentication problems
-if there was a TGT for knox in disk cache when Knox was started.
+if there was a TGT for Knox in the disk cache when Knox was started.
 
-You are likely to run into this situation on developer machines where develeoper could have knited for some testing.
+You are likely to run into this situation on developer machines where the developer may have run `kinit` for testing.
 
-Work Around: clear TGT of Knox from disk cache ( calling kdestroy would do it), before starting knox
+Workaround: clear the TGT for Knox from the disk cache (calling `kdestroy` would do it) before starting Knox.
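+
+For example, running the following as the user that starts the gateway will show whether a TGT is cached and clear it before startup (a minimal sketch; the gateway script path depends on your install):
+
+    klist
+    kdestroy
+    bin/gateway.sh start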
 
 ### Filing Bugs ###
 

Modified: knox/trunk/books/0.7.0/book_ui_service_details.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/book_ui_service_details.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/book_ui_service_details.md (original)
+++ knox/trunk/books/0.7.0/book_ui_service_details.md Fri Jan 15 15:24:45 2016
@@ -176,9 +176,9 @@ UI URLs is:
 | Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/hbase/webui/`   |
 | Cluster | `http://{hbase-master-host}:16010/`                                                  |
 
-### Yarn UI ###
+### YARN UI ###
 
-The Yarn UI service can be configured in a topology by adding the following snippet. The values in this sample
+The YARN UI service can be configured in a topology by adding the following snippet. The values in this sample
 are configured to work with an installed Sandbox VM.
 
     <service>
@@ -193,7 +193,7 @@ The values for the host and port can be
         <value>sandbox.hortonworks.com:8088</value>
     </property>
 
-#### Yarn UI URL Mapping ####
+#### YARN UI URL Mapping ####
 
 For Resource Manager UI URLs, the mapping of Knox Gateway accessible Resource Manager UI URLs to direct Resource Manager
 UI URLs is:

Modified: knox/trunk/books/0.7.0/config.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/config.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/config.md (original)
+++ knox/trunk/books/0.7.0/config.md Fri Jan 15 15:24:45 2016
@@ -32,10 +32,10 @@ dispatch requests to the various service
 
 Update `core-site.xml` and add the following lines towards the end of the file.
 
-Replace FQDN_OF_KNOX_HOST with the fully qualified domain name of the host running the gateway.
+Replace `FQDN_OF_KNOX_HOST` with the fully qualified domain name of the host running the Knox gateway.
 You can usually find this by running `hostname -f` on that host.
 
-You could use * for local developer testing if Knox host does not have static IP.
+You can use `*` for local developer testing if the Knox host does not have a static IP.
 
     <property>
         <name>hadoop.proxyuser.knox.groups</name>
@@ -50,8 +50,8 @@ You could use * for local developer test
 
 Update `webhcat-site.xml` and add the following lines towards the end of the file.
 
-Replace FQDN_OF_KNOX_HOST with right value in your cluster.
-You could use * for local developer testing if Knox host does not have static IP.
+Replace `FQDN_OF_KNOX_HOST` with the fully qualified domain name of the host running the Knox gateway.
+You can use `*` for local developer testing if the Knox host does not have a static IP.
 
     <property>
         <name>webhcat.proxyuser.knox.groups</name>
@@ -66,45 +66,45 @@ You could use * for local developer test
 
 Update `oozie-site.xml` and add the following lines towards the end of the file.
 
-Replace FQDN_OF_KNOX_HOST with right value in your cluster.
-You could use * for local developer testing if Knox host does not have static IP.
+Replace `FQDN_OF_KNOX_HOST` with the fully qualified domain name of the host running the Knox gateway.
+You can use `*` for local developer testing if the Knox host does not have a static IP.
 
     <property>
-       <name>oozie.service.ProxyUserService.proxyuser.knox.groups</name>
-       <value>users</value>
+        <name>oozie.service.ProxyUserService.proxyuser.knox.groups</name>
+        <value>users</value>
     </property>
     <property>
-       <name>oozie.service.ProxyUserService.proxyuser.knox.hosts</name>
-       <value>FQDN_OF_KNOX_HOST</value>
+        <name>oozie.service.ProxyUserService.proxyuser.knox.hosts</name>
+        <value>FQDN_OF_KNOX_HOST</value>
     </property>
 
-#### Enable http transport mode and use substitution in Hive Server2 ####
+#### Enable http transport mode and use substitution in HiveServer2 ####
 
-Update `hive-site.xml` and set the following properties on Hive Server2 hosts.
+Update `hive-site.xml` and set the following properties on HiveServer2 hosts.
 Some of the properties may already be in the hive-site.xml. 
 Ensure that the values match the ones below.
 
     <property>
-      <name>hive.server2.allow.user.substitution</name>
-      <value>true</value>
+        <name>hive.server2.allow.user.substitution</name>
+        <value>true</value>
     </property>
 
     <property>
-	    <name>hive.server2.transport.mode</name>
-	    <value>http</value>
-	    <description>Server transport mode. "binary" or "http".</description>
+        <name>hive.server2.transport.mode</name>
+        <value>http</value>
+        <description>Server transport mode. "binary" or "http".</description>
     </property>
 
     <property>
-	    <name>hive.server2.thrift.http.port</name>
-	    <value>10001</value>
-	    <description>Port number when in HTTP mode.</description>
+        <name>hive.server2.thrift.http.port</name>
+        <value>10001</value>
+        <description>Port number when in HTTP mode.</description>
     </property>
 
     <property>
-	    <name>hive.server2.thrift.http.path</name>
-	    <value>cliservice</value>
-	    <description>Path component of URL endpoint when in HTTP mode.</description>
+        <name>hive.server2.thrift.http.path</name>
+        <value>cliservice</value>
+        <description>Path component of URL endpoint when in HTTP mode.</description>
     </property>
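+
+With these properties in place, a Beeline client connecting to HiveServer2 through Knox would use a JDBC URL along these lines. This is a sketch based on the Sandbox defaults; adjust the host, port, gateway path, cluster name, and truststore details for your install, and note that older Hive JDBC drivers use different parameter names for the transport mode and HTTP path.
+
+    !connect jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={GATEWAY_HOME}/data/security/keystores/gateway.jks;trustStorePassword={trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive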
 
 #### Gateway Server Configuration ####
@@ -309,7 +309,10 @@ The Hostmap configuration required to al
                 <role>hostmap</role>
                 <name>static</name>
                 <enabled>true</enabled>
-                <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>
+                <param>
+                    <name>localhost</name>
+                    <value>sandbox,sandbox.hortonworks.com</value>
+                </param>
             </provider>
             ...
         </gateway>
@@ -346,7 +349,7 @@ When mapping from external to internal h
 #### Logging ####
 
 If necessary you can enable additional logging by editing the `log4j.properties` file in the `conf` directory.
-Changing the rootLogger value from `ERROR` to `DEBUG` will generate a large amount of debug logging.
+Changing the `rootLogger` value from `ERROR` to `DEBUG` will generate a large amount of debug logging.
 A number of useful, finer-grained loggers are also provided in the file.
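+
+For example, the change amounts to editing a single line in `conf/log4j.properties`. The appender name `drfa` shown here is an assumption; keep whatever appender your file already references.
+
+    # conf/log4j.properties
+    #log4j.rootLogger=ERROR, drfa
+    log4j.rootLogger=DEBUG, drfa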
 
 
@@ -424,18 +427,18 @@ The following example uses openssl to cr
 
 The next example converts the PKCS12 store into a Java keystore (JKS). It should prompt you for the keystore and key passwords for the destination keystore. You must use the master-secret for the keystore password and keep track of the password that you use for the key passphrase.
 
-    keytool -importkeystore -srckeystore {server.p12} -destkeystore gateway.jks -srcstoretype pkcs12
+    keytool -importkeystore -srckeystore server.p12 -destkeystore gateway.jks -srcstoretype pkcs12
 
 While using this approach there are a couple of important things to be aware of:
 
 1. the alias MUST be "gateway-identity". You may need to change it using keytool after the import of the PKCS12 store. You can use keytool to do this - for example: 
 
-    keytool -changealias -alias "1" -destalias "gateway-identity" -keystore gateway.jks -storepass {knoxpw}
+        keytool -changealias -alias "1" -destalias "gateway-identity" -keystore gateway.jks -storepass {knoxpw}
     
 2. the name of the expected identity keystore for the gateway MUST be gateway.jks
 3. the passwords for the keystore and the imported key may both be set to the master secret for the gateway install. You can change the key passphrase after import using keytool as well. You may need to do this in order to provision the password in the credential store as described later in this section. For example:
 
-    keytool -keypasswd -alias gateway-identity -keystore gateway.jks
+        keytool -keypasswd -alias gateway-identity -keystore gateway.jks
 
 NOTE: The password for the keystore as well as that of the imported key may be the master secret for the gateway instance or you may set the gateway-identity-passphrase alias using the Knox CLI to the actual key passphrase. See the Knox CLI section for details.
 
@@ -464,28 +467,45 @@ You may have to adjust according to your
 
 General steps:
 
-1. stop gateway and back up all files in /var/lib/knox/data/security/keystores  
-gateway.sh stop
-2. create new master key for knox and persist, the master key will be referred to in following steps as $master-key  
-knoxcli.sh create-master -force
-3.  create identity keystore gateway.jks. cert in alias gateway-identity  
-    * cd /var/lib/knox/data/security/keystore  
-    * keytool -genkeypair -alias gateway-identity -keyalg RSA -keysize 1024 -dname "CN=$fqdn_knox,OU=hdp,O=sdge" -keypass $keypass -keystore gateway.jks -storepass $master-key -validity 300  
-NOTE: above $fqdn_knox is the hostname of the knox host. adjust validity as needed. some may choose $keypass to be the same as $master-key
-4. create credential store to store the $keypass in step 3.  this creates __gateway-credentials.jceks file  
-    * knoxcli.sh create-alias gateway-identity-passphrase --value $keypass
-5. generate a certificate signing request from the gateway.jks  
-    * keytool -keystore gateway.jks -storepass $master-key -alias gateway-identity -certreq -file knox.csr
-4. send the knox.csr file to the CA authority and get back the singed certificate, signed cert referred to as knox.signed in following steps. Also need the CA cert, which normally can be requested through openssl command or web browser.  (or can ask the CA authority to send a copy).
-5. import both the CA authority certificate (referred as corporateCA.cer) and the signed knox certificate back into gateway.jks  
-    * keytool -keystore gateway.jks -storepass $master-key -alias $hwhq -import -file corporateCA.cer  
-    * keytool -keystore gateway.jks -storepass $master-key -alias gateway-identity -import -file knox.signed  
-Note: use any alias appropriate for the corporate CA.
-6. restart gateway. check gateway.log to see that gateway started properly and clusters are deployed. Can check the timestamp on cluster deployment files 
-    * ls -alrt /var/lib/knox/data/deployment
-7. verify that clients can use the CA authority cert to access Knox (which is the goal of using public signed cert)  
-    * curl --cacert supwin12ad.cer -u hdptester:hadoop -X GET 'https://$fqdn_knox:8443/gateway/$topologyname/webhdfs/v1/tmp?op=LISTSTATUS'
-or can verify through client browser which already has the corporate CA cert installed.
+1. Stop the Knox gateway and back up all files in `{GATEWAY_HOME}/data/security/keystores`
+
+        gateway.sh stop
+
+2. Create a new master key for Knox and persist it. The master key will be referred to in following steps as `$master-key`
+
+        knoxcli.sh create-master -force
+        
+3. Create the identity keystore `gateway.jks` with the certificate under the alias gateway-identity
+
+        cd {GATEWAY_HOME}/data/security/keystores
+        keytool -genkeypair -alias gateway-identity -keyalg RSA -keysize 1024 -dname "CN=$fqdn_knox,OU=hdp,O=sdge" -keypass $keypass -keystore gateway.jks -storepass $master-key -validity 300  
+
+    NOTE: `$fqdn_knox` is the hostname of the Knox host. Some may choose `$keypass` to be the same as `$master-key`.
+
+4. Create credential store to store the `$keypass` in step 3. This creates `__gateway-credentials.jceks` file
+
+        knoxcli.sh create-alias gateway-identity-passphrase --value $keypass
+        
+5. Generate a certificate signing request from the gateway.jks
+
+        keytool -keystore gateway.jks -storepass $master-key -alias gateway-identity -certreq -file knox.csr
+        
+6. Send the `knox.csr` file to the CA and get back the signed certificate (`knox.signed`). You also need the CA certificate, which can normally be obtained through an openssl command, a web browser, or directly from the CA.
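+
+    For example, one common way to obtain a certificate chain with openssl is to pull it from a TLS endpoint that already presents it; the host and port below are placeholders:
+
+        openssl s_client -showcerts -connect {ca-host}:{ca-port} </dev/null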
+
+7. Import both the CA certificate (referred to as `corporateCA.cer`) and the signed Knox certificate back into `gateway.jks`
+
+        keytool -keystore gateway.jks -storepass $master-key -alias $hwhq -import -file corporateCA.cer  
+        keytool -keystore gateway.jks -storepass $master-key -alias gateway-identity -import -file knox.signed  
+
+    NOTE: Use any alias appropriate for the corporate CA.
+
+8. Restart the Knox gateway. Check `gateway.log` to verify that the gateway started properly and the clusters are deployed. You can check the timestamp on the cluster deployment files
+
+        ls -alrt {GATEWAY_HOME}/data/deployment
+
+9. Verify that clients can use the CA certificate to access Knox (which is the goal of using a publicly signed cert), using curl or a web browser that has the CA certificate installed
+
+        curl --cacert supwin12ad.cer -u hdptester:hadoop -X GET 'https://$fqdn_knox:8443/gateway/$topologyname/webhdfs/v1/tmp?op=LISTSTATUS'
 
 ##### Credential Store #####
 Whenever you provide your own keystore with either a self-signed cert or an issued certificate signed by a trusted authority, you will need to set an alias for the gateway-identity-passphrase or create an empty credential store. This is necessary for the current release in order for the system to determine the correct password for the keystore and the key.
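+
+For example, the alias can be set with the Knox CLI; the value shown is a placeholder for your actual key passphrase:
+
+    knoxcli.sh create-alias gateway-identity-passphrase --value {key-passphrase}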
@@ -510,5 +530,4 @@ Once you have created these keystores yo
 
 NOTE: the SSL certificate will need special consideration depending on the type of certificate. Wildcard certs may be able to be shared across all gateway instances in a cluster.
 When certs are dedicated to specific machines the gateway identity store will not be able to be blindly replicated as host name verification problems will ensue.
-Obviously, trust-stores will need to be taken into account as well.
-
+Obviously, trust-stores will need to be taken into account as well.
\ No newline at end of file