Posted to commits@knox.apache.org by km...@apache.org on 2013/09/27 04:32:26 UTC

svn commit: r1526775 - in /incubator/knox: ./ site/books/knox-incubating-0-3-0/ trunk/ trunk/books/0.3.0/ trunk/markbook/src/main/java/org/apache/hadoop/gateway/markbook/

Author: kminder
Date: Fri Sep 27 02:32:26 2013
New Revision: 1526775

URL: http://svn.apache.org/r1526775
Log:
More book cleanup.

Added:
    incubator/knox/build.xml
Modified:
    incubator/knox/site/books/knox-incubating-0-3-0/knox-incubating-0-3-0.html
    incubator/knox/trunk/books/0.3.0/book.md
    incubator/knox/trunk/books/0.3.0/book_client-details.md
    incubator/knox/trunk/books/0.3.0/book_gateway-details.md
    incubator/knox/trunk/books/0.3.0/book_getting-started.md
    incubator/knox/trunk/books/0.3.0/config.md
    incubator/knox/trunk/books/0.3.0/config_kerberos.md
    incubator/knox/trunk/books/0.3.0/config_sandbox.md
    incubator/knox/trunk/books/0.3.0/service_hbase.md
    incubator/knox/trunk/books/0.3.0/service_oozie.md
    incubator/knox/trunk/books/0.3.0/service_webhcat.md
    incubator/knox/trunk/books/0.3.0/service_webhdfs.md
    incubator/knox/trunk/build.xml
    incubator/knox/trunk/markbook/src/main/java/org/apache/hadoop/gateway/markbook/MarkBook.java

Added: incubator/knox/build.xml
URL: http://svn.apache.org/viewvc/incubator/knox/build.xml?rev=1526775&view=auto
==============================================================================
--- incubator/knox/build.xml (added)
+++ incubator/knox/build.xml Fri Sep 27 02:32:26 2013
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project name="Apache Knox Gateway Site" default="generate" basedir=".">
+
+    <description>
+        Release build file for the Apache Knox Gateway Site and Books
+    </description>
+
+    <target name="generate">
+        <ant dir="trunk" target="site"/>
+    </target>
+
+    <target name="review">
+        <ant dir="trunk" target="review"/>
+    </target>
+
+</project>
+

Modified: incubator/knox/site/books/knox-incubating-0-3-0/knox-incubating-0-3-0.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/books/knox-incubating-0-3-0/knox-incubating-0-3-0.html?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/site/books/knox-incubating-0-3-0/knox-incubating-0-3-0.html (original)
+++ incubator/knox/site/books/knox-incubating-0-3-0/knox-incubating-0-3-0.html Fri Sep 27 02:32:26 2013
@@ -40,9 +40,9 @@
   <li><a href="#Service+Details">Service Details</a>
   <ul>
     <li><a href="#WebHDFS">WebHDFS</a></li>
-    <li><a href="#WebHCat">WebHCat/Templeton</a></li>
+    <li><a href="#WebHCat">WebHCat</a></li>
     <li><a href="#Oozie">Oozie</a></li>
-    <li><a href="#HBase">HBase/Starbase</a></li>
+    <li><a href="#HBase">HBase</a></li>
     <li><a href="#Hive">Hive</a></li>
   </ul></li>
   <li><a href="#Troubleshooting">Troubleshooting</a></li>
@@ -62,7 +62,7 @@
   </ul></li>
 </ul><h2><a id="Getting+Started"></a>Getting Started</h2><p>This section provides everything you need to know to get the gateway up and running against a Sandbox VM Hadoop cluster.</p><h3><a id="Requirements"></a>Requirements</h3><h4><a id="Java"></a>Java</h4><p>Java 1.6 or later is required for the Knox Gateway runtime. Use the command below to check the version of Java installed on the system where Knox will be running.</p>
 <pre><code>java -version
-</code></pre><h4><a id="Hadoop"></a>Hadoop</h4><p>An an existing Hadoop 1.x or 2.x cluster is required for Knox sit in front of and protect. One of the easiest ways to ensure this it to utilize a Hortonworks Sandbox VM. It is possible to use a Hadoop cluster deployed on EC2 but this will require additional configuration not covered here. It is also possible to use a limited set of services in Hadoop cluster secured with Kerberos. This too required additional configuration that is not described here. See the <a href="#Supported+Services">table provided</a> for details on what is supported for this release.</p><p>The Hadoop cluster should be ensured to have at least WebHDFS, WebHCat (i.e. Templeton) and Oozie configured, deployed and running. HBase/Stargate and Hive can also be accessed via the Knox Gateway given the proper versions and configuration.</p><p>The instructions that follow assume a few things:</p>
+</code></pre><h4><a id="Hadoop"></a>Hadoop</h4><p>An existing Hadoop 1.x or 2.x cluster is required for Knox to sit in front of and protect. One of the easiest ways to ensure this is to utilize a Hortonworks Sandbox VM. It is possible to use a Hadoop cluster deployed on EC2 but this will require additional configuration not covered here. It is also possible to use a limited set of services in a Hadoop cluster secured with Kerberos. This too requires additional configuration that is not described here. See <a href="#Supported+Services">Supported Services</a> for details on what is supported for this release.</p><p>The Hadoop cluster should have at least WebHDFS, WebHCat (i.e. Templeton) and Oozie configured, deployed and running. HBase/Stargate and Hive can also be accessed via the Knox Gateway given the proper versions and configuration.</p><p>The instructions that follow assume a few things:</p>
 <ol>
   <li>The gateway is <em>not</em> collocated with the Hadoop clusters themselves.</li>
  <li>The host names and IP addresses of the cluster services are accessible by the gateway wherever it happens to be running.</li>
@@ -227,7 +227,7 @@
 </code></pre><p>Take note of the port identified in the logging output as you will need this for accessing the gateway.</p><p>The server will prompt you for the master secret (i.e. password). This secret is used to secure artifacts used by the gateway server for things like SSL and credential/password aliasing. This secret will have to be entered at startup unless you choose to persist it. See the Persisting the Master Secret section for more information. Remember this secret and keep it safe. It represents the keys to the kingdom.</p><h5><a id="4.+Configure+the+Gateway+with+the+topology+of+your+Hadoop+cluster"></a>4. Configure the Gateway with the topology of your Hadoop cluster</h5><p>Edit the file <code>{GATEWAY_HOME}/deployments/sandbox.xml</code>.</p><p>Change the host and port in the URLs of the <code>&lt;service&gt;</code> elements for WEBHDFS, WEBHCAT, OOZIE, WEBHBASE and HIVE services to match your Hadoop cluster deployment.</p><p>The default configuration contains the LDAP URL for an LDAP server. By default that file is configured to access the demo ApacheDS based LDAP server and its default configuration. The ApacheDS based LDAP server listens on port 33389 by default. Optionally, you can change the LDAP URL for the LDAP server to be used for authentication. This is set via the <code>main.ldapRealm.contextFactory.url</code> property in the <code>&lt;gateway&gt;&lt;provider&gt;&lt;authentication&gt;</code> section. If you use an LDAP system other than the demo LDAP server you may need to change additional configuration as well.</p><p>Save the file. The directory <code>{GATEWAY_HOME}/deployments</code> is monitored by the gateway server. When a new or changed cluster topology descriptor is detected, it will provision the endpoints for the services described in the topology descriptor. Note that the name of the file excluding the extension is also used as the path for that cluster in the URL. For example the <code>sandbox.xml</code> file will result in gateway URLs of the form <code>http://{gateway-host}:{gateway-port}/gateway/sandbox/webhdfs</code>.</p><h5><a id="5.+Test+the+installation"></a>5. Test the installation</h5><p>Invoke the LISTSTATUS operation on WebHDFS via the gateway. This will return a directory listing of the root (i.e. /) directory of HDFS.</p>
 <pre><code>curl -i -k -u bob:bob-password -X GET \
     &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/?op=LISTSTATUS&#39;
-</code></pre><p>The results of the above command should result in something to along the lines of the output below. The exact information returned is subject to the content within HDFS in your Hadoop cluster. Successfully executing this command at a minimum proves that the gateway is properly configured to provide access to WebHDFS. It does not necessarily provide that any of the other services are correct configured to be accessible. To validate that see the sections for the individual services in <a href="#Service+Details">Service Details</a></p>
+</code></pre><p>The results of the above command should be something along the lines of the output below. The exact information returned is subject to the content within HDFS in your Hadoop cluster. Successfully executing this command at a minimum proves that the gateway is properly configured to provide access to WebHDFS. It does not necessarily prove that any of the other services are correctly configured to be accessible. To validate that see the sections for the individual services in <a href="#Service+Details">Service Details</a>.</p>
 <pre><code>HTTP/1.1 200 OK
 Content-Type: application/json
 Content-Length: 760
@@ -247,12 +247,12 @@ Server: Jetty(6.1.26)
   <li>Stargate (HBase) - <a href="http://wiki.apache.org/hadoop/Hbase/Stargate">http://wiki.apache.org/hadoop/Hbase/Stargate</a></li>
 </ul><h3><a id="More+Examples"></a>More Examples</h3><p>These examples provide more detail about how to access various Apache Hadoop services via the Apache Knox Gateway.</p>
 <ul>
-  <li><a href="#WebHDFS+Examples">WebHDFS</a></li>
-  <li><a href="#WebHCat+Examples">WebHCat/Templeton</a></li>
-  <li><a href="#Oozie+Examples">Oozie</a></li>
-  <li><a href="#HBase+Examples">HBase</a></li>
-  <li><a href="#Hive+Examples">Hive</a></li>
-</ul><h2><a id="Gateway+Details"></a>Gateway Details</h2><p>TODO</p><h3><a id="Mapping+Gateway+URLs+to+Hadoop+cluster+URLs"></a>Mapping Gateway URLs to Hadoop cluster URLs</h3><p>The Gateway functions much like a reverse proxy. As such it maintains a mapping of URLs that are exposed externally by the gateway to URLs that are provided by the Hadoop cluster. Examples of mappings for the WebHDFS, WebHCat, Oozie and Stargate/Hive are shown below. These mapping are generated from the combination of the gateway configuration file (i.e. <code>{GATEWAY_HOME}/conf/gateway-site.xml</code>) and the cluster topology descriptors (e.g. <code>{GATEWAY_HOME}/deployments/{cluster-name}.xml</code>).</p>
+  <li><a href="#WebHDFS+Examples">WebHDFS Examples</a></li>
+  <li><a href="#WebHCat+Examples">WebHCat Examples</a></li>
+  <li><a href="#Oozie+Examples">Oozie Examples</a></li>
+  <li><a href="#HBase+Examples">HBase Examples</a></li>
+  <li><a href="#Hive+Examples">Hive Examples</a></li>
+</ul><h2><a id="Gateway+Details"></a>Gateway Details</h2><p>TODO</p><h3><a id="URL+Mapping"></a>URL Mapping</h3><p>The gateway functions much like a reverse proxy. As such it maintains a mapping of URLs that are exposed externally by the gateway to URLs that are provided by the Hadoop cluster. Examples of mappings for WebHDFS, WebHCat, Oozie and Stargate/HBase are shown below. These mappings are generated from the combination of the gateway configuration file (i.e. <code>{GATEWAY_HOME}/conf/gateway-site.xml</code>) and the cluster topology descriptors (e.g. <code>{GATEWAY_HOME}/deployments/{cluster-name}.xml</code>). The port numbers shown for the Cluster URLs represent the default ports for these services. The actual port number may be different for a given cluster.</p>
 <ul>
   <li>WebHDFS
   <ul>
@@ -274,7 +274,7 @@ Server: Jetty(6.1.26)
     <li>Gateway: <code>https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/hbase</code></li>
     <li>Cluster: <code>http://{hbase-host}:60080</code></li>
   </ul></li>
-</ul><p>The values for <code>{gateway-host}</code>, <code>{gateway-port}</code>, <code>{gateway-path}</code> are provided via the Gateway configuration file (i.e. <code>{GATEWAY_HOME}/conf/gateway-site.xml</code>).</p><p>The value for <code>{cluster-name}</code> is derived from the name of the cluster topology descriptor (e.g. <code>{GATEWAY_HOME}/deployments/{cluster-name}.xml</code>).</p><p>The value for <code>{webhdfs-host}</code> and <code>{webhcat-host}</code> are provided via the cluster topology descriptor (e.g. <code>{GATEWAY_HOME}/deployments/{cluster-name}.xml</code>).</p><p>Note: The ports 50070, 50111, 11000 and 60080 are the defaults for WebHDFS, WebHCat, Oozie and Stargate/HBase respectively. Their values can also be provided via the cluster topology descriptor if your Hadoop cluster uses different ports.</p><h3><a id="Configuration"></a>Configuration</h3><h4><a id="Host+Mapping"></a>Host Mapping</h4><p>TODO - Complete Host Mapping docs.</p><p>That really depends upon 
 how you have your VM configured. If you can hit <a href="http://c6401.ambari.apache.org:1022/">http://c6401.ambari.apache.org:1022/</a> directly from your client and knox host then you probably don&rsquo;t need the hostmap at all. The host map only exists for situations where a host in the hadoop cluster is known by one name externally and another internally. For example running hostname -q on sandbox returns sandbox.hortonworks.com but externally Sandbox is setup to be accesses using localhost via portmapping. The way the hostmap config works is that the <name/> element is what the hadoop cluster host is known as externally and the <value/> is how the hadoop cluster host identifies itself internally. <param><name>localhost</name><value>c6401,c6401.ambari.apache.org</value></param> You SHOULD be able to simply change <enabled>true</enabled> to false but I have a suspicion that that might not actually work. Please try it and file a jira if that doesn&rsquo;t work. If so, simply eithe
 r remove the full provider config for hostmap or remove the <param/> that defines the mapping.</p><h4><a id="Logging"></a>Logging</h4><p>If necessary you can enable additional logging by editing the <code>log4j.properties</code> file in the <code>conf</code> directory. Changing the rootLogger value from <code>ERROR</code> to <code>DEBUG</code> will generate a large amount of debug logging. A number of useful, more fine loggers are also provided in the file.</p><h4><a id="Java+VM+Options"></a>Java VM Options</h4><p>TODO - Java VM options doc.</p><h4><a id="Persisting+the+Master+Secret"></a>Persisting the Master Secret</h4><p>The master secret is required to start the server. This secret is used to access secured artifacts by the gateway instance. Keystore, trust stores and credential stores are all protected with the master secret.</p><p>You may persist the master secret by supplying the <em>-persist-master</em> switch at startup. This will result in a warning indicating that persist
 ing the secret is less secure than providing it at startup. We do make some provisions in order to protect the persisted password.</p><p>It is encrypted with AES 128 bit encryption and where possible the file permissions are set to only be accessible by the user that the gateway is running as.</p><p>After persisting the secret, ensure that the file at config/security/master has the appropriate permissions set for your environment. This is probably the most important layer of defense for master secret. Do not assume that the encryption if sufficient protection.</p><p>A specific user should be created to run the gateway this will protect a persisted master file.</p><h4><a id="Management+of+Security+Artifacts"></a>Management of Security Artifacts</h4><p>There are a number of artifacts that are used by the gateway in ensuring the security of wire level communications, access to protected resources and the encryption of sensitive data. These artifacts can be managed from outside of the g
 ateway instances or generated and populated by the gateway instance itself.</p><p>The following is a description of how this is coordinated with both standalone (development, demo, etc) gateway instances and instances as part of a cluster of gateways in mind.</p><p>Upon start of the gateway server we:</p>
+</ul><p>The values for <code>{gateway-host}</code>, <code>{gateway-port}</code>, <code>{gateway-path}</code> are provided via the gateway configuration file (i.e. <code>{GATEWAY_HOME}/conf/gateway-site.xml</code>).</p><p>The value for <code>{cluster-name}</code> is derived from the file name of the cluster topology descriptor (e.g. <code>{GATEWAY_HOME}/deployments/{cluster-name}.xml</code>).</p><p>The values for <code>{webhdfs-host}</code>, <code>{webhcat-host}</code>, <code>{oozie-host}</code> and <code>{hbase-host}</code> are provided via the cluster topology descriptor (e.g. <code>{GATEWAY_HOME}/deployments/{cluster-name}.xml</code>).</p><p>Note: The ports 50070, 50111, 11000 and 60080 are the defaults for WebHDFS, WebHCat, Oozie and Stargate/HBase respectively. Their values can also be provided via the cluster topology descriptor if your Hadoop cluster uses different ports.</p><h3><a id="Configuration"></a>Configuration</h3><h4><a id="Host+Mapping"></a>Host Mapping</h4><p>TODO - Complete Host Mapping docs.</p><p>That really depends upon how you have your VM configured. If you can hit <a href="http://c6401.ambari.apache.org:1022/">http://c6401.ambari.apache.org:1022/</a> directly from your client and knox host then you probably don&rsquo;t need the hostmap at all. The host map only exists for situations where a host in the hadoop cluster is known by one name externally and another internally. For example running hostname -q on sandbox returns sandbox.hortonworks.com but externally Sandbox is set up to be accessed using localhost via port mapping. The way the hostmap config works is that the <name/> element is what the hadoop cluster host is known as externally and the <value/> is how the hadoop cluster host identifies itself internally. <param><name>localhost</name><value>c6401,c6401.ambari.apache.org</value></param> You SHOULD be able to simply change <enabled>true</enabled> to false but I have a suspicion that that might not actually work. Please try it and file a jira if that doesn&rsquo;t work. If so, simply either remove the full provider config for hostmap or remove the <param/> that defines the mapping.</p><h4><a id="Logging"></a>Logging</h4><p>If necessary you can enable additional logging by editing the <code>log4j.properties</code> file in the <code>conf</code> directory. Changing the rootLogger value from <code>ERROR</code> to <code>DEBUG</code> will generate a large amount of debug logging. A number of useful, finer-grained loggers are also provided in the file.</p><h4><a id="Java+VM+Options"></a>Java VM Options</h4><p>TODO - Java VM options doc.</p><h4><a id="Persisting+the+Master+Secret"></a>Persisting the Master Secret</h4><p>The master secret is required to start the server. This secret is used to access secured artifacts by the gateway instance. Keystore, trust stores and credential stores are all protected with the master secret.</p><p>You may persist the master secret by supplying the <em>-persist-master</em> switch at startup. This will result in a warning indicating that persisting the secret is less secure than providing it at startup. We do make some provisions in order to protect the persisted password.</p><p>It is encrypted with AES 128 bit encryption and where possible the file permissions are set to only be accessible by the user that the gateway is running as.</p><p>After persisting the secret, ensure that the file at config/security/master has the appropriate permissions set for your environment. This is probably the most important layer of defense for the master secret. Do not assume that the encryption is sufficient protection.</p><p>A specific user should be created to run the gateway; this will protect a persisted master file.</p><h4><a id="Management+of+Security+Artifacts"></a>Management of Security Artifacts</h4><p>There are a number of artifacts that are used by the gateway in ensuring the security of wire level communications, access to protected resources and the encryption of sensitive data. These artifacts can be managed from outside of the gateway instances or generated and populated by the gateway instance itself.</p><p>The following is a description of how this is coordinated with both standalone (development, demo, etc) gateway instances and instances as part of a cluster of gateways in mind.</p><p>Upon start of the gateway server we:</p>
 <ol>
   <li>Look for an identity store at <code>conf/security/keystores/gateway.jks</code>.  The identity store contains the certificate and private key used to represent the identity of the server for SSL connections and signature creation.
   <ul>
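For reference, a hostmap provider entry of the kind described in the Host Mapping paragraph above might look like the sketch below in a cluster topology descriptor. The param values are the Sandbox example names from that paragraph and the provider name shown is an assumption, so treat this as illustrative rather than a definitive excerpt:

    <!-- Illustrative sketch only. Per the description above, <name> holds the name
         the cluster host is known by externally and <value> the name(s) it uses
         internally; the provider <name>static</name> is assumed here. -->
    <provider>
        <role>hostmap</role>
        <name>static</name>
        <enabled>true</enabled>
        <param>
            <name>localhost</name>
            <value>c6401,c6401.ambari.apache.org</value>
        </param>
    </provider>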
@@ -288,7 +288,7 @@ Server: Jetty(6.1.26)
   </ul></li>
 </ol><p>Upon deployment of a Hadoop cluster topology within the gateway we:</p>
 <ol>
-  <li>Look for a credential store for the topology. For instance, we have a sample topology that gets deployed out of the box. We look for <code>conf/security/keystores/sample-credentials.jceks</code>. This topology specific credential store is used for storing secrets/passwords that are used for encrypting sensitive data with topology specific keys.
+  <li>Look for a credential store for the topology. For instance, we have a sample topology that gets deployed out of the box. We look for <code>conf/security/keystores/sandbox-credentials.jceks</code>. This topology specific credential store is used for storing secrets/passwords that are used for encrypting sensitive data with topology specific keys.
   <ul>
    <li>If no credential store is found for the topology being deployed then one is created for it.  Population of the aliases is delegated to the configured providers within the system that will require the use of a secret for a particular task.  They may programmatically set the value of the secret or choose to have the value for the specified alias generated through the AliasService.</li>
     <li>If a credential store is found then we ensure that it can be loaded with the provided master secret and the configured providers have the opportunity to ensure that the aliases are populated and if not to populate them.</li>
@@ -554,13 +554,13 @@ Server: Jetty(6.1.26)
         &lt;url&gt;http://localhost:10000/&lt;/url&gt;
     &lt;/service&gt;
 &lt;/topology&gt;
-</code></pre><h3><a id="Secure+Clusters"></a>Secure Clusters</h3><p>If your Hadoop cluster is secured with Kerberos authentication, you have to do the following on Knox side.</p><p>Please secure Hadoop services with Keberos authentication.</p><p>Please see instructions at [http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuration_in_Secure_Mode] and [http://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.1/bk_installing_manually_book/content/rpm-chap14.html]</p><h4><a id="Create+Unix+account+for+Knox+on+Hadoop+master+nodes"></a>Create Unix account for Knox on Hadoop master nodes</h4>
+</code></pre><h3><a id="Secure+Clusters"></a>Secure Clusters</h3><p>See these documents for setting up a secure Hadoop cluster <a href="http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuration_in_Secure_Mode">http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuration_in_Secure_Mode</a> <a href="http://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.1/bk_installing_manually_book/content/rpm-chap14.html">http://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.1/bk_installing_manually_book/content/rpm-chap14.html</a></p><p>Once you have a Hadoop cluster that is using Kerberos for authentication, you have to do the following to configure Knox to work with that cluster.</p><h4><a id="Create+Unix+account+for+Knox+on+Hadoop+master+nodes"></a>Create Unix account for Knox on Hadoop master nodes</h4>
 <pre><code>useradd -g hadoop knox
 </code></pre><h4><a id="Create+Kerberos+principal,+keytab+for+Knox"></a>Create Kerberos principal, keytab for Knox</h4><p>One way of doing this, assuming your KDC realm is EXAMPLE.COM</p><p>ssh into your host running KDC</p>
 <pre><code>kadmin.local
 add_principal -randkey knox/knox@EXAMPLE.COM
ktadd -norandkey -k /etc/security/keytabs/knox.service.keytab knox/knox@EXAMPLE.COM
-</code></pre><h4><a id="Grant+Proxy+privileges+for+Knox+user+in+`core-site.xml`+on+Hadoop+master+nodes"></a>Grant Proxy privileges for Knox user in <code>core-site.xml</code> on Hadoop master nodes</h4><p>Update <code>core-site.xml</code> and add the following lines towards the end of the file.</p><p>Please replace FQDN_OF_KNOX_HOST with right value in your cluster. You could use * for local developer testing if Knox host does not have static IP.</p>
+</code></pre><h4><a id="Grant+Proxy+privileges+for+Knox+user+in+`core-site.xml`+on+Hadoop+master+nodes"></a>Grant Proxy privileges for Knox user in <code>core-site.xml</code> on Hadoop master nodes</h4><p>Update <code>core-site.xml</code> and add the following lines towards the end of the file.</p><p>Replace FQDN_OF_KNOX_HOST with right value in your cluster. You could use * for local developer testing if Knox host does not have static IP.</p>
 <pre><code>&lt;property&gt;
     &lt;name&gt;hadoop.proxyuser.knox.groups&lt;/name&gt;
     &lt;value&gt;users&lt;/value&gt;
@@ -569,7 +569,7 @@ ktadd -norandkey -k /etc/security/keytab
     &lt;name&gt;hadoop.proxyuser.knox.hosts&lt;/name&gt;
     &lt;value&gt;FQDN_OF_KNOX_HOST&lt;/value&gt;
 &lt;/property&gt;
-</code></pre><h4><a id="Grant+proxy+privilege+for+Knox+in+`oozie-stie.xml`+on+Oozie+host"></a>Grant proxy privilege for Knox in <code>oozie-stie.xml</code> on Oozie host</h4><p>Update <code>oozie-site.xml</code> and add the following lines towards the end of the file.</p><p>Please replace FQDN_OF_KNOX_HOST with right value in your cluster. You could use * for local developer testing if Knox host does not have static IP.</p>
+</code></pre><h4><a id="Grant+proxy+privilege+for+Knox+in+`oozie-stie.xml`+on+Oozie+host"></a>Grant proxy privilege for Knox in <code>oozie-stie.xml</code> on Oozie host</h4><p>Update <code>oozie-site.xml</code> and add the following lines towards the end of the file.</p><p>Replace FQDN_OF_KNOX_HOST with right value in your cluster. You could use * for local developer testing if Knox host does not have static IP.</p>
 <pre><code>&lt;property&gt;
    &lt;name&gt;oozie.service.ProxyUserService.proxyuser.knox.groups&lt;/name&gt;
    &lt;value&gt;users&lt;/value&gt;
@@ -578,9 +578,9 @@ ktadd -norandkey -k /etc/security/keytab
    &lt;name&gt;oozie.service.ProxyUserService.proxyuser.knox.hosts&lt;/name&gt;
    &lt;value&gt;FQDN_OF_KNOX_HOST&lt;/value&gt;
 &lt;/property&gt;
-</code></pre><h4><a id="Copy+knox+keytab+to+Knox+host"></a>Copy knox keytab to Knox host</h4><p>Please add unix account for knox on Knox host</p>
+</code></pre><h4><a id="Copy+knox+keytab+to+Knox+host"></a>Copy knox keytab to Knox host</h4><p>Add unix account for knox on Knox host</p>
 <pre><code>useradd -g hadoop knox
-</code></pre><p>Please copy knox.service.keytab created on KDC host on to your Knox host /etc/knox/conf/knox.service.keytab</p>
+</code></pre><p>Copy the knox.service.keytab created on the KDC host to /etc/knox/conf/knox.service.keytab on your Knox host</p>
 <pre><code>chown knox knox.service.keytab
 chmod 400 knox.service.keytab
 </code></pre><h4><a id="Update+krb5.conf+at+/etc/knox/conf/krb5.conf+on+Knox+host"></a>Update krb5.conf at /etc/knox/conf/krb5.conf on Knox host</h4><p>You could copy the <code>templates/krb5.conf</code> file provided in the Knox binary download and customize it to suit your cluster.</p><h4><a id="Update+`krb5JAASLogin.conf`+at+`/etc/knox/conf/krb5JAASLogin.conf`+on+Knox+host"></a>Update <code>krb5JAASLogin.conf</code> at <code>/etc/knox/conf/krb5JAASLogin.conf</code> on Knox host</h4><p>You could copy the <code>templates/krb5JAASLogin.conf</code> file provided in the Knox binary download and customize it to suit your cluster.</p><h4><a id="Update+`gateway-site.xml`+on+Knox+host+on+Knox+host"></a>Update <code>gateway-site.xml</code> on Knox host on Knox host</h4><p>Update <code>conf/gateway-site.xml</code> in your Knox installation and set the value of <code>gateway.hadoop.kerberos.secured</code> to true.</p><h4><a id="Restart+Knox"></a>Restart Knox</h4><p>After you do the above con
 figurations and restart Knox, Knox would use SPNego to authenticate with Hadoop services and Oozie. There is not change in the way you make calls to Knox whether you use Curl or Knox DSL.</p><h2><a id="Client+Details"></a>Client Details</h2><p>Hadoop requires a client that can be used to interact remotely with the services provided by Hadoop cluster. This will also be true when using the Apache Knox Gateway to provide perimeter security and centralized access for these services. The two primary existing clients for Hadoop are the CLI (i.e. Command Line Interface, hadoop) and HUE (i.e. Hadoop User Environment). For several reasons however, neither of these clients can <em>currently</em> be used to access Hadoop services via the Apache Knox Gateway.</p><p>This led to thinking about a very simple client that could help people use and evaluate the gateway. The list below outlines the general requirements for such a client.</p>
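For illustration, the gateway-site.xml change described above would take the usual Hadoop-style property form; this is a sketch rather than a verbatim excerpt of the shipped file (only the property name and value come from the text above):

    <!-- Sketch only: add or edit this property in conf/gateway-site.xml on the Knox host. -->
    <property>
        <name>gateway.hadoop.kerberos.secured</name>
        <value>true</value>
    </property>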
@@ -610,7 +610,7 @@ set show-last-result false
 <pre><code>java -jar bin/shell.jar samples/ExamplePutFile.groovy
 </code></pre><h3><a id="Examples"></a>Examples</h3><p>Once the shell can be launched the DSL can be used to interact with the gateway and Hadoop. Below is a very simple example of an interactive shell session to upload a file to HDFS.</p>
 <pre><code>java -jar bin/shell.jar
-knox:000&gt; hadoop = Hadoop.login( &quot;https://localhost:8443/gateway/sample&quot;, &quot;bob&quot;, &quot;bob-password&quot; )
+knox:000&gt; hadoop = Hadoop.login( &quot;https://localhost:8443/gateway/sandbox&quot;, &quot;guest&quot;, &quot;guest-password&quot; )
 knox:000&gt; Hdfs.put( hadoop ).file( &quot;README&quot; ).to( &quot;/tmp/example/README&quot; ).now()
 </code></pre><p>The <code>knox:000&gt;</code> in the example above is the prompt from the embedded Groovy console. If your output doesn&rsquo;t look like this you may need to set the verbosity and show-last-result preferences as described above in the Usage section.</p><p>If you receive an error <code>HTTP/1.1 403 Forbidden</code> it may be because that file already exists. Try deleting it with the following command and then try again.</p>
 <pre><code>knox:000&gt; Hdfs.rm(hadoop).file(&quot;/tmp/example/README&quot;).now()
@@ -638,7 +638,7 @@ knox:000&gt; exit
 import org.apache.hadoop.gateway.shell.hdfs.Hdfs
 import groovy.json.JsonSlurper
 
-gateway = &quot;https://localhost:8443/gateway/sample&quot;
+gateway = &quot;https://localhost:8443/gateway/sandbox&quot;
 username = &quot;bob&quot;
 password = &quot;bob-password&quot;
 dataFile = &quot;README&quot;
@@ -866,7 +866,7 @@ bin/shell.cfg
 <pre><code>main.class=org.apache.hadoop.gateway.shell.Shell
 class.path=../lib; ../lib/*.jar; ../ext; ../ext/*.jar
 </code></pre><p>Therefore to extend the shell you should copy any new service and command class either to the <code>ext</code> directory or, if they are packaged within a JAR, copy the JAR to the <code>ext</code> directory. The <code>lib</code> directory is reserved for JARs that may be delivered with the product.</p><p>Below are samples for the service and command classes that would need to be written to add new commands to the shell. These happen to be Groovy source files but could with very minor changes be Java files. The easiest way to add these to the shell is to compile them directly into the <code>ext</code> directory. <em>Note: This command depends upon having the Groovy compiler installed and available on the execution path.</em></p>
-<pre><code>groovyc \-d ext \-cp bin/shell.jar samples/SampleService.groovy samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
+<pre><code>groovyc -d ext -cp bin/shell.jar samples/SampleService.groovy samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
 </code></pre><p>These source files are available in the samples directory of the distribution but these are included here for convenience.</p><h4><a id="Sample+Service+(Groovy)"></a>Sample Service (Groovy)</h4>
 <pre><code>import org.apache.hadoop.gateway.shell.Hadoop
 
@@ -1034,7 +1034,7 @@ dep/commons-codec-1.7.jar
 import org.apache.hadoop.gateway.shell.Hadoop
 import org.apache.hadoop.gateway.shell.hdfs.Hdfs
 
-gateway = &quot;https://localhost:8443/gateway/sample&quot;
+gateway = &quot;https://localhost:8443/gateway/sandbox&quot;
 username = &quot;bob&quot;
 password = &quot;bob-password&quot;
 dataFile = &quot;README&quot;
@@ -1052,11 +1052,11 @@ session.shutdown()
 </code></pre><h4><a id="WebHDFS+via+cURL"></a>WebHDFS via cURL</h4>
 <pre><code># 1. Optionally cleanup the sample directory in case a previous example was run without cleaning up.
 curl -i -k -u bob:bob-password -X DELETE \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
 
 # 2. Create the inode for a sample input file readme.txt in /tmp/test/input.
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE&#39;
 
 # 3. Upload readme.txt to /tmp/test/input.  Use the readme.txt in {GATEWAY_HOME}.
 # The sample below uses this README file found in {GATEWAY_HOME}.
@@ -1065,11 +1065,11 @@ curl -i -k -u bob:bob-password -T README
 
 # 4. List the contents of the output directory /tmp/test/output
 curl -i -k -u bob:bob-password -X GET \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input?op=LISTSTATUS&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input?op=LISTSTATUS&#39;
 
 # 5. Optionally cleanup the test directory
 curl -i -k -u bob:bob-password -X DELETE \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
 </code></pre><h3><a id="WebHCat"></a>WebHCat</h3><p>TODO</p><h4><a id="WebHCat+URL+Mapping"></a>WebHCat URL Mapping</h4><p>TODO</p><h4><a id="WebHCat+Examples"></a>WebHCat Examples</h4><p>TODO</p><h4><a id="Assumptions"></a>Assumptions</h4><p>This document assumes a few things about your environment in order to simplify the examples.</p>
 <ul>
   <li>The JVM is executable as simply java.</li>
@@ -1092,9 +1092,9 @@ import org.apache.hadoop.gateway.shell.j
 
 import static java.util.concurrent.TimeUnit.SECONDS
 
-gateway = &quot;https://localhost:8443/gateway/sample&quot;
-username = &quot;bob&quot;
-password = &quot;bob-password&quot;
+gateway = &quot;https://localhost:8443/gateway/sandbox&quot;
+username = &quot;guest&quot;
+password = &quot;guest-password&quot;
 dataFile = &quot;LICENSE&quot;
 jarFile = &quot;samples/hadoop-examples.jar&quot;
 
@@ -1139,7 +1139,7 @@ exit
 <ul>
   <li>samples/ExampleSubmitJob.groovy</li>
   <li>samples/ExampleSubmitWorkflow.groovy</li>
-</ul><p>If you are using the Sandbox VM for your Hadoop cluster you may want to review <a href="#Sandbox+Configuration">these configuration tips</a>.</p><h4><a id="Example+#2:+WebHDFS+&+Oozie+via+KnoxShell+DSL"></a>Example #2: WebHDFS &amp; Oozie via KnoxShell DSL</h4><p>This example will also submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL. However in this case the job will be submitted via a Oozie workflow. There are several ways to do this depending upon your preference.</p><p>You can use the &ldquo;embedded&rdquo; Groovy interpreter provided with the distribution.</p>
+</ul><p>If you are using the Sandbox VM for your Hadoop cluster you may want to review <a href="#Sandbox+Configuration">Sandbox Configuration</a>.</p><h4><a id="Example+#2:+WebHDFS+&+Oozie+via+KnoxShell+DSL"></a>Example #2: WebHDFS &amp; Oozie via KnoxShell DSL</h4><p>This example will also submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL. However in this case the job will be submitted via an Oozie workflow. There are several ways to do this depending upon your preference.</p><p>You can use the &ldquo;embedded&rdquo; Groovy interpreter provided with the distribution.</p>
 <pre><code>java -jar bin/shell.jar samples/ExampleSubmitWorkflow.groovy
 </code></pre><p>You can manually type in the KnoxShell DSL script into the &ldquo;embedded&rdquo; Groovy interpreter provided with the distribution.</p>
 <pre><code>java -jar bin/shell.jar
@@ -1151,7 +1151,7 @@ import org.apache.hadoop.gateway.shell.w
 
 import static java.util.concurrent.TimeUnit.SECONDS
 
-gateway = &quot;https://localhost:8443/gateway/sample&quot;
+gateway = &quot;https://localhost:8443/gateway/sandbox&quot;
 jobTracker = &quot;sandbox:50300&quot;;
 nameNode = &quot;sandbox:8020&quot;;
 username = &quot;bob&quot;
@@ -1223,26 +1223,26 @@ exit
 </code></pre><h4><a id="Example+#3:+WebHDFS+&+Templeton/WebHCat+via+cURL"></a>Example #3: WebHDFS &amp; Templeton/WebHCat via cURL</h4><p>The example below illustrates the sequence of curl commands that could be used to run a &ldquo;word count&rdquo; map reduce job. It utilizes the hadoop-examples.jar from a Hadoop install for running a simple word count job. A copy of that jar has been included in the samples directory for convenience. Take care to follow the instructions below for steps 4/5 and 6/7 where the Location header returned by the call to the NameNode is copied for use with the call to the DataNode that follows it. These replacement values are identified with { } markup.</p>
 <pre><code># 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
 curl -i -k -u bob:bob-password -X DELETE \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
 
 # 1. Create a test input directory /tmp/test/input
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input?op=MKDIRS&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input?op=MKDIRS&#39;
 
 # 2. Create a test output directory /tmp/test/input
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=MKDIRS&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=MKDIRS&#39;
 
 # 3. Create the inode for hadoop-examples.jar in /tmp/test
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/hadoop-examples.jar?op=CREATE&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/hadoop-examples.jar?op=CREATE&#39;
 
 # 4. Upload hadoop-examples.jar to /tmp/test.  Use a hadoop-examples.jar from a Hadoop install.
 curl -i -k -u bob:bob-password -T samples/hadoop-examples.jar -X PUT &#39;{Value of Location header from command above}&#39;
 
 # 5. Create the inode for a sample file README in /tmp/test/input
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE&#39;
 
 # 6. Upload readme.txt to /tmp/test/input.  Use the readme.txt in {GATEWAY_HOME}.
 curl -i -k -u bob:bob-password -T README -X PUT &#39;{Value of Location header from command above}&#39;
@@ -1264,19 +1264,19 @@ curl -i -k -u bob:bob-password -X GET \
 
 # 10. List the contents of the output directory /tmp/test/output
 curl -i -k -u bob:bob-password -X GET \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=LISTSTATUS&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=LISTSTATUS&#39;
 
 # 11. Optionally cleanup the test directory
 curl -i -k -u bob:bob-password -X DELETE \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
 </code></pre><h4><a id="Example+#4:+WebHDFS+&+Oozie+via+cURL"></a>Example #4: WebHDFS &amp; Oozie via cURL</h4><p>The example below illustrates the sequence of curl commands that could be used to run a &ldquo;word count&rdquo; map reduce job via an Oozie workflow. It utilizes the hadoop-examples.jar from a Hadoop install for running a simple word count job. A copy of that jar has been included in the samples directory for convenience. Take care to follow the instructions below where replacement values are required. These replacement values are identified with { } markup.</p>
 <pre><code># 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
 curl -i -k -u bob:bob-password -X DELETE \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
 
 # 1. Create the inode for workflow definition file in /tmp/test
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/workflow.xml?op=CREATE&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/workflow.xml?op=CREATE&#39;
 
 # 2. Upload the workflow definition file.  This file can be found in {GATEWAY_HOME}/templates
 curl -i -k -u bob:bob-password -T templates/workflow-definition.xml -X PUT \
@@ -1284,7 +1284,7 @@ curl -i -k -u bob:bob-password -T templa
 
 # 3. Create the inode for hadoop-examples.jar in /tmp/test/lib
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/lib/hadoop-examples.jar?op=CREATE&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/lib/hadoop-examples.jar?op=CREATE&#39;
 
 # 4. Upload hadoop-examples.jar to /tmp/test/lib.  Use a hadoop-examples.jar from a Hadoop install.
 curl -i -k -u bob:bob-password -T samples/hadoop-examples.jar -X PUT \
@@ -1292,7 +1292,7 @@ curl -i -k -u bob:bob-password -T sample
 
 # 5. Create the inode for a sample input file readme.txt in /tmp/test/input.
 curl -i -k -u bob:bob-password -X PUT \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE&#39;
 
 # 6. Upload readme.txt to /tmp/test/input.  Use the readme.txt in {GATEWAY_HOME}.
 # The sample below uses this README file found in {GATEWAY_HOME}.
@@ -1320,11 +1320,11 @@ curl -i -k -u bob:bob-password -X GET \
 
 # 10. List the contents of the output directory /tmp/test/output
 curl -i -k -u bob:bob-password -X GET \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=LISTSTATUS&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=LISTSTATUS&#39;
 
 # 11. Optionally cleanup the test directory
 curl -i -k -u bob:bob-password -X DELETE \
-    &#39;https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
+    &#39;https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&amp;recursive=true&#39;
 </code></pre><h3><a id="HBase"></a>HBase</h3><p>TODO</p><h4><a id="HBase+URL+Mapping"></a>HBase URL Mapping</h4><p>TODO</p><h4><a id="HBase+Examples"></a>HBase Examples</h4><p>TODO</p><p>The examples below illustrate the set of basic operations with HBase instance using Stargate REST API. Use following link to get more more details about HBase/Stargate API: <a href="http://wiki.apache.org/hadoop/Hbase/Stargate">http://wiki.apache.org/hadoop/Hbase/Stargate</a>.</p><h3><a id="Assumptions"></a>Assumptions</h3><p>This document assumes a few things about your environment in order to simplify the examples.</p>
 <ol>
   <li>The JVM is executable as simply java.</li>
@@ -1333,7 +1333,7 @@ curl -i -k -u bob:bob-password -X DELETE
  <li>A few examples optionally require the use of commands from a standard Groovy installation. These examples are optional but to try them you will need Groovy <a href="http://groovy.codehaus.org/Installing+Groovy">installed</a>.</li>
 </ol><h3><a id="HBase+Stargate+Setup"></a>HBase Stargate Setup</h3><h4><a id="Launch+Stargate"></a>Launch Stargate</h4><p>The command below launches the Stargate daemon on port 60080</p>
 <pre><code>sudo /usr/lib/hbase/bin/hbase-daemon.sh start rest -p 60080
-</code></pre><p>60080 post is used because it was specified in sample Hadoop cluster deployment <code>{GATEWAY_HOME}/deployments/sample.xml</code>.</p><h4><a id="Configure+Sandbox+port+mapping+for+VirtualBox"></a>Configure Sandbox port mapping for VirtualBox</h4>
+</code></pre><p>Port 60080 is used because it was specified in the sample Hadoop cluster deployment <code>{GATEWAY_HOME}/deployments/sandbox.xml</code>.</p><h4><a id="Configure+Sandbox+port+mapping+for+VirtualBox"></a>Configure Sandbox port mapping for VirtualBox</h4>
 <ol>
   <li>Select the VM</li>
   <li>Select menu Machine&gt;Settings&hellip;</li>
@@ -1343,7 +1343,7 @@ curl -i -k -u bob:bob-password -X DELETE
   <li>Press Plus button to insert new rule: Name=Stargate, Host Port=60080, Guest Port=60080</li>
   <li>Press OK to close the rule window</li>
  <li>Press OK on the Network window to save the changes</li>
-</ol><p>60080 post is used because it was specified in sample Hadoop cluster deployment <code>{GATEWAY_HOME}/deployments/sample.xml</code>.</p><h3><a id="HBase/Stargate+via+KnoxShell+DSL"></a>HBase/Stargate via KnoxShell DSL</h3><h4><a id="Usage"></a>Usage</h4><p>For more details about client DSL usage please follow this [page|https://cwiki.apache.org/confluence/display/KNOX/Client+Usage].</p><h5><a id="systemVersion()+-+Query+Software+Version."></a>systemVersion() - Query Software Version.</h5>
+</ol><p>Port 60080 is used because it was specified in the sample Hadoop cluster deployment <code>{GATEWAY_HOME}/deployments/sandbox.xml</code>.</p><h3><a id="HBase/Stargate+via+KnoxShell+DSL"></a>HBase/Stargate via KnoxShell DSL</h3><h4><a id="Usage"></a>Usage</h4><p>For more details about client DSL usage please follow <a href="https://cwiki.apache.org/confluence/display/KNOX/Client+Usage">this page</a>.</p><h5><a id="systemVersion()+-+Query+Software+Version."></a>systemVersion() - Query Software Version.</h5>
 <ul>
   <li>Request
   <ul>
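As a side note on the Stargate port discussion above, the corresponding entry in the sandbox topology descriptor would presumably look something like the following sketch; the WEBHBASE role name comes from the service list earlier in the book and the host is illustrative:

    <!-- Sketch of the Stargate/HBase service entry in {GATEWAY_HOME}/deployments/sandbox.xml.
         Adjust the host to match your cluster; 60080 matches the port used above. -->
    <service>
        <role>WEBHBASE</role>
        <url>http://localhost:60080</url>
    </service>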

Modified: incubator/knox/trunk/books/0.3.0/book.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/book.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/book.md (original)
+++ incubator/knox/trunk/books/0.3.0/book.md Fri Sep 27 02:32:26 2013
@@ -28,29 +28,29 @@
 
 ## Table Of Contents ##
 
-* [Introduction](#Introduction)
-* [Getting Started](#Getting+Started)
-    * [Requirements](#Requirements)
-    * [Download](#Download)
-    * [Verify](#Verify)
-    * [Install](#Install)
-    * [Supported Services](#Supported+Services)
-    * [Basic Usage](#Basic+Usage)
-    * [Sandbox Configuration](#Sandbox+Configuration)
-* [Gateway Details](#Gateway+Details)
-    * [Authentication](#Authentication)
-    * [Authorization](#Authorization)
-    * [Configuration](#Configuration)
-    * [Secure Clusters](#Secure+Clusters)
-* [Client Details](#Client+Details)
-* [Service Details](#Service+Details)
-    * [WebHDFS](#WebHDFS)
-    * [WebHCat/Templeton](#WebHCat)
-    * [Oozie](#Oozie)
-    * [HBase/Starbase](#HBase)
-    * [Hive](#Hive)
-* [Troubleshooting](#Troubleshooting)
-* [Export Controls](#Export+Controls)
+* #[Introduction]
+* #[Getting Started]
+    * #[Requirements]
+    * #[Download]
+    * #[Verify]
+    * #[Install]
+    * #[Supported Services]
+    * #[Basic Usage]
+    * #[Sandbox Configuration]
+* #[Gateway Details]
+    * #[Authentication]
+    * #[Authorization]
+    * #[Configuration]
+    * #[Secure Clusters]
+* #[Client Details]
+* #[Service Details]
+    * #[WebHDFS]
+    * #[WebHCat]
+    * #[Oozie]
+    * #[HBase]
+    * #[Hive]
+* #[Troubleshooting]
+* #[Export Controls]
 
 
 ## Introduction ##

Modified: incubator/knox/trunk/books/0.3.0/book_client-details.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/book_client-details.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/book_client-details.md (original)
+++ incubator/knox/trunk/books/0.3.0/book_client-details.md Fri Sep 27 02:32:26 2013
@@ -90,7 +90,7 @@ Once the shell can be launched the DSL c
 Below is a very simple example of an interactive shell session to upload a file to HDFS.
 
     java -jar bin/shell.jar
-    knox:000> hadoop = Hadoop.login( "https://localhost:8443/gateway/sample", "bob", "bob-password" )
+    knox:000> hadoop = Hadoop.login( "https://localhost:8443/gateway/sandbox", "guest", "guest-password" )
     knox:000> Hdfs.put( hadoop ).file( "README" ).to( "/tmp/example/README" ).now()
 
 The `knox:000>` in the example above is the prompt from the embedded Groovy console.
@@ -148,7 +148,7 @@ This script file is available in the dis
     import org.apache.hadoop.gateway.shell.hdfs.Hdfs
     import groovy.json.JsonSlurper
     
-    gateway = "https://localhost:8443/gateway/sample"
+    gateway = "https://localhost:8443/gateway/sandbox"
     username = "bob"
     password = "bob-password"
     dataFile = "README"
@@ -472,7 +472,7 @@ These happen to be Groovy source files b
 The easiest way to add these to the shell is to compile them directly into the `ext` directory.
 *Note: This command depends upon having the Groovy compiler installed and available on the execution path.*
 
-    groovyc \-d ext \-cp bin/shell.jar samples/SampleService.groovy samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
+    groovyc -d ext -cp bin/shell.jar samples/SampleService.groovy samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
 
 These source files are available in the samples directory of the distribution but these are included here for convenience.
 

Modified: incubator/knox/trunk/books/0.3.0/book_gateway-details.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/book_gateway-details.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/book_gateway-details.md (original)
+++ incubator/knox/trunk/books/0.3.0/book_gateway-details.md Fri Sep 27 02:32:26 2013
@@ -19,12 +19,14 @@
 
 TODO
 
-### Mapping Gateway URLs to Hadoop cluster URLs
+### URL Mapping ###
 
-The Gateway functions much like a reverse proxy.
+The gateway functions much like a reverse proxy.
 As such it maintains a mapping of URLs that are exposed externally by the gateway to URLs that are provided by the Hadoop cluster.
-Examples of mappings for the WebHDFS, WebHCat, Oozie and Stargate/Hive are shown below.
+Examples of mappings for WebHDFS, WebHCat, Oozie and Stargate/HBase are shown below.
 These mappings are generated from the combination of the gateway configuration file (i.e. `{GATEWAY_HOME}/conf/gateway-site.xml`) and the cluster topology descriptors (e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
+The port numbers shown for the Cluster URLs represent the default ports for these services.
+The actual port number may be different for a given cluster.
 
 * WebHDFS
     * Gateway: `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs`
@@ -39,11 +41,11 @@ These mapping are generated from the com
     * Gateway: `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/hbase`
     * Cluster: `http://{hbase-host}:60080`
 
-The values for `{gateway-host}`, `{gateway-port}`, `{gateway-path}` are provided via the Gateway configuration file (i.e. `{GATEWAY_HOME}/conf/gateway-site.xml`).
+The values for `{gateway-host}`, `{gateway-port}`, `{gateway-path}` are provided via the gateway configuration file (i.e. `{GATEWAY_HOME}/conf/gateway-site.xml`).
 
-The value for `{cluster-name}` is derived from the name of the cluster topology descriptor (e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
+The value for `{cluster-name}` is derived from the file name of the cluster topology descriptor (e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
 
-The value for `{webhdfs-host}` and `{webhcat-host}` are provided via the cluster topology descriptor (e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
+The values for `{webhdfs-host}`, `{webhcat-host}`, `{oozie-host}` and `{hbase-host}` are provided via the cluster topology descriptor (e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
 
 Note: The ports 50070, 50111, 11000 and 60080 are the defaults for WebHDFS, WebHCat, Oozie and Stargate/HBase respectively.
 Their values can also be provided via the cluster topology descriptor if your Hadoop cluster uses different ports.
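For illustration, a single service entry of the kind referenced above might look like the sketch below inside `{GATEWAY_HOME}/deployments/{cluster-name}.xml`; the host is a placeholder and the port is the WebHDFS default noted above:

    <!-- Sketch of one <service> element in a cluster topology descriptor.
         Replace the host (and the port, if non-default) to match your cluster. -->
    <service>
        <role>WEBHDFS</role>
        <url>http://{webhdfs-host}:50070/webhdfs</url>
    </service>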

Modified: incubator/knox/trunk/books/0.3.0/book_getting-started.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/book_getting-started.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/book_getting-started.md (original)
+++ incubator/knox/trunk/books/0.3.0/book_getting-started.md Fri Sep 27 02:32:26 2013
@@ -36,7 +36,7 @@ One of the easiest ways to ensure this i
 It is possible to use a Hadoop cluster deployed on EC2 but this will require additional configuration not covered here.
 It is also possible to use a limited set of services in a Hadoop cluster secured with Kerberos.
 This too requires additional configuration that is not described here.
-See the [table provided](#Supported+Services) for details on what is supported for this release.
+See #[Supported Services] for details on what is supported for this release.
 
 The Hadoop cluster should have at least WebHDFS, WebHCat (i.e. Templeton) and Oozie configured, deployed and running.
 HBase/Stargate and Hive can also be accessed via the Knox Gateway given the proper versions and configuration.
@@ -168,8 +168,8 @@ Only more recent versions of some Hadoop
 
 The steps described below are intended to get the Knox Gateway server up and running in its default configuration.
 Once that is accomplished a very simple example of using the gateway to interact with a Hadoop cluster is provided.
-More detailed configuration information is provided in the [Gateway Details](#Gateway+Details) section.
-More detailed examples for using each Hadoop service can be found in the [Service Details](#Service+Details) section.
+More detailed configuration information is provided in the #[Gateway Details] section.
+More detailed examples for using each Hadoop service can be found in the #[Service Details] section.
 
 Note that *nix conventions are used throughout this section but in general the Windows alternative should be obvious.
 In situations where this is not the case a Windows alternative will be provided.
@@ -248,7 +248,7 @@ The results of the above command should 
 The exact information returned is subject to the content within HDFS in your Hadoop cluster.
 Successfully executing this command at a minimum proves that the gateway is properly configured to provide access to WebHDFS.
 It does not necessarily prove that any of the other services are correctly configured to be accessible.
-To validate that see the sections for the individual services in [Service Details](#Service+Details)
+To validate that see the sections for the individual services in #[Service Details].
 
     HTTP/1.1 200 OK
     Content-Type: application/json
@@ -273,8 +273,8 @@ For additional information on WebHDFS, W
 
 These examples provide more detail about how to access various Apache Hadoop services via the Apache Knox Gateway.
 
-* [WebHDFS](#WebHDFS+Examples)
-* [WebHCat/Templeton](#WebHCat+Examples)
-* [Oozie](#Oozie+Examples)
-* [HBase](#HBase+Examples)
-* [Hive](#Hive+Examples)
+* #[WebHDFS Examples]
+* #[WebHCat Examples]
+* #[Oozie Examples]
+* #[HBase Examples]
+* #[Hive Examples]
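
Before diving into those per-service examples, the WebHDFS validation call shown earlier can also be approximated outside of cURL. The sketch below is a minimal, hedged example: it assumes the sandbox topology, the bob/bob-password demo account used in the curl examples, and a JVM that already trusts the gateway's self-signed certificate (the curl examples use `-k` for that); the class name is ours.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import javax.xml.bind.DatatypeConverter;

    // Minimal sketch of the WebHDFS validation call made through the gateway.
    // Assumes the sandbox topology and a trust store that accepts the gateway
    // certificate; adjust the credentials to match your topology.
    public class GatewayWebHdfsCheck {
      public static void main( String[] args ) throws Exception {
        URL url = new URL( "https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS" );
        HttpURLConnection conn = (HttpURLConnection)url.openConnection();
        String creds = DatatypeConverter.printBase64Binary( "bob:bob-password".getBytes( "UTF-8" ) );
        conn.setRequestProperty( "Authorization", "Basic " + creds );
        System.out.println( "HTTP " + conn.getResponseCode() ); // expect 200 when WebHDFS is reachable
        BufferedReader in = new BufferedReader( new InputStreamReader( conn.getInputStream() ) );
        String line;
        while( ( line = in.readLine() ) != null ) {
          System.out.println( line ); // the JSON FileStatuses listing
        }
        in.close();
      }
    }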

Modified: incubator/knox/trunk/books/0.3.0/config.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/config.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/config.md (original)
+++ incubator/knox/trunk/books/0.3.0/config.md Fri Sep 27 02:32:26 2013
@@ -86,7 +86,7 @@ Upon start of the gateway server we:
 
 Upon deployment of a Hadoop cluster topology within the gateway we:
 
-1. Look for a credential store for the topology. For instance, we have a sample topology that gets deployed out of the box.  We look for `conf/security/keystores/sample-credentials.jceks`. This topology specific credential store is used for storing secrets/passwords that are used for encrypting sensitive data with topology specific keys.
+1. Look for a credential store for the topology. For instance, we have a sample topology that gets deployed out of the box.  We look for `conf/security/keystores/sandbox-credentials.jceks`. This topology-specific credential store is used for storing secrets/passwords that are used for encrypting sensitive data with topology-specific keys.
     * If no credential store is found for the topology being deployed then one is created for it.
      Population of the aliases is delegated to the configured providers within the system that will require the use of a secret for a particular task.
      They may programmatically set the value of the secret or choose to have the value for the specified alias generated through the AliasService.
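
The lookup-or-create behaviour described in step 1 can be pictured with plain JDK keystore calls. This is a simplified sketch, not the Knox implementation: the path layout follows the description above, while the master-secret handling and the class name are assumptions for the sake of the example.

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.security.KeyStore;

    // Simplified illustration of "look for a topology credential store and
    // create one if it is missing". Not the Knox code; the master secret
    // handling here is an assumption for the example.
    public class CredentialStoreSketch {
      public static KeyStore loadOrCreate( String clusterName, char[] master ) throws Exception {
        File file = new File( "conf/security/keystores", clusterName + "-credentials.jceks" );
        KeyStore keystore = KeyStore.getInstance( "JCEKS" );
        if( file.exists() ) {
          FileInputStream in = new FileInputStream( file );
          keystore.load( in, master );   // reuse the existing topology credential store
          in.close();
        } else {
          keystore.load( null, master ); // start an empty store
          file.getParentFile().mkdirs(); // make sure the keystores directory exists
          FileOutputStream out = new FileOutputStream( file );
          keystore.store( out, master ); // persist it so providers can populate aliases later
          out.close();
        }
        return keystore;
      }
    }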

Modified: incubator/knox/trunk/books/0.3.0/config_kerberos.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/config_kerberos.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/config_kerberos.md (original)
+++ incubator/knox/trunk/books/0.3.0/config_kerberos.md Fri Sep 27 02:32:26 2013
@@ -17,15 +17,11 @@
 
 ### Secure Clusters ###
 
-If your Hadoop cluster is secured with Kerberos authentication, you have to do the following on Knox side.
-
-Please secure Hadoop services with Keberos authentication.
-
-Please see instructions at
-[http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuration_in_Secure_Mode]
-and
-[http://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.1/bk_installing_manually_book/content/rpm-chap14.html]
+See these documents for setting up a secure Hadoop cluster:
+http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuration_in_Secure_Mode
+http://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.1/bk_installing_manually_book/content/rpm-chap14.html
 
+Once you have a Hadoop cluster that is using Kerberos for authentication, you have to do the following to configure Knox to work with that cluster.
 
 #### Create Unix account for Knox on Hadoop master nodes ####
 
@@ -45,7 +41,7 @@ ssh into your host running KDC
 
 Update `core-site.xml` and add the following lines towards the end of the file.
 
-Please replace FQDN_OF_KNOX_HOST with right value in your cluster.
+Replace FQDN_OF_KNOX_HOST with the correct value for your cluster.
 You could use * for local developer testing if the Knox host does not have a static IP.
 
     <property>
@@ -61,7 +57,7 @@ You could use * for local developer test
 
 Update `oozie-site.xml` and add the following lines towards the end of the file.
 
-Please replace FQDN_OF_KNOX_HOST with right value in your cluster.
+Replace FQDN_OF_KNOX_HOST with the correct value for your cluster.
 You could use * for local developer testing if the Knox host does not have a static IP.
 
     <property>
@@ -75,34 +71,28 @@ You could use * for local developer test
 
 #### Copy knox keytab to Knox host ####
 
-Please add unix account for knox on Knox host
+Add a Unix account for knox on the Knox host
 
     useradd -g hadoop knox
 
-Please copy knox.service.keytab created on KDC host on to your Knox host /etc/knox/conf/knox.service.keytab
+Copy the knox.service.keytab created on the KDC host to /etc/knox/conf/knox.service.keytab on your Knox host
 
     chown knox knox.service.keytab
     chmod 400 knox.service.keytab
 
-
 #### Update krb5.conf at /etc/knox/conf/krb5.conf on Knox host ####
 
 You could copy the `templates/krb5.conf` file provided in the Knox binary download and customize it to suit your cluster.
 
-
 #### Update `krb5JAASLogin.conf` at `/etc/knox/conf/krb5JAASLogin.conf` on Knox host ####
 
 You could copy the `templates/krb5JAASLogin.conf` file provided in the Knox binary download and customize it to suit your cluster.
 
-
 #### Update `gateway-site.xml` on Knox host ####
 
 Update `conf/gateway-site.xml` in your Knox installation and set the value of `gateway.hadoop.kerberos.secured` to true.
 
-
 #### Restart Knox ####
 
 After you complete the above configuration and restart Knox, it will use SPNego to authenticate with Hadoop services and Oozie.
-There is not change in the way you make calls to Knox whether you use Curl or Knox DSL.
-
-
+There is no change in the way you make calls to Knox, whether you use cURL or the Knox DSL.
\ No newline at end of file

Modified: incubator/knox/trunk/books/0.3.0/config_sandbox.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/config_sandbox.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/config_sandbox.md (original)
+++ incubator/knox/trunk/books/0.3.0/config_sandbox.md Fri Sep 27 02:32:26 2013
@@ -15,6 +15,8 @@
    limitations under the License.
 --->
 
+## Sandbox Configuration ##
+
 ### Sandbox 2.x Configuration ###
 
 TODO

Modified: incubator/knox/trunk/books/0.3.0/service_hbase.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_hbase.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_hbase.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_hbase.md Fri Sep 27 02:32:26 2013
@@ -46,7 +46,7 @@ The command below launches the Stargate 
 
     sudo /usr/lib/hbase/bin/hbase-daemon.sh start rest -p 60080
 
-60080 post is used because it was specified in sample Hadoop cluster deployment `{GATEWAY_HOME}/deployments/sample.xml`.
+Port 60080 is used because it was specified in the sample Hadoop cluster deployment `{GATEWAY_HOME}/deployments/sandbox.xml`.
 
 #### Configure Sandbox port mapping for VirtualBox
 
@@ -59,7 +59,7 @@ The command below launches the Stargate 
 7. Press OK to close the rule window
 8. Press OK on the Network window to save the changes
 
-60080 post is used because it was specified in sample Hadoop cluster deployment `{GATEWAY_HOME}/deployments/sample.xml`.
+Port 60080 is used because it was specified in the sample Hadoop cluster deployment `{GATEWAY_HOME}/deployments/sandbox.xml`.
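
Before moving on to the gateway examples it can be worth confirming that the forwarded port actually answers. The snippet below only checks TCP reachability; the default host and port are assumptions and should be adjusted to match the forwarding rule created above.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    // Quick reachability check for the Stargate port exposed by the
    // VirtualBox forwarding rule. Host and port are assumptions; adjust
    // them to whatever the rule above maps.
    public class StargatePortCheck {
      public static void main( String[] args ) {
        String host = args.length > 0 ? args[0] : "localhost";
        int port = args.length > 1 ? Integer.parseInt( args[1] ) : 60080;
        Socket socket = new Socket();
        try {
          socket.connect( new InetSocketAddress( host, port ), 3000 );
          System.out.println( "Stargate port " + port + " is reachable on " + host );
          socket.close();
        } catch( IOException e ) {
          System.out.println( "Could not connect to " + host + ":" + port + ": " + e.getMessage() );
        }
      }
    }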
 
 ### HBase/Stargate via KnoxShell DSL
 

Modified: incubator/knox/trunk/books/0.3.0/service_oozie.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_oozie.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_oozie.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_oozie.md Fri Sep 27 02:32:26 2013
@@ -49,7 +49,7 @@ All of the values that may need to be cu
 * samples/ExampleSubmitJob.groovy
 * samples/ExampleSubmitWorkflow.groovy
 
-If you are using the Sandbox VM for your Hadoop cluster you may want to review [these configuration tips](#Sandbox+Configuration).
+If you are using the Sandbox VM for your Hadoop cluster you may want to review #[Sandbox Configuration].
 
 #### Example #2: WebHDFS & Oozie via KnoxShell DSL
 
@@ -76,7 +76,7 @@ Each line from the file below will need 
 
     import static java.util.concurrent.TimeUnit.SECONDS
 
-    gateway = "https://localhost:8443/gateway/sample"
+    gateway = "https://localhost:8443/gateway/sandbox"
     jobTracker = "sandbox:50300";
     nameNode = "sandbox:8020";
     username = "bob"
@@ -156,26 +156,26 @@ These replacement values are identified 
 
     # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
     curl -i -k -u bob:bob-password -X DELETE \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
 
     # 1. Create a test input directory /tmp/test/input
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input?op=MKDIRS'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input?op=MKDIRS'
 
     # 2. Create a test output directory /tmp/test/output
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=MKDIRS'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=MKDIRS'
 
     # 3. Create the inode for hadoop-examples.jar in /tmp/test
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/hadoop-examples.jar?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/hadoop-examples.jar?op=CREATE'
 
     # 4. Upload hadoop-examples.jar to /tmp/test.  Use a hadoop-examples.jar from a Hadoop install.
     curl -i -k -u bob:bob-password -T samples/hadoop-examples.jar -X PUT '{Value of Location header from command above}'
 
     # 5. Create the inode for a sample file README in /tmp/test/input
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE'
 
     # 6. Upload README to /tmp/test/input.  Use the README file in {GATEWAY_HOME}.
     curl -i -k -u bob:bob-password -T README -X PUT '{Value of Location header from command above}'
@@ -197,11 +197,11 @@ These replacement values are identified 
 
     # 10. List the contents of the output directory /tmp/test/output
     curl -i -k -u bob:bob-password -X GET \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=LISTSTATUS'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=LISTSTATUS'
 
     # 11. Optionally cleanup the test directory
     curl -i -k -u bob:bob-password -X DELETE \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
 
 #### Example #4: WebHDFS & Oozie via cURL
 
@@ -213,11 +213,11 @@ These replacement values are identified 
 
     # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
     curl -i -k -u bob:bob-password -X DELETE \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
 
     # 1. Create the inode for workflow definition file in /tmp/test
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/workflow.xml?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/workflow.xml?op=CREATE'
 
     # 2. Upload the workflow definition file.  This file can be found in {GATEWAY_HOME}/templates
     curl -i -k -u bob:bob-password -T templates/workflow-definition.xml -X PUT \
@@ -225,7 +225,7 @@ These replacement values are identified 
 
     # 3. Create the inode for hadoop-examples.jar in /tmp/test/lib
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/lib/hadoop-examples.jar?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/lib/hadoop-examples.jar?op=CREATE'
 
     # 4. Upload hadoop-examples.jar to /tmp/test/lib.  Use a hadoop-examples.jar from a Hadoop install.
     curl -i -k -u bob:bob-password -T samples/hadoop-examples.jar -X PUT \
@@ -233,7 +233,7 @@ These replacement values are identified 
 
     # 5. Create the inode for a sample input file README in /tmp/test/input.
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE'
 
     # 6. Upload README to /tmp/test/input.  Use the README file in {GATEWAY_HOME}.
     # The sample below uses this README file found in {GATEWAY_HOME}.
@@ -261,8 +261,8 @@ These replacement values are identified 
 
     # 10. List the contents of the output directory /tmp/test/output
     curl -i -k -u bob:bob-password -X GET \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=LISTSTATUS'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=LISTSTATUS'
 
     # 11. Optionally cleanup the test directory
     curl -i -k -u bob:bob-password -X DELETE \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
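
The CREATE-then-upload pattern used in steps 1 and 2 above (a first PUT that only returns a Location header, followed by a second PUT of the file content to that location) can also be sketched in Java. This is an illustrative client only: the URL, credentials and certificate trust are assumptions matching the sandbox examples, the class name is ours, and error handling is omitted.

    import java.io.FileInputStream;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import javax.xml.bind.DatatypeConverter;

    // Illustrative sketch of the WebHDFS two-step upload used in the curl
    // steps above: PUT ...?op=CREATE to obtain a Location header, then PUT
    // the file content to that location. Values are assumptions matching the
    // sandbox examples; no error handling.
    public class WebHdfsUploadSketch {
      public static void main( String[] args ) throws Exception {
        String auth = "Basic " + DatatypeConverter.printBase64Binary( "bob:bob-password".getBytes( "UTF-8" ) );
        String create = "https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/workflow.xml?op=CREATE";

        // Step 1: ask for the inode; the interesting part of the answer is the Location header.
        HttpURLConnection first = (HttpURLConnection)new URL( create ).openConnection();
        first.setRequestMethod( "PUT" );
        first.setRequestProperty( "Authorization", auth );
        first.setInstanceFollowRedirects( false );
        String location = first.getHeaderField( "Location" );
        System.out.println( "Create returned " + first.getResponseCode() + ", Location: " + location );

        // Step 2: PUT the actual bytes to the returned location.
        HttpURLConnection second = (HttpURLConnection)new URL( location ).openConnection();
        second.setRequestMethod( "PUT" );
        second.setRequestProperty( "Authorization", auth );
        second.setDoOutput( true );
        FileInputStream in = new FileInputStream( "templates/workflow-definition.xml" );
        OutputStream out = second.getOutputStream();
        byte[] buffer = new byte[ 4096 ];
        int n;
        while( ( n = in.read( buffer ) ) != -1 ) {
          out.write( buffer, 0, n );
        }
        out.close();
        in.close();
        System.out.println( "Upload returned " + second.getResponseCode() ); // expect 201 Created
      }
    }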

Modified: incubator/knox/trunk/books/0.3.0/service_webhcat.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_webhcat.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_webhcat.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_webhcat.md Fri Sep 27 02:32:26 2013
@@ -76,9 +76,9 @@ Each line from the file below will need 
 
     import static java.util.concurrent.TimeUnit.SECONDS
 
-    gateway = "https://localhost:8443/gateway/sample"
-    username = "bob"
-    password = "bob-password"
+    gateway = "https://localhost:8443/gateway/sandbox"
+    username = "guest"
+    password = "guest-password"
     dataFile = "LICENSE"
     jarFile = "samples/hadoop-examples.jar"
 

Modified: incubator/knox/trunk/books/0.3.0/service_webhdfs.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_webhdfs.md?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_webhdfs.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_webhdfs.md Fri Sep 27 02:32:26 2013
@@ -68,7 +68,7 @@ Each line from the file below will need 
     import org.apache.hadoop.gateway.shell.Hadoop
     import org.apache.hadoop.gateway.shell.hdfs.Hdfs
 
-    gateway = "https://localhost:8443/gateway/sample"
+    gateway = "https://localhost:8443/gateway/sandbox"
     username = "bob"
     password = "bob-password"
     dataFile = "README"
@@ -89,11 +89,11 @@ Each line from the file below will need 
 
     # 1. Optionally cleanup the sample directory in case a previous example was run without cleaning up.
     curl -i -k -u bob:bob-password -X DELETE \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
 
     # 2. Create the inode for a sample input file README in /tmp/test/input.
     curl -i -k -u bob:bob-password -X PUT \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE'
 
     # 3. Upload README to /tmp/test/input.  Use the README file in {GATEWAY_HOME}.
     # The sample below uses this README file found in {GATEWAY_HOME}.
@@ -102,8 +102,8 @@ Each line from the file below will need 
 
     # 4. List the contents of the input directory /tmp/test/input
     curl -i -k -u bob:bob-password -X GET \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input?op=LISTSTATUS'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input?op=LISTSTATUS'
 
     # 5. Optionally cleanup the test directory
     curl -i -k -u bob:bob-password -X DELETE \
-        'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'

Modified: incubator/knox/trunk/build.xml
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/build.xml?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/build.xml (original)
+++ incubator/knox/trunk/build.xml Fri Sep 27 02:32:26 2013
@@ -93,6 +93,8 @@
         </exec>
     </target>
 
+    <target name="review" depends="review-site,review-book" description="Open doc artifacts in a browser for review."/>
+
     <target name="review-book" depends="init" description="Open the default book in the default browser.">
         <exec executable="${browser.cmd}">
             <arg line="${book-0-3-0-file}" />

Modified: incubator/knox/trunk/markbook/src/main/java/org/apache/hadoop/gateway/markbook/MarkBook.java
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/markbook/src/main/java/org/apache/hadoop/gateway/markbook/MarkBook.java?rev=1526775&r1=1526774&r2=1526775&view=diff
==============================================================================
--- incubator/knox/trunk/markbook/src/main/java/org/apache/hadoop/gateway/markbook/MarkBook.java (original)
+++ incubator/knox/trunk/markbook/src/main/java/org/apache/hadoop/gateway/markbook/MarkBook.java Fri Sep 27 02:32:26 2013
@@ -60,8 +60,13 @@ public class MarkBook {
 //    System.out.println( replaceHeadings( "## text ##" ) );
 //    System.out.println( removeComments( "line\r\n<!--- \r\n comment \r\n comment \r\n---> \r\nline" ) );
 
+//    System.out.println( replaceReferences( "#[ text ]" ) );
+//    System.out.println( replaceReferences( "* #[ some text ]after" ) );
+//    System.out.println( replaceReferences( "\n#[ text ]" ) );
+
     CommandLine command = parseCommandLine( args );
     String markdown = loadMarkdown( command );
+//    System.out.println( markdown );
     storeHtml( command, markdown );
   }
 
@@ -91,6 +96,7 @@ public class MarkBook {
     String text = FileUtils.readFileToString( file );
     text = removeComments( text );
     text = replaceHeadings( text );
+    text = replaceReferences( text );
     text = replaceIncludes( file, text );
     return text;
   }
@@ -119,7 +125,7 @@ public class MarkBook {
     while( matcher.find() ) {
       String tag = matcher.group( 1 );
       String name = matcher.group( 2 ).trim();
-      String id = name.replaceAll( "\\s", "+" );
+      String id = id( name );
       if( !name.startsWith( "<a id=" ) ) {
         text = replace( matcher, text, String.format( "%s <a id=\"%s\"></a>%s %s", tag, id, name, tag ) );
         matcher = pattern.matcher( text );
@@ -128,6 +134,19 @@ public class MarkBook {
     return text;
   }
 
+  private static String replaceReferences( String text ) throws IOException {
+    Pattern pattern = Pattern.compile( "(\\s)#\\[(.+?)\\]" );
+    Matcher matcher = pattern.matcher( text );
+    while( matcher.find() ) {
+      String space = matcher.group( 1 );
+      String name = matcher.group( 2 ).trim();
+      String id = id( name );
+      text = replace( matcher, text, String.format( "%s[%s](#%s)", space, name, id ) );
+      matcher = pattern.matcher( text );
+    }
+    return text;
+  }
+
   private static String removeComments( String text ) {
     Pattern pattern = Pattern.compile( "<!---.*--->", Pattern.DOTALL );
     Matcher matcher = pattern.matcher( text );
@@ -142,6 +161,11 @@ public class MarkBook {
     return original.substring( 0, matcher.start() ) + replace + original.substring( matcher.end(), matcher.regionEnd() );
   }
 
+  private static String id( String name ) {
+    String id = name.replaceAll( "\\s", "+" );
+    return id;
+  }
+
   private static Options createOptions() {
     Options options = new Options();
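
To make the new `#[...]` cross-reference syntax concrete: replaceReferences() above turns a whitespace-preceded `#[Some Heading]` into a normal Markdown link whose anchor is the heading text with spaces replaced by `+`. The standalone snippet below replays that transformation with the same regular expression and id scheme; it uses appendReplacement rather than MarkBook's replace() helper and is only a demonstration, not part of the committed class.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Standalone demonstration of the #[...] reference rewriting added to
    // MarkBook. Uses the same regex and id scheme as replaceReferences()/id().
    public class ReferenceDemo {
      public static void main( String[] args ) {
        String text = "More detail is provided in the #[Gateway Details] section.";
        Pattern pattern = Pattern.compile( "(\\s)#\\[(.+?)\\]" );
        Matcher matcher = pattern.matcher( text );
        StringBuffer result = new StringBuffer();
        while( matcher.find() ) {
          String space = matcher.group( 1 );
          String name = matcher.group( 2 ).trim();
          String id = name.replaceAll( "\\s", "+" );
          matcher.appendReplacement( result, Matcher.quoteReplacement(
              String.format( "%s[%s](#%s)", space, name, id ) ) );
        }
        matcher.appendTail( result );
        System.out.println( result );
        // prints: More detail is provided in the [Gateway Details](#Gateway+Details) section.
      }
    }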