Posted to commits@toree.apache.org by ch...@apache.org on 2016/03/02 23:26:29 UTC

[31/31] incubator-toree-website git commit: Added old pages from wiki

Added old pages from wiki


Project: http://git-wip-us.apache.org/repos/asf/incubator-toree-website/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-toree-website/commit/046bbe0a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-toree-website/tree/046bbe0a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-toree-website/diff/046bbe0a

Branch: refs/heads/OverhaulSite
Commit: 046bbe0ae4eb375ff66a8e5518a1de96a5234616
Parents: 9f6d3ba
Author: Chip Senkbeil <ch...@gmail.com>
Authored: Wed Mar 2 16:09:39 2016 -0600
Committer: Chip Senkbeil <ch...@gmail.com>
Committed: Wed Mar 2 16:09:39 2016 -0600

----------------------------------------------------------------------
 Gemfile                                         |   1 +
 _config.yml                                     |   3 +
 _data/documentation.yml                         |   2 +-
 _includes/themes/apache/default.html            |   6 +-
 assets/themes/apache/css/zenburn.css            | 136 +++++++++++++
 .../quick-start/building-from-source.md         |  35 ++++
 documentation/quick-start/comm-api.md           | 199 ++++++++++++++++++
 documentation/quick-start/current-magics.md     | 112 +++++++++++
 documentation/quick-start/developing-magics.md  | 200 +++++++++++++++++++
 .../quick-start/development-workflow.md         | 123 ++++++++++++
 documentation/quick-start/getting-started.md    | 159 +++++++++++++++
 documentation/quick-start/indepth-look.md       |  17 ++
 documentation/quick-start/index.md              |  13 --
 .../quick-start/integrating-with-jupyter.md     |  89 +++++++++
 documentation/quick-start/language-support.md   | 132 ++++++++++++
 documentation/quick-start/overview-of-magics.md |  82 ++++++++
 .../quick-start/overview-of-project.md          |  45 +++++
 .../quick-start/spark-kernel-client.md          | 138 +++++++++++++
 .../quick-start/test-structure-of-project.md    |  65 ++++++
 documentation/quick-start/usage-instructions.md |  49 +++++
 documentation/quick-start/using-docker.md       |  26 +++
 documentation/quick-start/using-vagrant.md      |  26 +++
 22 files changed, 1642 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/Gemfile
----------------------------------------------------------------------
diff --git a/Gemfile b/Gemfile
index c13c470..133ef3b 100644
--- a/Gemfile
+++ b/Gemfile
@@ -17,4 +17,5 @@ source 'https://rubygems.org'
 gem 'github-pages'
 gem 'rouge'
 gem 'jekyll-oembed', :require => 'jekyll_oembed'
+gem 'redcarpet'
 # End Gemfile

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/_config.yml
----------------------------------------------------------------------
diff --git a/_config.yml b/_config.yml
index 31ec144..07533e5 100644
--- a/_config.yml
+++ b/_config.yml
@@ -25,4 +25,7 @@ github_username: apache
 
 # Build settings
 markdown: kramdown
+kramdown:
+    auto_ids: true
+    input: GFM
 

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/_data/documentation.yml
----------------------------------------------------------------------
diff --git a/_data/documentation.yml b/_data/documentation.yml
index af4c6e9..9a0d87e 100644
--- a/_data/documentation.yml
+++ b/_data/documentation.yml
@@ -16,7 +16,7 @@
 
 - section_name: "Quick Start"
   section_id: "quick-start"
-  section_url: "/documentation/quick-start/"
+  section_url: "/documentation/quick-start/getting-started"
 
 - section_name: "Advanced"
   section_id: "advanced"

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/_includes/themes/apache/default.html
----------------------------------------------------------------------
diff --git a/_includes/themes/apache/default.html b/_includes/themes/apache/default.html
index c2a0620..c2f954f 100644
--- a/_includes/themes/apache/default.html
+++ b/_includes/themes/apache/default.html
@@ -17,8 +17,10 @@
     <!-- Le styles -->
     <link href="{{ ASSET_PATH }}/font-awesome-4.5.0/css/font-awesome.min.css">
     <link href="{{ ASSET_PATH }}/bootstrap/css/bootstrap.css" rel="stylesheet">
-    <!-- <link href="{{ ASSET_PATH }}/css/style.css?body=1" rel="stylesheet" type="text/css"> -->
-    <!-- <link href="{{ ASSET_PATH }}/css/syntax.css" rel="stylesheet"  type="text/css" media="screen" /> -->
+
+    <!-- NOTE: Provides syntax highlighting to pygments classes, but looks weird -->
+    <!-- <link href="{{ ASSET_PATH }}/css/zenburn.css" rel="stylesheet"> -->
+    
     <!-- Le fav and touch icons -->
     <!-- Update these with your own images
     <link rel="shortcut icon" href="images/favicon.ico">

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/assets/themes/apache/css/zenburn.css
----------------------------------------------------------------------
diff --git a/assets/themes/apache/css/zenburn.css b/assets/themes/apache/css/zenburn.css
new file mode 100644
index 0000000..287591d
--- /dev/null
+++ b/assets/themes/apache/css/zenburn.css
@@ -0,0 +1,136 @@
+.highlight code, .highlight pre {
+color:#fdce93;
+background-color:#3f3f3f;
+}
+
+.highlight .hll {
+background-color:#222;
+}
+
+.highlight .err {
+color:#e37170;
+background-color:#3d3535;
+}
+
+.highlight .k {
+color:#f0dfaf;
+}
+
+.highlight .p {
+color:#41706f;
+}
+
+.highlight .cs {
+color:#cd0000;
+font-weight:700;
+}
+
+.highlight .gd {
+color:#cd0000;
+}
+
+.highlight .ge {
+color:#ccc;
+font-style:italic;
+}
+
+.highlight .gr {
+color:red;
+}
+
+.highlight .go {
+color:gray;
+}
+
+.highlight .gs {
+color:#ccc;
+font-weight:700;
+}
+
+.highlight .gu {
+color:purple;
+font-weight:700;
+}
+
+.highlight .gt {
+color:#0040D0;
+}
+
+.highlight .kc {
+color:#dca3a3;
+}
+
+.highlight .kd {
+color:#ffff86;
+}
+
+.highlight .kn {
+color:#dfaf8f;
+font-weight:700;
+}
+
+.highlight .kp {
+color:#cdcf99;
+}
+
+.highlight .kr {
+color:#cdcd00;
+}
+
+.highlight .ni {
+color:#c28182;
+}
+
+.highlight .ne {
+color:#c3bf9f;
+font-weight:700;
+}
+
+.highlight .nn {
+color:#8fbede;
+}
+
+.highlight .vi {
+color:#ffffc7;
+}
+
+.highlight .c,.preview-zenburn .highlight .g,.preview-zenburn .highlight .cm,.preview-zenburn .highlight .cp,.preview-zenburn .highlight .c1 {
+color:#7f9f7f;
+}
+
+.highlight .l,.preview-zenburn .highlight .x,.preview-zenburn .highlight .no,.preview-zenburn .highlight .nd,.preview-zenburn .highlight .nl,.preview-zenburn .highlight .nx,.preview-zenburn .highlight .py,.preview-zenburn .highlight .w {
+color:#ccc;
+}
+
+.highlight .n,.preview-zenburn .highlight .nv,.preview-zenburn .highlight .vg {
+color:#dcdccc;
+}
+
+.highlight .o,.preview-zenburn .highlight .ow {
+color:#f0efd0;
+}
+
+.highlight .gh,.preview-zenburn .highlight .gp {
+color:#dcdccc;
+font-weight:700;
+}
+
+.highlight .gi,.preview-zenburn .highlight .kt {
+color:#00cd00;
+}
+
+.highlight .ld,.preview-zenburn .highlight .s,.preview-zenburn .highlight .sb,.preview-zenburn .highlight .sc,.preview-zenburn .highlight .sd,.preview-zenburn .highlight .s2,.preview-zenburn .highlight .se,.preview-zenburn .highlight .sh,.preview-zenburn .highlight .si,.preview-zenburn .highlight .sx,.preview-zenburn .highlight .sr,.preview-zenburn .highlight .s1,.preview-zenburn .highlight .ss {
+color:#cc9393;
+}
+
+.highlight .m,.preview-zenburn .highlight .mf,.preview-zenburn .highlight .mh,.preview-zenburn .highlight .mi,.preview-zenburn .highlight .mo,.preview-zenburn .highlight .il {
+color:#8cd0d3;
+}
+
+.highlight .na,.preview-zenburn .highlight .nt {
+color:#9ac39f;
+}
+
+.highlight .nb,.preview-zenburn .highlight .nc,.preview-zenburn .highlight .nf,.preview-zenburn .highlight .bp,.preview-zenburn .highlight .vc {
+color:#efef8f;
+}

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/building-from-source.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/building-from-source.md b/documentation/quick-start/building-from-source.md
new file mode 100644
index 0000000..f15e0a9
--- /dev/null
+++ b/documentation/quick-start/building-from-source.md
@@ -0,0 +1,35 @@
+---
+layout: docpage
+title: Building from Source
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+{% include JB/setup %}
+The project uses `make` as the entry point to build and package the code. Under the hood, `make` invokes `sbt` to compile, test, and build assemblies, and it also sets certain key environment variables. Additionally, it supports running all `sbt` commands within a VM managed by [`Vagrant`](Vagrant-Development-Environment) to simplify development bootstrap. For these reasons, we do not recommend running `sbt` directly.
+
+By default, `make` runs all commands directly on the host machine, which requires a local installation of [sbt 0.13.7+](http://www.scala-sbt.org/download.html).
+
+If you would like to rely on the `Vagrant` VM instead, set the environment variable `USE_VAGRANT=true` and run `make` as usual. This requires [Vagrant](https://www.vagrantup.com) and a VM provider (such as [VirtualBox](https://www.virtualbox.org/wiki/Downloads)).
+
+### Compile Source ###
+
+To build the code, use `make build`. This will compile the code and build the assembly jar. The `Makefile` specifies the default version of Spark to build against (environment variable `APACHE_SPARK_VERSION`).
+
+If you want to change the version of Apache Spark that the kernel is compiled against, specify the version via the `APACHE_SPARK_VERSION` environment variable.
+
+    APACHE_SPARK_VERSION=1.5.2 make build
+
+The recommended JVM options for `sbt` are as follows:
+
+    -Xms1024M
+    -Xmx2048M
+    -Xss1M
+    -XX:+CMSClassUnloadingEnabled
+    -XX:MaxPermSize=1024M
+
+### Packaging ###
+
+To create a packaged kernel, run `make dist`. This will create a package under `dist/` that includes the kernel's assembly jar plus an executable that runs the kernel using `$SPARK_HOME/bin/spark-submit`.

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/comm-api.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/comm-api.md b/documentation/quick-start/comm-api.md
new file mode 100644
index 0000000..d597424
--- /dev/null
+++ b/documentation/quick-start/comm-api.md
@@ -0,0 +1,199 @@
+---
+layout: docpage
+title: Comm API
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+The Comm API exposed by the Spark Kernel Client and Spark Kernel serves to
+provide a clean method of communication between the Spark Kernel and its
+clients.
+
+The API provides the ability to create and send custom messages with the
+focus on synchronizing data between a kernel and its clients, although that
+use case is not enforced.
+
+Access to the Comm API is made available for the client via 
+`<client_instance>.comm` and for the kernel via `kernel.comm`.
+
+Example of Registration and Communication
+-----------------------------------------
+
+The following example demonstrates the _client_ connecting to the _kernel_,
+receiving a response, and then closing its connection.
+
+This is an example of registering an open callback on the _kernel_ side:
+
+    // Register the callback to respond to being opened from the client
+    kernel.comm.register("my target").addOpenHandler { 
+        (commWriter, commId, targetName, data) =>
+            commWriter.writeMsg(Map("response" -> "Hello World!"))
+    }
+    
+This is the corresponding example of registering a message receiver on the
+_client_ and initiating the Comm connection via _open_:
+
+    val client: SparkKernelClient = /* Created elsewhere */
+
+    // Register the callback to receive a message from the kernel, print it
+    // out, and then close the connection
+    client.comm.register("my target").addMsgHandler {
+        (commWriter, commId, data) =>
+            println(data("response"))
+            commWriter.close()
+    }
+    
+    // Initiate the Comm connection
+    client.comm.open("my target")
+
+Comm Events
+-----------
+
+The Comm API provides three types of events that can be captured:
+
+1. Open
+
+    - Triggered when the client/kernel receives an open request for a target
+      that has been registered
+
+2. Msg
+
+    - Triggered when the client/kernel receives a Comm message for an open
+      Comm instance
+    
+3. Close
+
+    - Triggered when the client/kernel receives a close request for an open
+      Comm instance
+      
+### Registering Callbacks ###
+
+To register callbacks that are triggered during these events, the following
+function is provided:
+
+    register(<target name>)
+    
+This function, when invoked, registers the provided target on the 
+client/kernel, but does not add any callbacks. To add functions to be called
+during events, you can chain methods onto the register function.
+
+#### Adding Open Callbacks ####
+
+To add an open callback, use the `addOpenHandler(<function>)` method:
+
+    register(<target name>).addOpenHandler(<function>)
+    
+The function is given the following four arguments:
+
+- CommWriter
+
+    - The instance of the Comm-based writer that can send messages back
+    
+- CommId
+
+    - The id associated with the new Comm instance
+    
+- TargetName
+
+    - The name of the Comm that is created
+
+- Data (_Optional_)
+
+    - The map of key/value pairs representing data associated with the new
+      Comm instance
+      
+#### Adding Message Callbacks ####
+
+To add a message callback, use the `addMsgHandler(<function>)` method:
+
+    register(<target name>).addMsgHandler(<function>)
+    
+The function is given the following three arguments:
+
+- CommWriter
+
+    - The instance of the Comm-based writer that can send messages back
+    
+- CommId
+
+    - The id associated with the Comm instance
+
+- Data
+
+    - The map of key/value pairs representing data associated with the
+      received message
+      
+#### Adding Close Callbacks ####
+
+To add a close callback, use the `addCloseHandler(<function>)` method:
+
+    register(<target name>).addCloseHandler(<function>)
+    
+The function is given the following three arguments:
+
+- CommWriter
+
+    - Unused as the Comm instance associated with the writer has been closed
+    
+- CommId
+
+    - The id associated with the Comm instance that was closed
+
+- Data
+
+    - The map of key/value pairs representing data associated with the
+      received message
+
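+For illustration, here is a minimal sketch of a close callback registered on
+the kernel side, mirroring the open-handler example above (the target name and
+the printed message are illustrative assumptions):
+
+    // Register the callback to react to the Comm connection being closed
+    kernel.comm.register("my target").addCloseHandler {
+        (commWriter, commId, data) =>
+            // commWriter is unused here; the Comm instance is already closed
+            println("Comm " + commId + " was closed")
+    }
+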
+Comm Messaging
+--------------
+
+The Comm API exposes an _open_ method that initiates a new Comm instance on
+both sides of the connection:
+
+    `open(<target name>)`
+    
+This returns an instance of _CommWriter_ that can be used to send data via
+the Comm protocol.
+
+The kernel would initiate the connection via `kernel.comm.open(<target name>)`
+while the client would start via `<client instance>.comm.open(<target name>)`.
+
+As per the IPython protocol definition, the Comm instance can be opened from
+either side.
+
+### Using the Comm Writer ###
+
+The Comm API provides an implementation of [java.io.Writer][1] that is used to
+send _open_, _msg_, and _close_ Comm messages to the client or kernel (client
+to kernel or vice versa).
+
+The following methods are available with _CommWriter_ implementations:
+
+1. `writeOpen(<target name> [, data])`
+
+    - Sends an open request with the given target name and optional map of data
+    
+2. `writeMsg(<data>)`
+
+    - Sends the map of data as a Comm message
+    
+3. `write(<character array>, <offset>, <length>)`
+
+    - Sends the character array as a Comm message (in the same form as a 
+      _Writer's_ write(...) method) with the key for the data as "message"
+      
+        - E.g. `commWriter.write(<array>, 0, <array length>)` translates to
+        
+            Data("message": "<array>")
+    
+4. `writeClose([data])`
+
+    - Sends a close request with the optional map of data
+    
+5. `close()`
+
+    - Sends a close request with no data
+
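+As a brief illustration (a sketch only; the data keys used here are arbitrary),
+a handler holding a _CommWriter_ could send a message and then close the
+connection:
+
+    // Inside a registered handler that received a commWriter instance
+    commWriter.writeMsg(Map("status" -> "processing"))
+    commWriter.writeClose(Map("reason" -> "done"))
+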
+[1]: http://docs.oracle.com/javase/7/docs/api/java/io/Writer.html

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/current-magics.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/current-magics.md b/documentation/quick-start/current-magics.md
new file mode 100644
index 0000000..9d9546a
--- /dev/null
+++ b/documentation/quick-start/current-magics.md
@@ -0,0 +1,112 @@
+---
+layout: docpage
+title: Current Magics
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+Magic names are case insensitive, so _AddJar_ is the same as _addjar_.
+
+## Line Magics
+
+* **AddDeps** _<my.company> <artifact-id> <version> [--transitive]_
+
+    * Adds the specified dependencies from Maven Central to the Spark Kernel
+      and Spark cluster
+
+    * Requires the company name, artifact id, and version of the dependency
+
+    * Including _--transitive_ will include all needed dependencies
+
+    * Examples:
+        * Adding a single library with all of its dependencies
+            ```scala           
+            %AddDeps org.apache.spark spark-streaming-kafka_2.10 1.1.0 --transitive
+            ```
+
+        * Using the programmatic API
+            ```scala           
+            kernel.magics.addDeps("org.apache.spark spark-streaming-kafka_2.10 1.1.0 --transitive")
+            ```
+* **AddJar** _<jar-path> [-f]_
+
+    * Adds the specified jars to the Spark Kernel and Spark cluster
+
+    * Requires the path to the jar, which can either be a local path or remote
+      jar hosted via HTTP
+
+    * Including _-f_ will ignore any cached jars and redownload remote
+      jars
+
+    * Examples:
+
+        * Adding a single jar from HTTP and forcing redownload if cached
+            ```scala 
+            %AddJar http://example.com/some_lib.jar -f
+            ```
+
+        * Adding a single jar from the file system relative to the kernel
+            ```scala 
+            %AddJar file:/path/to/some/lib.jar
+            ```
+
+        * Using the programmatic API
+            ```scala            
+            kernel.magics.addJar("http://example.com/some_lib.jar -f")
+            ```
+
+* **LSMagic**
+
+    * Lists all available magics in the kernel
+
+    * Examples:
+
+        * Percent syntax
+            ```scala 
+            %LSMagic
+            ```
+
+        * Using the programmatic API
+            ```scala 
+            kernel.magics.lsMagic()
+            ```
+
+## Cell Magics
+
+* **Html** _<html-code>_
+
+    * Returns the given code with an HTML MIME-type.
+
+    * Examples:
+
+        * Percent syntax
+            ```scala 
+            %%Html
+            <h1>Hello</h1>
+            <p>World</p>
+            ```
+
+        * Using the programmatic API
+            ```scala 
+            kernel.magics.html("<h1>Hello</h1><p>World</p>")
+            ```
+
+* **JavaScript** _<javascript-code>_
+
+    * Returns the given code with an `application/javascript` MIME-type.
+
+    * Examples:
+
+        * Percent syntax
+            ```scala 
+            %%JavaScript
+            var x = 3
+            var y = x + 2
+            ```
+
+        * Using the programmatic API
+            ```scala 
+            kernel.magics.javascript("var x = 3; var y = x + 2")
+            ```

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/developing-magics.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/developing-magics.md b/documentation/quick-start/developing-magics.md
new file mode 100644
index 0000000..b67b0ff
--- /dev/null
+++ b/documentation/quick-start/developing-magics.md
@@ -0,0 +1,200 @@
+---
+layout: docpage
+title: Developing Magics
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+The Spark Kernel provides a pluggable interface for magics that allows
+developers to write their own magics. This guide will focus on the technical details of implementing your own magics; for an introduction and conceptual overview of magics, see [Overview of Magics for the Spark Kernel](https://github.com/ibm-et/spark-kernel/wiki/Overview-of-Magics-for-the-Spark-Kernel).
+
+In this guide we'll look at the dependencies required to develop a magic, walk through creating a line magic and a cell magic, and discuss some useful magic features.
+
+### Dependencies ###
+
+In order to write a magic, you need to add the _kernel-api_ and _protocol_ 
+modules of the Spark Kernel to your project.
+
+In _sbt_, you can add the following lines:
+
+    libraryDependencies ++= Seq(
+        "com.ibm.spark" %% "kernel-api" % "0.1.1-SNAPSHOT",
+        "com.ibm.spark" %% "protocol" % "0.1.1-SNAPSHOT"
+    )
+
+As the modules are not hosted on any repository, you will also need to build
+and publish them locally. From the root of the Spark Kernel, you can execute
+the following to compile and make available the Spark Kernel modules:
+
+    sbt compile && sbt publishLocal
+
+## Developing Magics
+
+A magic is implemented by extending either the ```LineMagic``` or ```CellMagic``` trait provided by the Spark Kernel. Each trait consists of a single function, ```execute```, that defines the magic's functionality.
+
+### Developing a Line Magic ###
+
+A line magic receives a line of code as input and performs an operation. To create a line magic, we extend the `LineMagic` trait, and override its `execute` method:
+
+```scala
+    class HelloLineMagic extends LineMagic {
+        override def execute(code: String): LineMagicOutput = {
+          // do stuff
+        }
+    }
+```
+
+Note that line magics aren't meant to return values; their return type ```LineMagicOutput``` is equivalent to ```Unit```.
+
+#### Using the Magic
+Now we can use our magic with either:
+```scala
+%helloLineMagic foo bar
+```
+or
+```scala
+kernel.magics.helloLineMagic("foo bar")
+```
+
+Behind the scenes, the ```execute``` method of ```HelloLineMagic``` gets called with ```"foo bar"``` as input.
+
+### Developing a Cell Magic ###
+A cell magic receives an entire cell of code as input and returns a mapping of MIME types to data. This mapping, defined by the type ```CellMagicOutput```, can be used to distinguish different data types produced by the magic. In an IPython setting, the ```CellMagicOutput``` mapping will influence the way a cell is rendered. 
+
+#### An HTML Cell Magic ####
+As a concrete example, we'll develop an ```HTML``` cell magic that causes a cell to render its contents as HTML.
+
+To create a cell magic, we extend the `CellMagic` trait, and override its `execute` method:
+
+```scala
+    class Html extends CellMagic {
+        override def execute(code: String): CellMagicOutput = { 
+          // TODO 
+        }
+    }
+```
+
+In this case, we want to package the code that the magic receives as HTML. To do so, we return a ```CellMagicOutput``` that maps ```MIMEType.TextHtml``` to the code received:
+
+```scala
+    class Html extends CellMagic {
+        override def execute(code: String): CellMagicOutput = { 
+          CellMagicOutput(MIMEType.TextHtml -> code)
+        }
+    }
+```
+
+#### Using the Magic
+We can use the magic with either:
+```scala
+%%HTML
+<h1>a title</h1>
+<p>some more HTML</p>
+```
+or
+```scala
+kernel.magics.html("<h1>a title</h1><p>some more HTML</p>")
+```
+
+Behind the scenes, the ```execute``` method of ```Html``` gets called with ```"<h1>a title</h1><p>some more HTML</p>"``` as input.
+
+In an IPython setting, the ```CellMagicOutput``` mapping that our magic returns will cause the cell to be rendered as HTML:
+
+
+![htmlexample](https://raw.githubusercontent.com/wiki/ibm-et/spark-kernel/magics_html_example.png)
+
+
+Making `Magic` has never been this easy!
+
+
+### Argument Parsing
+
+Sometimes, it's useful to view the code received by the magic as a string of arguments. To help with parsing arguments out of the raw code string, you can include the `ArgumentParsingSupport` trait into your magic definition:
+
+```scala
+class HelloParsing extends LineMagic with ArgumentParsingSupport {
+   val flag = parser.accepts("flag", "a boolean flag")
+                    .withOptionalArg()
+                    .ofType(classOf[Boolean])
+                    .defaultsTo(true)
+   
+   override def execute(code: String): LineMagicOutput = {
+      val args = parseArgs(code)
+      if (args(0)) // do something
+      else         // do something else
+   }
+}
+```
+### Adding Dependencies to Magics ###
+
+For more practical uses of magics, there are various dependencies that need to
+be brought in when the magic is executed. To facilitate dependency injection,
+you add the desired dependency as an additional trait to the magic.
+
+Currently, there are four available dependencies:
+
+1. SparkContext - use the trait _IncludeSparkContext_ to bring in the context
+   used by the kernel. This exposes the _sparkContext_ variable into the magic.
+
+2. Interpreter - use the trait _IncludeInterpreter_ to bring in the interpreter
+   used by the kernel. This exposes the _interpreter_ variable into the magic.
+
+3. OutputStream - use the trait _IncludeOutputStream_ to bring in an output
+   stream that directs output back to the client of the kernel (useful for
+   streaming status updates). This exposes the _outputStream_ variable into
+   the magic.
+
+4. DependencyDownloader - use the trait _IncludeDependencyDownloader_ to bring
+   in the tool used to support the _%AddDeps_ magic. This exposes the
+   _dependencyDownloader_ variable into the magic.
+
+Because dependencies are injected, there is no guarantee that referencing a
+dependency outside of a method will work. Instead, mark such references as
+_lazy_ or move them into a helper method.
+
+```scala
+    class HelloWorld extends LineMagic with IncludeOutputStream {
+        val printStream = new PrintStream(outputStream)
+
+        // ...
+    }
+```
+should become
+
+```scala
+    class HelloWorld extends LineMagic with IncludeOutputStream {
+        lazy val printStream = new PrintStream(outputStream)
+
+        // ...
+    }
+```
+
+### Adding an external magic to the Spark Kernel ###
+In order to use an external magic, we first need a `.jar` containing a magic in the `com.ibm.spark.magic.builtin` package. Assuming we have such a `.jar` at location `/src/path/to/my/exampleMagic.jar`, the `kernel.json` file needs to be changed to supply the path to the external magic. The command-line argument we need to add is `--magic-url`, which takes a string:
+
+```json
+{
+    "display_name": "Spark 1.2.1 (Scala 2.10.4)",
+    "language_info": "scala",
+    "argv": [
+        "/home/vagrant/local/bin/sparkkernel",
+        "--profile",
+        "{connection_file}",
+        "--magic-url",
+        "/src/path/to/my/exampleMagic.jar"
+    ],
+    "codemirror_mode": "scala"
+}
+```
+
+Then on start-up, the kernel will load the magic from that location and it can be used:
+![example external magic](https://raw.githubusercontent.com/wiki/ibm-et/spark-kernel/external_magic_example.png)
+
+### Examples ###
+
+For some example implementations, check out the ```com.ibm.spark.magic.builtin``` package in the ```kernel``` project folder.
+
+### Other Notes ###
+
+There is a limitation with the current magic implementation that forces magic invocations to be case sensitive unless the magic is defined in the package _com.ibm.spark.magic.builtin_.

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/development-workflow.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/development-workflow.md b/documentation/quick-start/development-workflow.md
new file mode 100644
index 0000000..c68dbfd
--- /dev/null
+++ b/documentation/quick-start/development-workflow.md
@@ -0,0 +1,123 @@
+---
+layout: docpage
+title: Development Workflow
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+While it is not necessary to follow this guide for development, it is being
+documented to encourage some form of standard practice for this project.
+
+### Tooling ###
+
+Most of the developers for the Spark Kernel thus far have chosen to use
+_IntelliJ_ as their means of development. Because of this, a plugin for _sbt_
+is included in our project to allow easy construction of an IntelliJ project
+that contains all of the modules.
+
+Obviously, _git_ is used as the source control for the project.
+
+Finally, we use _sbt_ as our build tool and test runner. You can find more
+information about compiling and testing in the main README.
+
+### Building IntelliJ Project ###
+
+To build the IntelliJ project using _sbt_, you can trigger the plugin by
+executing the following from the root of the Spark Kernel project:
+
+    sbt gen-idea
+
+This should create *.idea/* and *.idea\_modules/* directories.
+
+From there, you should be able to open (not import) the project using IntelliJ.
+
+### Using Branches for Development ###
+
+When we tackle defects or features in the Spark Kernel, we typically break the
+problems up into the smallest pieces possible. Once we have something simple
+like "I need the kernel to print out hello world when it starts," we create a
+branch from our development branch (in the case of this project, it is
+typically master). For this example, let's call the branch 
+"AddHelloWorldDuringBoot" and use it for our feature.
+
+Once development has finished, it is good practice to ensure that all tests
+are still passing. To do this, run `sbt test` from the root of the Spark Kernel
+project.
+
+If everything passes, we want to ensure that our branch is up-to-date with the
+latest code in the kernel. So, move back to the development branch (master in
+our case) and pull the latest changes. If there are changes, we want to rebase
+our branch on top of those new changes. From the _AddHelloWorldDuringBoot_
+branch, run `git rebase master` to bring the branch up to speed with master.
+
+The advantage of using rebase on a _local_ branch is that it makes merging back
+with _master_ much cleaner for the maintainers. If your branch has been pushed
+remotely, you want to avoid rebasing in case someone else has branched off of
+your branch. Tricky stuff!
+
+After rebasing on top of master, it is a good idea to rerun the tests for your
+branch to ensure that nothing has broken from the changes: `sbt test`
+
+Finally, if the tests pass, switch back to the development branch (master) and
+merge the changes: `git merge AddHelloWorldDuringBoot`. As a last check,
+rerun the tests to ensure that the merge went well (`sbt test` in master). If
+those tests still pass, the changes can be pushed!
+
+### Writing proper unit tests ###
+
+The goal of our unit tests is isolation. This means that absolutely _no_
+external logic is needed to run the tests. This includes fixtures and any
+possible dependencies referenced in the code. We use _Mockito_ to provide
+mocking facilities for our dependencies and try our best to isolate dependency
+creation.
+
+    class MyClass {
+        // Bad design
+        val someDependency = new SomeDependency()
+
+        // ...
+    }
+
+Instead, move the dependency to the constructor:
+
+    class MyClass(someDependency: SomeDependency) {
+        // ...
+    }
+
+Alternatively, use a trait to mix in the dependency:
+
+    trait MyDependency {
+        val someDependency = new SomeDependency()
+    }
+
+    class MyClass extends MyDependency {
+
+    }
+
+For testing, we use _ScalaTest_ with the _FunSpec_ to provide the basic
+structure of our tests (in a BDD manner). Typically, _Matchers_ from
+_ScalaTest_ are also included to provide a better flow.
+
+    class MyClassSpec extends FunSpec with Matchers {
+        describe("MyClass") {
+            describe("#someMethod") {
+                it("should indicate success by default") {
+                    val myClass = new MyClass(new SomeDependency())
+                    val expected = true
+                    val actual = myClass.someMethod()
+
+                    actual should be (expected)
+                }
+            }
+        }
+    }
+
+The structure above uses a _describe_ block to represent the name of the
+class being tested. We nest a second layer of _describe_ blocks to indicate
+tests for individual public methods. Finally, _it_ blocks each test a single
+case (such as a different logical route through the code).
+
+We have attempted to keep the majority of our tests clear and concise.
+Typically, we avoid helper functions because they can obfuscate the tests.
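+
+As a sketch of how a mocked dependency fits into this structure (assuming
+ScalaTest's `MockitoSugar` trait is available on the test classpath; the class
+and method names mirror the hypothetical example above):
+
+    class MyClassSpec extends FunSpec with Matchers with MockitoSugar {
+        describe("MyClass") {
+            describe("#someMethod") {
+                it("should indicate success by default") {
+                    // The dependency is mocked, so no external logic is exercised
+                    val mockDependency = mock[SomeDependency]
+                    val myClass = new MyClass(mockDependency)
+
+                    myClass.someMethod() should be (true)
+                }
+            }
+        }
+    }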

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/getting-started.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/getting-started.md b/documentation/quick-start/getting-started.md
new file mode 100644
index 0000000..22f023a
--- /dev/null
+++ b/documentation/quick-start/getting-started.md
@@ -0,0 +1,159 @@
+---
+layout: docpage
+title: Getting Started
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+This short guide should serve to get you up and running with the _Spark Kernel_. To jump straight to installation, see [Getting the Spark Kernel Running](#getting-the-spark-kernel-running).
+
+### What is the Spark Kernel? ###
+
+The Spark Kernel is a Scala application that connects to an [Apache Spark][1] cluster. Rather than bundling the Apache Spark interface with your application and running it as an Apache Spark driver application, you can use the Spark Kernel as the proxy between your application(s) and the Apache Spark cluster.
+
+Why would you want to do this?
+
+1. Faster turnaround for your applications
+
+    - The Spark Kernel compiles Scala code using the same compiler as the _Spark Shell_, meaning that your
+      applications do not need to compile full jars to launch Spark tasks
+
+2. Direct feedback to your applications
+
+    - Rather than writing your data out to HDFS or another datastore and _immediately_ reading that data
+      into your application, the Spark Kernel can feed results from Spark operations directly to your
+      applications
+
+3. Isolate Spark dependencies
+
+    - Instead of including necessary Spark interfaces into each of your applications, you can use the
+      Spark Kernel as the middleman for your applications
+
+__For more information, please visit the [Spark Kernel Overview][2] and [In-depth Look at the Spark Kernel][3] sections.__
+
+### Getting the Spark Kernel Running ###
+
+##### Compiling and Packaging the Kernel #####
+
+As the Spark Kernel does not have a pre-built release, you will need to build it from source. This project uses `make` as the entry point for development and builds. By default, it requires `sbt`. More information is available [here](wiki/Building-from-Source). To build the kernel, run:
+```
+make build
+```
+This should compile all modules of the Spark Kernel and generate an assembly jar under `kernel/target/scala-2.10`. After building, you can run:
+```
+make dist
+```
+This generates a `dist` directory with the kernel library and executable. It also generates a tar package. Move this package anywhere visible to Jupyter to be able to run the kernel in a notebook.
+
+##### Starting the Spark Kernel #####
+
+If everything succeeded, you should now be able to launch the Spark Kernel by issuing the following:
+
+    <spark-kernel-dist>/bin/spark-kernel
+
+![](spark_kernel_running_output.png)
+
+__To use different languages with the Spark Kernel, please visit [Language Support on the Spark Kernel][14].__
+
+### Connecting to the Spark Kernel with Jupyter ###
+
+We use Vagrant to simplify certain things. If you have Vagrant installed, the easiest way to test the kernel within Jupyter is to run
+```
+USE_VAGRANT=true make dev
+```
+This command will start up Jupyter (currently set to 3.2.1). The Spark Kernel will be preconfigured and ready to use against a `local[2]` Spark cluster.
+
+For detailed instructions about installing and using Jupyter, visit http://jupyter.org/.
+
+The following are general instructions for installing a kernel on Jupyter. They may vary depending on your installation. The basic steps are:
+ 
+1. `make dist` to bundle up the kernel
+2. Create a kernel.json file for the Spark Kernel. It should look something like this:
+
+    ```
+    {
+        "display_name": "Spark 1.5.1 (Scala 2.10.4)",
+        "language_info": { "name": "scala" },
+        "argv": [
+            "/src/spark-kernel/dist/spark-kernel/bin/spark-kernel",
+            "--profile",
+            "{connection_file}"
+        ],
+        "codemirror_mode": "scala",
+        "env": {
+            "SPARK_OPTS": "--driver-java-options=-Xms1024M --driver-java-options=-Xmx4096M --driver-java-options=-Dlog4j.logLevel=trace",
+            "MAX_INTERPRETER_THREADS": "16",
+            "SPARK_CONFIGURATION": "spark.cores.max=4",
+            "CAPTURE_STANDARD_OUT": "true",
+            "CAPTURE_STANDARD_ERR": "true",
+            "SEND_EMPTY_OUTPUT": "false",
+            "SPARK_HOME": "/opt/spark",
+            "PYTHONPATH": "/opt/spark/python:/opt/spark/python/lib/py4j-0.8.2.1-src.zip"
+        }
+    }
+    ```
+3. Place the kernel.json file in the correct location for your Jupyter installation. A typical location is `~/.ipython/kernels/spark/kernel.json`.
+
+When you run Jupyter, you should now see a new option in the dropdown list.
+
+![](ipython_dropdown_with_spark_kernel.png)
+
+From here, you can now select the Spark Kernel, which will start a new Spark Kernel using Apache Spark local mode. 
+
+For more details on configuring the kernel, see the [usage instructions][7].
+
+### Connecting to the Spark Kernel with the Client Library ###
+
+The current client library provides an abstraction for communicating with the Spark Kernel. The library is currently designed for Scala-based applications.
+
+As the client library does not have a pre-built release, you will need to build it from source. The client library uses _sbt_ as its build tool, so you will need [sbt 0.13.5+][5] installed on your machine. From the root of the project, execute the following:
+
+    sbt compile
+    sbt publishLocal
+
+You should now have published all public jars used by the client library.
+
+The next step is to include the client project into your Scala application by adding the following to your _build.sbt_ file:
+
+    libraryDependencies += "com.ibm.spark" %% "client" % "0.1.1-SNAPSHOT"
+
+Finally, to instantiate a client instance within your Scala application and connect it to a running Spark Kernel, you should add the following to your application:
+
+    val profileJSON: String = """
+    {
+        "stdin_port":   48691,
+        "control_port": 40544,
+        "hb_port":      43462,
+        "shell_port":   44808,
+        "iopub_port":   49691,
+        "ip": "127.0.0.1",
+        "transport": "tcp",
+        "signature_scheme": "hmac-sha256",
+        "key": ""
+    }
+    """.stripMargin
+
+    // Parse our configuration and create a client connecting to our kernel
+    val config: Config = ConfigFactory.parseString(profileJSON)
+    val client = new SparkKernelClientBootstrap(config).createClient
+
+Currently, the client only accepts connection information via a JSON string (or other means of [TypeSafe Config][11] generation). Once the above code has executed, you will have a client instance with which to communicate with the Spark Kernel.
+
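+As a quick sanity check (a minimal sketch; see the client guide below for the
+full API), you can submit a snippet of code through the client:
+
+    // Ask the kernel to evaluate a simple expression
+    client.execute("println(1 + 1)")
+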
+__For more information, please visit the [Guide to using the Spark Kernel Client][12] and [Guide to the Comm API][13] sections.__
+
+[1]: https://spark.apache.org
+[2]: Overview-of-the-Spark-Kernel-Project
+[3]: In-depth-Look-at-the-Spark-Kernel
+[4]: http://zeromq.org/
+[5]: http://www.scala-sbt.org/download.html
+[6]: Building-from-Source
+[7]: Usage-Instructions-for-the-Spark-Kernel
+[8]: Using-the-Docker-Container-for-the-Spark-Kernel
+[9]: Vagrant-Development-Environment
+[10]: Guide-to-Integrating-the-Spark-Kernel-with-the-IPython-Notebook-(3.x)
+[11]: https://github.com/typesafehub/config
+[12]: Guide-for-the-Spark-Kernel-Client
+[13]: Guide-to-the-Comm-API-of-the-Spark-Kernel-and-Spark-Kernel-Client
+[14]: Language-Support-on-the-Spark-Kernel

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/indepth-look.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/indepth-look.md b/documentation/quick-start/indepth-look.md
new file mode 100644
index 0000000..7f7d001
--- /dev/null
+++ b/documentation/quick-start/indepth-look.md
@@ -0,0 +1,17 @@
+---
+layout: docpage
+title: In-depth Look
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+TODO
+
+[Spark Meetup talk on Spark kernel architecture][2]
+
+[Download Architecture Keynote][1]
+
+[1]: kernel_architecture_v2.key
+[2]: http://www.youtube.com/watch?v=2AX6g0tK-us

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/index.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/index.md b/documentation/quick-start/index.md
deleted file mode 100644
index a0be381..0000000
--- a/documentation/quick-start/index.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-layout: docpage
-title: Quick Start
-type: doc
-section: quick-start
-weight: 0
-tagline: Apache Project !
----
-
-{% include JB/setup %}
-
-Some doc page.
-

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/integrating-with-jupyter.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/integrating-with-jupyter.md b/documentation/quick-start/integrating-with-jupyter.md
new file mode 100644
index 0000000..d567424
--- /dev/null
+++ b/documentation/quick-start/integrating-with-jupyter.md
@@ -0,0 +1,89 @@
+---
+layout: docpage
+title: Integrating with Jupyter
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+The following guide explains how to package the Spark Kernel for use with the IPython notebook and how to configure the notebook to provide the Spark Kernel as an option (next to Python).
+
+### Requirements ###
+
+Package up the kernel by running `make dist`. More detailed information can be found [here](Building-from-Source#packaging). The resulting package will be under `dist/spark-kernel`. From now on, we will refer to this location as `<spark-kernel>`.
+
+### Configuring Jupyter ###
+
+Kernels are registered with Jupyter by using a `kernel.json` file that describes how to launch the kernel process. This file is known as the Kernel Spec. Depending on the version of Jupyter/IPython and your OS, the location where this file must be created varies. Please refer to the [Jupyter 4](http://jupyter-client.readthedocs.org/en/latest/kernels.html#kernel-specs)/[IPython 3.2](https://ipython.readthedocs.org/en/3.x/development/kernels.html?highlight=kernel#kernel-specs) documentation for full instructions. We will refer to this location as `<kernel_config_dir>`.
+
+Create `<kernel_config_dir>/spark/kernel.json` and add the following content:
+
+```
+{
+    "display_name": "Spark 1.5.1 (Scala 2.10.4)",
+    "language_info": { "name": "scala" },
+    "argv": [
+        "<spark-kernel>/bin/spark-kernel",
+        "--profile",
+        "{connection_file}"
+    ],
+    "codemirror_mode": "scala",
+    "env": {
+        "SPARK_OPTS": "--master=local[2] --driver-java-options=-Xms1024M --driver-java-options=-Xmx4096M --driver-java-options=-Dlog4j.logLevel=info",
+        "MAX_INTERPRETER_THREADS": "16",
+        "CAPTURE_STANDARD_OUT": "true",
+        "CAPTURE_STANDARD_ERR": "true",
+        "SEND_EMPTY_OUTPUT": "false",
+        "SPARK_HOME": "<SPARK_HOME>",
+        "PYTHONPATH": "<SPARK_HOME>/python:<SPARK_HOME>/python/lib/py4j-0.8.2.1-src.zip"
+     }
+}
+```
+
+The _display name_ property is merely used in the dropdown menu of the
+notebook interface.
+
+The _argv_ property is the most significant part of the configuration as it
+tells Jupyter what process to start as a kernel and provides it with the
+port information Jupyter specifies for communication, as well as any other kernel specific options (see [details](Usage-Instructions-for-the-Spark-Kernel#kernel-specific-options)).
+
+
+
+The _connection file_ is replaced by Jupyter with the path to the JSON file
+containing port and signature information. That structure looks like the
+following:
+
+```
+{
+    "stdin_port": 48691,
+    "ip": "127.0.0.1",
+    "control_port": 44808,
+    "hb_port": 49691,
+    "signature_scheme": "hmac-sha256",
+    "key": "",
+    "shell_port": 40544,
+    "transport": "tcp",
+    "iopub_port": 43462
+}
+```
+
+[CodeMirror](http://codemirror.net/) is used by Jupyter for cell editing and
+syntax highlighting. It provides quite a few capabilities, such as _vim_
+keybindings for editing cells. The `codemirror_mode` field is optional and can
+be set to a series of keys and values, although a single value also works.
+
+The _env_ property is used to define environment variables. The two most important variables to set are `SPARK_HOME` and `SPARK_OPTS`, the latter for Spark-specific options (see [details](Usage-Instructions-for-the-Spark-Kernel#setting-spark_opts)).
+
+### Ensure that your kernel is available ###
+
+The following commands are specific to IPython 3.2. You can test that your kernel is recognized by running the following:
+
+1. `ipython kernelspec list` - shows a list of kernels, _spark_ should be there
+
+2. `ipython console --kernel spark` - should launch the Spark Kernel and
+    provide a console-like input
+
+3. `ipython notebook` - open a notebook from the user interface; there should
+    now be a dropdown menu (set to Python 2) in the top-right of the page that
+    has an option for the Spark Kernel

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/language-support.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/language-support.md b/documentation/quick-start/language-support.md
new file mode 100644
index 0000000..8516b7e
--- /dev/null
+++ b/documentation/quick-start/language-support.md
@@ -0,0 +1,132 @@
+---
+layout: docpage
+title: Language Support
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+
+The Spark Kernel supports the following languages: [Scala](#scala), [Python](#python), [R](#r), and [SQL](#sql). As of version 0.1.5 of the Spark Kernel, only Scala is considered stable.
+
+The language support for Python and R requires the system running the Spark Kernel to also contain a Python and R runtime, respectively. Furthermore, when using R, the location of R's site packages needs to be writable by the Spark Kernel. This is due to the current setup required to be able to run R code using the SparkR library.
+
+_Note: Currently, only Scala supports evaluating magics and using the Comm API._
+
+### Setting the default language
+
+To change the default language of the Spark Kernel, you can provide an environment variable or a command line option.
+
+| Language | Spark Implementation | Value to provide to Spark Kernel |
+|----------|----------------------|----------------------------------|
+| Scala    | Scala with Spark     | Scala                            |
+| Python   | Python with PySpark  | PySpark                          |
+| R        | R with SparkR        | SparkR                           |
+| SQL      | Spark SQL            | SQL                              |
+
+_Note: Values provided to the Spark Kernel are case insensitive, meaning SparkR is the same as sparkr._
+
+
+Using bash:
+
+```bash
+DEFAULT_INTERPRETER=pyspark <spark-kernel-dist>/bin/spark-kernel
+```
+
+```bash
+<spark-kernel-dist>/bin/spark-kernel --default-interpreter pyspark
+```
+
+### Scala
+
+The Scala language is the default language used by the Spark Kernel. Any code sent to the Spark Kernel will be executed as Scala.
+
+```scala
+println(sc.parallelize(1 to 100).reduce(_ + _))
+```
+
+The Scala interpreter can also be invoked via an equivalent magic.
+
+```scala
+%%scala println(sc.parallelize(1 to 100).reduce(_ + _))
+```
+
+### Python
+
+The Python language uses PySpark to interact with Spark. Use the `%%pyspark` magic to run code using Python.
+
+```python
+%%pyspark print sc.parallelize(range(1, 101)).reduce(lambda a,b: a+b)
+```
+
+##### Requirements
+* A local copy of the Apache Spark distribution that you should point to via _SPARK\_HOME_. 
+* Add the PySpark library (and other associated libraries) to your Python path.
+
+An example of a Jupyter configuration with the proper PySpark information set is as follows:
+
+```json
+{
+    "display_name": "Spark 1.5.1 (PySpark)",
+    "language_info": { "name": "python" },
+    "argv": [
+        "/home/vagrant/local/bin/sparkkernel",
+        "--profile",
+        "{connection_file}"
+    ],
+    "codemirror_mode": "python",
+    "env": {
+        "SPARK_HOME": "/opt/spark",
+        "PYTHONPATH": "/opt/spark/python:/opt/spark/python/lib/py4j-0.8.2.1-src.zip"
+     }
+}
+```
+
+> Note: <br>
+> The _PYTHONPATH_ environment variable is set to `/opt/spark/python:/opt/spark/python/lib/py4j-0.8.2.1-src.zip`, which contains the path to the PySpark library as well as inclusion of a provided copy of Py4J (needed for the PySpark library).
+
+### R
+
+The R language uses SparkR to interact with Spark. Use the `%%sparkr` magic to run code using R.
+
+```r
+%%sparkr
+df <- createDataFrame(sqlContext, faithful)
+head(df)
+```
+
+> Note: <br>
+> The R language uses a forked SparkR implementation underneath. The forking of SparkR was required to expose certain APIs to access the JVM and allow connecting to an existing Spark Context. 
+
+##### Requirements
+* R version 3.2+ 
+* A local copy of the Apache Spark distribution that you should point to via _SPARK\_HOME_. 
+* Make sure that the package directory used by R when installing packages is writable; this is necessary to install the modified SparkR library, which is done automatically before any R code is run.
+
+If the package directory is not writable by the Spark Kernel, then you should see an error similar to the following:
+
+```
+Installing package into ‘/usr/local/lib/R/site-library’
+(as ‘lib’ is unspecified)
+Warning in install.packages("sparkr_bundle.tar.gz", repos = NULL, type = "source") :
+'lib = "/usr/local/lib/R/site-library"' is not writable
+Error in install.packages("sparkr_bundle.tar.gz", repos = NULL, type = "source") :
+unable to install packages
+Execution halted
+```
+
+### SQL
+
+The SQL language uses Spark SQL underneath. This serves as a wrapper around the standard `sparkSql.sql("YOUR SQL CODE")`. No additional setup is necessary to use Spark SQL.
+
+While the SQL interpreter can be set as the default, Hive support has not yet been included directly in the Spark Kernel, so it is recommended to use the SQL magic instead to access data registered via languages such as Scala.
+
+```scala
+val df = sqlContext.read.json("/opt/spark/examples/src/main/resources/people.json")
+df.registerTempTable("mytable")
+```
+
+```sql
+%%sql SELECT age FROM mytable
+```

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/overview-of-magics.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/overview-of-magics.md b/documentation/quick-start/overview-of-magics.md
new file mode 100644
index 0000000..15fece0
--- /dev/null
+++ b/documentation/quick-start/overview-of-magics.md
@@ -0,0 +1,82 @@
+---
+layout: docpage
+title: Overview of Magics
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+The concept of magics stems from IPython, where various pieces of functionality
+are provided through line and cell magics. Line magics are represented by a
+single `%` followed by the magic name (e.g. `%AddJar <args>`) and only
+allow for a single line of content. Cell magics are represented by `%%` followed by the magic name (e.g. `%%HTML <args>`) and allow for multiple
+lines of content. See the examples below for more details.
+
+# Line Magics
+
+### Description
+A line magic is basically a function that is supported by the kernel to expand its capabilities. Line magics are placed at the beginning of a line, prefixed by a `%` symbol, and receive the rest of the line as arguments:
+```scala
+// foo and bar will be arguments to ExampleLineMagic
+%ExampleLineMagic foo bar
+```
+
+Multiple line magics can be invoked in a single cell:
+```scala
+val x = 3
+%AddDeps com.google.guava guava 18.0
+val z = 5
+%lsmagic
+import com.google.common.base.Strings._
+```
+
+### Other Things to Note
+- Magic names are case insensitive; if a line magic `AddJar` exists, then `%addjar`, `%ADDJar`, and all other variants are valid. 
+
+- Each magic has its own arguments; usage information can be obtained for a magic by typing `%<MagicName>`.
+
+- Line magics receive the _literal_ rest of the line as arguments, so the following string interpolation will not work:
+```scala
+for(i <- (1 to 10)) 
+  %addDeps s"com.google.guava guava $i"
+```
+
+# Cell Magics
+
+### Description
+Cell Magics are magics that take the rest of the cell as input. Unlike Line Magics, Cell Magics can alter the output of a cell and must be the first thing in the cell.
+
+As an example, the `%%HTML` cell magic renders the contents of the cell as HTML:
+```scala
+%%HTML
+<h1>Hello World!</h1>
+// more arguments...
+```
+
+# Programmatic Magic Usage
+
+### Description
+There exists a programmatic API for those who do not wish to use the IPython-esque `%` and `%%` syntax. The Spark Kernel exposes a `kernel` object which provides programmatic invocation of magic code in the form:
+```scala
+//magicName is case insensitive
+kernel.magics.<magicName>("<string of args>")
+```
+e.g.
+```scala
+// some line magics
+kernel.magics.addJar("http://www.myjars.com/my.jar")
+kernel.magics.addDeps("com.google.guava guava 18.0")
+```
+
+```scala
+// a cell magic
+kernel.magics.html("
+  <h1>Hello World!</h1>
+")
+```
+
+This syntax allows magics to be embedded in loops and other iteration, unlike the `%` equivalent:
+```scala
+(1 to 10).foreach (i => kernel.magics.addDeps(s"com.google.guava guava $i"))
+```

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/overview-of-project.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/overview-of-project.md b/documentation/quick-start/overview-of-project.md
new file mode 100644
index 0000000..8c775b6
--- /dev/null
+++ b/documentation/quick-start/overview-of-project.md
@@ -0,0 +1,45 @@
+---
+layout: docpage
+title: Overview of Project
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+
+The Spark Kernel has one main goal: provide the foundation for interactive applications to connect and use [Apache Spark][1].
+
+![](overview.png)
+
+The kernel provides several key features for applications:
+
+1. Define and run Spark Tasks
+
+    - Executing Scala code dynamically in a similar fashion to the _Scala REPL_ and _Spark Shell_
+
+    - Plug points for accessing Spark-only objects including an instance of the [Spark Context][2]
+
+2. Collect Results without a Datastore
+
+    - Send execution results and streaming data back via the Spark Kernel to your applications
+
+    - Use the [Comm API][3] - an abstraction of the IPython protocol - for more detailed data 
+      communication and synchronization between your applications and the Spark Kernel
+
+3. Host and Manage Applications Separately from Apache Spark
+
+    - The _Spark Kernel_ serves as a proxy for requests to the Apache Spark cluster
+
+The project intends to provide applications with the ability to send both packaged jars and code snippets. As it implements the latest IPython message protocol (5.0), the Spark Kernel can easily plug into the 3.x branch of IPython for quick, interactive data exploration. The Spark Kernel strives to be extensible, providing a [pluggable interface][4] for developers to add their own functionality.
+
+__For more information, please visit the [Spark Kernel wiki][5].__
+
+__For bug reporting and feature requests, please visit the [Spark Kernel issue list][6].__
+
+[1]: https://spark.apache.org/
+[2]: http://spark.apache.org/docs/latest/api/scala/org/apache/spark/SparkContext.html
+[3]: Guide-to-the-Comm-API-of-the-Spark-Kernel-and-Spark-Kernel-Client
+[4]: Guide-to-Developing-Magics-for-the-Spark-Kernel
+[5]: https://github.com/ibm-et/spark-kernel/wiki
+[6]: https://github.com/ibm-et/spark-kernel/issues

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/spark-kernel-client.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/spark-kernel-client.md b/documentation/quick-start/spark-kernel-client.md
new file mode 100644
index 0000000..ba9ac2c
--- /dev/null
+++ b/documentation/quick-start/spark-kernel-client.md
@@ -0,0 +1,138 @@
+---
+layout: docpage
+title: Spark Kernel Client
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+The following is a rough guide to building, setting up, and using the _Spark Kernel client_, a library available to communicate with the Spark Kernel.
+
+### Building from Source ###
+
+From the root of the Spark Kernel project, invoke the following commands:
+```
+sbt compile
+sbt publishLocal
+```
+
+This will publish the Spark Kernel build artifacts to your `~/.ivy2/local`
+directory. You can now include the Spark Kernel Client jar in your SBT 
+dependencies:
+
+```scala
+libraryDependencies += "com.ibm.spark" %% "client" % "0.1.1-SNAPSHOT"
+```
+
+### Usage Instructions ###
+
+A comprehensive example of all the steps below can be found in [DocumentationExamples.scala](https://github.com/ibm-et/spark-kernel/blob/master/client/src/test/scala/examples/DocumentationExamples.scala).
+
+### Client Setup ###
+
+To begin using the Spark Kernel Client, you need to specify the client's
+connection information by passing a JSON structure to Typesafe's
+`ConfigFactory`. The kernel prints this connection information whenever it
+starts. An example JSON structure using the kernel's default configuration
+values:
+
+```scala
+val profileJSON: String = """
+    {
+        "stdin_port":   48691,
+        "control_port": 40544,
+        "hb_port":      43462,
+        "shell_port":   44808,
+        "iopub_port":   49691,
+        "ip": "127.0.0.1",
+        "transport": "tcp",
+        "signature_scheme": "hmac-sha256",
+        "key": ""
+    }
+  """.stripMargin
+```
+
+Once the JSON structure exists, the kernel client can be created:
+
+```scala
+val config: Config = ConfigFactory.parseString(profileJSON)
+val client = (new ClientBootstrap(config) 
+  with StandardSystemInitialization
+  with StandardHandlerInitialization).createClient
+```
+
+### Executing Code ###
+
+Code is executed with the client by invoking its `execute` function, which
+takes the code to run as a string argument. Example executions include:
+ 
+```scala
+//  Create a variable, z, and assign a value to it
+client.execute("val z = 0")
+//  Perform some computation 
+client.execute("1 + 1")
+//  Print some message 
+client.execute("println(\"Hello, World\")")
+```
+
+The `execute` function returns a deferred object for interacting with the
+results of the code execution. Callbacks are registered on it through the
+methods `onResult`, `onStream`, and `onError`, which are explained below.
+
+### Receiving Results `onResult` ###
+
+The results of code execution are sent to the callback functions registered
+with `onResult`. The argument to each callback is an
+[`ExecuteResult`](http://ipython.org/ipython-doc/dev/development/messaging.html#id4)
+from the IPython message protocol. Code execution results originate from
+variable assignments and simple Scala statements. Any number of callbacks can
+be registered, even if code execution has already completed. Each callback
+will be invoked once for successful code execution and never in the case of
+failure. Examples include:
+
+```scala
+//  Define our callback
+def printResult(result: ExecuteResult) = {
+    println(s"Result was: ${result.data.get(MIMEType.PlainText).get}")
+}
+//  Create a variable, z, and assign a value to it
+client.execute("val z = 0").onResult(printResult)
+//  Perform some computation, and print it twice 
+client.execute("1 + 1").onResult(printResult).onResult(printResult)
+//  The callback will never be invoked 
+client.execute("someUndefinedVariable").onResult(printResult)
+```
+
+### Receiving Print Streams `onStream` ###
+
+The output from code that prints to stdout can be accessed by registering a
+callback with the `onStream` method. Every registered callback is invoked once
+for each `StreamContent` message received. If a callback is registered after a
+`StreamContent` message has arrived, that callback will *NOT* receive the
+message; a callback only receives messages that arrive after it has been
+registered. Examples include:
+
+```scala
+def printStreamContent(content:StreamContent) = {
+    println(s"Stream content was: ${content.text}")
+}
+client.execute("println(1/1)").onStream(printStreamContent)
+client.execute("println(\"Hello, World\")").onStream(printStreamContent)
+```
+
+### Handling Errors `onError` ###
+
+When an error occurs during code execution, all callbacks registered with
+`onError` will be called exactly once. Example usages include:
+
+```scala
+def printError(reply:ExecuteReplyError) = {
+    println(s"Error was: ${reply.ename.get}")
+}
+//  Error from executing a statement
+client.execute("1/0").onError(printError)
+//  Error from invoking a println
+client.execute("println(someUndefinedVar").onError(printError)
+```
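+
+The callbacks above can also be combined on a single execution. A minimal sketch reusing the callbacks defined in the previous sections, assuming `onStream` and `onError` chain in the same way the repeated `onResult` example does:
+
+```scala
+// register callbacks for the result, any stdout output, and any error of a
+// single execution; only the applicable callbacks will be invoked
+client.execute("println(1 + 1)")
+  .onResult(printResult)
+  .onStream(printStreamContent)
+  .onError(printError)
+```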

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/test-structure-of-project.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/test-structure-of-project.md b/documentation/quick-start/test-structure-of-project.md
new file mode 100644
index 0000000..66a7c0f
--- /dev/null
+++ b/documentation/quick-start/test-structure-of-project.md
@@ -0,0 +1,65 @@
+---
+layout: docpage
+title: Test Structure of Project
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+### Prerequisites 
+
+You must install the [library dependencies][1] to properly run the tests.
+
+### Testing Levels
+There are four levels of test in this project:
+
+1. Unit - tests that isolate a specific class/object/etc for its functionality
+
+2. Integration - tests that illustrate functionality between multiple
+   components
+
+3. System - tests that demonstrate correctness across the entire system
+
+4. Scratch - tests isolated in a local branch, used for quick sanity checks
+   and not intended for inclusion in the project's test suite
+
+### Test Execution
+
+To execute specific tests, run sbt with the following:
+
+1. Unit - `sbt unit:test`
+
+2. Integration - `sbt integration:test`
+
+3. System - `sbt system:test`
+
+4. Scratch - `sbt scratch:test`
+
+To run all tests, use `sbt test`!
+
+### Naming Conventions
+
+The naming convention for tests is as follows:
+
+1. Unit - test classes end with _Spec_
+   e.g. CompleteRequestSpec
+    * Placed under _com.ibm.spark_
+
+2. Integration - test classes end with _SpecForIntegration_
+   e.g. InterpreterWithActorSpecForIntegration
+    * Placed under _integration_
+
+3. System - test classes end with _SpecForSystem_
+   e.g. InputToAddJarSpecForSystem
+    * Placed under _system_
+
+4. Scratch
+    * Placed under _scratch_
+
+It is also possible to run tests for a specific project by using the following
+syntax in sbt:
+
+    sbt <PROJECT>/test
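+
+For example, assuming a subproject named `client` (the kernel client library referenced elsewhere in these docs):
+
+    sbt client/test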
+
+[1]: Building-from-Source

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/usage-instructions.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/usage-instructions.md b/documentation/quick-start/usage-instructions.md
new file mode 100644
index 0000000..6f8fc7a
--- /dev/null
+++ b/documentation/quick-start/usage-instructions.md
@@ -0,0 +1,49 @@
+---
+layout: docpage
+title: Usage Instructions
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+
+The Spark Kernel package contains an assembly jar and a startup script. The kernel can be started standalone or configured to be launched by a Jupyter notebook. Regardless of how you use the kernel, it is recommended to use the startup script.
+
+#### Setting SPARK_HOME ####
+After [building](Building-from-Source) the kernel, the startup script will be located at `<spark-kernel-project>/dist/spark-kernel/bin/spark-kernel`. This script requires a Spark 1.5.x+ distribution to be installed and its location set as `SPARK_HOME`; it uses `spark-submit` to start the kernel.
+
+#### Setting SPARK_OPTS ####
+Most options available for `spark-submit` also apply to running the kernel. These options must be passed using the `SPARK_OPTS` environment variable.
+
+Refer to the Spark [documentation](http://spark.apache.org/docs/latest/submitting-applications.html) for details on available options. Please note: the kernel is intended to be launched in client mode, since access to the kernel's communication ports is essential (especially when launched through Jupyter).
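+
+As a rough sketch (the Spark location, master URL, and memory setting below are only illustrative), a standalone launch might look like:
+
+    export SPARK_HOME=/opt/spark
+    SPARK_OPTS="--master spark://127.0.0.1:7077 --driver-memory 2g" \
+      <spark-kernel-project>/dist/spark-kernel/bin/spark-kernel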
+
+#### Kernel Specific Options ####
+The kernel also supports a number of kernel-specific options, which are passed directly to the startup script; a combined example appears at the end of this section.
+
+The following command line options are available:
+
+* `--profile <file>` - the file to load containing the ZeroMQ port information
+* `--help` - displays the help menu detailing usage instructions
+
+Additionally, network configuration can be passed as command line arguments:
+
+* `--ip <address>`
+* `--stdin-port <port>`
+* `--shell-port <port>`
+* `--iopub-port <port>`
+* `--control-port <port>`
+* `--heartbeat-port <port>`
+
+These settings can also be specified as environment variables:
+
+* `IP`
+* `STDIN_PORT`
+* `SHELL_PORT`
+* `IOPUB_PORT`
+* `CONTROL_PORT`
+* `HB_PORT`
+
+Other options:
+
+* `--nosparkcontext` - the SparkContext is not created on startup.
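+
+As an illustration (the address and port value below are arbitrary), the options above can be combined on the command line:
+
+    <spark-kernel-project>/dist/spark-kernel/bin/spark-kernel \
+      --ip 0.0.0.0 --shell-port 40010 --nosparkcontext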

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/using-docker.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/using-docker.md b/documentation/quick-start/using-docker.md
new file mode 100644
index 0000000..9290168
--- /dev/null
+++ b/documentation/quick-start/using-docker.md
@@ -0,0 +1,26 @@
+---
+layout: docpage
+title: Using Docker
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+The Spark Kernel can be run in a Docker container using Docker 1.0.0+. A
+Dockerfile is included in the root of the project. You will need to compile
+and pack the Spark Kernel before the Docker image can be built.
+
+    sbt compile
+    sbt pack
+    docker build -t spark-kernel .
+
+After the image has been successfully created, you can run your container by 
+executing the command:
+
+    docker run -d -e IP=0.0.0.0 spark-kernel 
+
+You must always include `-e IP=0.0.0.0` to allow the kernel to bind to the
+Docker container's IP address. The environment variables listed in the getting
+started section can be used in the `docker run` command, allowing you to
+explicitly set the kernel's ports.
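+
+For instance (the port value below is arbitrary and simply mirrors the default IOPub port shown earlier), a kernel port could be pinned via an environment variable and published from the container:
+
+    docker run -d -e IP=0.0.0.0 -e IOPUB_PORT=49691 -p 49691:49691 spark-kernel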

http://git-wip-us.apache.org/repos/asf/incubator-toree-website/blob/046bbe0a/documentation/quick-start/using-vagrant.md
----------------------------------------------------------------------
diff --git a/documentation/quick-start/using-vagrant.md b/documentation/quick-start/using-vagrant.md
new file mode 100644
index 0000000..a985745
--- /dev/null
+++ b/documentation/quick-start/using-vagrant.md
@@ -0,0 +1,26 @@
+---
+layout: docpage
+title: Using Vagrant
+type: doc
+section: quick-start
+weight: 0
+tagline: Apache Project !
+---
+
+A Vagrantfile is provided to easily set up a development environment. You will
+need to install [VirtualBox 4.3.12+](https://www.virtualbox.org/wiki/Downloads)
+and [Vagrant 1.6.2+](https://www.vagrantup.com/downloads.html).
+
+First, bring up the Vagrant box from the root of the project:
+
+    vagrant up
+    
+Second, set `USE_VAGRANT`:
+
+    export USE_VAGRANT=true
+
+Third, run any `make` target as usual and it will run within Vagrant. To quickly test the kernel, run:
+
+    make dev
+    
+This will build, package, and start a notebook. You can now reach the notebook frontend at http://192.168.44.44:8888.