Posted to commits@samza.apache.org by jg...@apache.org on 2014/06/23 22:42:57 UTC

[1/2] SAMZA-301: Add syntax highlighting to documentation code listings.

Repository: incubator-samza
Updated Branches:
  refs/heads/master c642effae -> d913037a8


http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/startup/download/index.md
----------------------------------------------------------------------
diff --git a/docs/startup/download/index.md b/docs/startup/download/index.md
index 1cb1227..df86e2d 100644
--- a/docs/startup/download/index.md
+++ b/docs/startup/download/index.md
@@ -31,62 +31,68 @@ All Samza JARs are published through Maven.
 
 A Samza project that runs with Kafka and YARN should depend on the following artifacts.
 
-    <dependency>
-      <groupId>samza</groupId>
-      <artifactId>samza-api</artifactId>
-      <version>0.7.0</version>
-    </dependency>
-    <dependency>
-      <groupId>samza</groupId>
-      <artifactId>samza-core_2.9.2</artifactId>
-      <version>0.7.0</version>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>samza</groupId>
-      <artifactId>samza-serializers_2.9.2</artifactId>
-      <version>0.7.0</version>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>samza</groupId>
-      <artifactId>samza-yarn_2.9.2</artifactId>
-      <version>0.7.0</version>
-      <classifier>yarn-2.0.5-alpha</classifier>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>samza</groupId>
-      <artifactId>samza-kafka_2.9.2</artifactId>
-      <version>0.7.0</version>
-      <scope>runtime</scope>
-    </dependency>
+{% highlight xml %}
+<dependency>
+  <groupId>samza</groupId>
+  <artifactId>samza-api</artifactId>
+  <version>0.7.0</version>
+</dependency>
+<dependency>
+  <groupId>samza</groupId>
+  <artifactId>samza-core_2.9.2</artifactId>
+  <version>0.7.0</version>
+  <scope>runtime</scope>
+</dependency>
+<dependency>
+  <groupId>samza</groupId>
+  <artifactId>samza-serializers_2.9.2</artifactId>
+  <version>0.7.0</version>
+  <scope>runtime</scope>
+</dependency>
+<dependency>
+  <groupId>samza</groupId>
+  <artifactId>samza-yarn_2.9.2</artifactId>
+  <version>0.7.0</version>
+  <classifier>yarn-2.0.5-alpha</classifier>
+  <scope>runtime</scope>
+</dependency>
+<dependency>
+  <groupId>samza</groupId>
+  <artifactId>samza-kafka_2.9.2</artifactId>
+  <version>0.7.0</version>
+  <scope>runtime</scope>
+</dependency>
+{% endhighlight %}
 
 #### Repositories
 
 Samza is available in the Apache Maven repository.
 
-    <repository>
-      <id>apache-releases</id>
-      <url>https://repository.apache.org/content/groups/public</url>
-    </repository>
+{% highlight xml %}
+<repository>
+  <id>apache-releases</id>
+  <url>https://repository.apache.org/content/groups/public</url>
+</repository>
+{% endhighlight %}
 
 Snapshot builds are available in the Apache Maven snapshot repository.
 
-    <repository>
-      <id>apache-snapshots</id>
-      <url>https://repository.apache.org/content/groups/snapshots</url>
-    </repository>
+{% highlight xml %}
+<repository>
+  <id>apache-snapshots</id>
+  <url>https://repository.apache.org/content/groups/snapshots</url>
+</repository>
+{% endhighlight %}
 -->
 
 ### Checking out and Building
 
 If you're interested in working on Samza, or building the JARs from scratch, then you'll need to check out and build the code. Samza does not have a binary release at this time. To check out and build Samza, run these commands.
 
-```
+{% highlight bash %}
 git clone http://git-wip-us.apache.org/repos/asf/incubator-samza.git
 cd incubator-samza
 ./gradlew clean build
-```
+{% endhighlight %}
 
 See the README.md file for details on building.

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/startup/hello-samza/0.7.0/index.md
----------------------------------------------------------------------
diff --git a/docs/startup/hello-samza/0.7.0/index.md b/docs/startup/hello-samza/0.7.0/index.md
index d813849..92d5ba2 100644
--- a/docs/startup/hello-samza/0.7.0/index.md
+++ b/docs/startup/hello-samza/0.7.0/index.md
@@ -24,8 +24,10 @@ The [hello-samza](https://github.com/apache/incubator-samza-hello-samza) project
 
 Check out the hello-samza project:
 
-    git clone git://git.apache.org/incubator-samza-hello-samza.git hello-samza
-    cd hello-samza
+{% highlight bash %}
+git clone git://git.apache.org/incubator-samza-hello-samza.git hello-samza
+cd hello-samza
+{% endhighlight %}
 
 This project contains everything you'll need to run your first Samza jobs.
 
@@ -33,7 +35,9 @@ This project contains everything you'll need to run your first Samza jobs.
 
 A Samza grid usually comprises three different systems: [YARN](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html), [Kafka](http://kafka.apache.org/), and [ZooKeeper](http://zookeeper.apache.org/). The hello-samza project comes with a script called "grid" to help you set up these systems. Start by running:
 
-    bin/grid bootstrap
+{% highlight bash %}
+bin/grid bootstrap
+{% endhighlight %}
 
 This command will download, install, and start ZooKeeper, Kafka, and YARN. It will also check out the latest version of Samza and build it. All package files will be put in a sub-directory called "deploy" inside hello-samza's root folder.
 
@@ -45,19 +49,25 @@ Once the grid command completes, you can verify that YARN is up and running by g
 
 Before you can run a Samza job, you need to build a package for it. This package is what YARN uses to deploy your jobs on the grid.
 
-    mvn clean package
-    mkdir -p deploy/samza
-    tar -xvf ./samza-job-package/target/samza-job-package-0.7.0-dist.tar.gz -C deploy/samza
+{% highlight bash %}
+mvn clean package
+mkdir -p deploy/samza
+tar -xvf ./samza-job-package/target/samza-job-package-0.7.0-dist.tar.gz -C deploy/samza
+{% endhighlight %}
 
 ### Run a Samza Job
 
 After you've built your Samza package, you can start a job on the grid using the run-job.sh script.
 
-    deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+{% highlight bash %}
+deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+{% endhighlight %}
 
 The job will consume a feed of real-time edits from Wikipedia, and produce them to a Kafka topic called "wikipedia-raw". Give the job a minute to start up, and then tail the Kafka topic:
 
-    deploy/kafka/bin/kafka-console-consumer.sh  --zookeeper localhost:2181 --topic wikipedia-raw
+{% highlight bash %}
+deploy/kafka/bin/kafka-console-consumer.sh  --zookeeper localhost:2181 --topic wikipedia-raw
+{% endhighlight %}
 
 Pretty neat, right? Now, check out the YARN UI again ([http://localhost:8088](http://localhost:8088)). This time around, you'll see your Samza job is running!
 
@@ -67,23 +77,31 @@ If you can not see any output from Kafka consumer, you may have connection probl
 
 Let's calculate some statistics based on the messages in the wikipedia-raw topic. Start two more jobs:
 
-    deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-parser.properties
-    deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-stats.properties
+{% highlight bash %}
+deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-parser.properties
+deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-stats.properties
+{% endhighlight %}
 
 The first job (wikipedia-parser) parses the messages in wikipedia-raw, and extracts information about the size of the edit, who made the change, etc. You can take a look at its output with:
 
-    deploy/kafka/bin/kafka-console-consumer.sh  --zookeeper localhost:2181 --topic wikipedia-edits
+{% highlight bash %}
+deploy/kafka/bin/kafka-console-consumer.sh  --zookeeper localhost:2181 --topic wikipedia-edits
+{% endhighlight %}
 
 The last job (wikipedia-stats) reads messages from the wikipedia-edits topic, and calculates counts, every ten seconds, for all edits that were made during that window. It outputs these counts to the wikipedia-stats topic.
 
-    deploy/kafka/bin/kafka-console-consumer.sh  --zookeeper localhost:2181 --topic wikipedia-stats
+{% highlight bash %}
+deploy/kafka/bin/kafka-console-consumer.sh  --zookeeper localhost:2181 --topic wikipedia-stats
+{% endhighlight %}
 
 The messages in the stats topic look like this:
 
-    {"is-talk":2,"bytes-added":5276,"edits":13,"unique-titles":13}
-    {"is-bot-edit":1,"is-talk":3,"bytes-added":4211,"edits":30,"unique-titles":30,"is-unpatrolled":1,"is-new":2,"is-minor":7}
-    {"bytes-added":3180,"edits":19,"unique-titles":19,"is-unpatrolled":1,"is-new":1,"is-minor":3}
-    {"bytes-added":2218,"edits":18,"unique-titles":18,"is-unpatrolled":2,"is-new":2,"is-minor":3}
+{% highlight json %}
+{"is-talk":2,"bytes-added":5276,"edits":13,"unique-titles":13}
+{"is-bot-edit":1,"is-talk":3,"bytes-added":4211,"edits":30,"unique-titles":30,"is-unpatrolled":1,"is-new":2,"is-minor":7}
+{"bytes-added":3180,"edits":19,"unique-titles":19,"is-unpatrolled":1,"is-new":1,"is-minor":3}
+{"bytes-added":2218,"edits":18,"unique-titles":18,"is-unpatrolled":2,"is-new":2,"is-minor":3}
+{% endhighlight %}
 
 If you check the YARN UI again, you'll see that all three jobs are now listed.
 
@@ -91,6 +109,8 @@ If you check the YARN UI, again, you'll see that all three jobs are now listed.
 
 After you're done, you can clean everything up using the same grid script.
 
-    bin/grid stop all
+{% highlight bash %}
+bin/grid stop all
+{% endhighlight %}
 
 Congratulations! You've now set up a local grid that includes YARN, Kafka, and ZooKeeper, and run a Samza job on it. Next up, check out the [Background](/learn/documentation/0.7.0/introduction/background.html) and [API Overview](/learn/documentation/0.7.0/api/overview.html) pages.


[2/2] git commit: SAMZA-301: Add syntax highlighting to documentation code listings.

Posted by jg...@apache.org.
SAMZA-301: Add syntax highlighting to documentation code listings.

Signed-off-by: Jakob Homan <jg...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/incubator-samza/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-samza/commit/d913037a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-samza/tree/d913037a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-samza/diff/d913037a

Branch: refs/heads/master
Commit: d913037a878c2530b9fb5c7090116f57f49b5eaf
Parents: c642eff
Author: David Z. Chen <da...@outlook.com>
Authored: Wed Jun 18 02:37:11 2014 -0700
Committer: Jakob Homan <jg...@gmail.com>
Committed: Mon Jun 23 13:42:48 2014 -0700

----------------------------------------------------------------------
 docs/_config.yml                                |   2 +-
 docs/_layouts/default.html                      |   1 +
 docs/contribute/code.md                         |   2 +-
 docs/css/main.css                               |  33 +----
 docs/css/syntax.css                             |  74 ++++++++++
 docs/learn/documentation/0.7.0/api/overview.md  | 134 ++++++++++---------
 .../0.7.0/container/checkpointing.md            |  96 +++++++------
 .../documentation/0.7.0/container/event-loop.md |  10 +-
 docs/learn/documentation/0.7.0/container/jmx.md |  10 +-
 .../documentation/0.7.0/container/metrics.md    | 106 ++++++++-------
 .../0.7.0/container/samza-container.md          |  37 +++--
 .../0.7.0/container/serialization.md            |  48 +++----
 .../0.7.0/container/state-management.md         |  66 ++++-----
 .../documentation/0.7.0/container/streams.md    |  80 ++++++-----
 .../documentation/0.7.0/container/windowing.md  |  38 +++---
 .../0.7.0/introduction/architecture.md          |   4 +-
 .../documentation/0.7.0/jobs/configuration.md   |  12 +-
 .../documentation/0.7.0/jobs/job-runner.md      |  12 +-
 docs/learn/documentation/0.7.0/jobs/logging.md  |  38 +++---
 .../learn/documentation/0.7.0/jobs/yarn-jobs.md |   6 +-
 .../documentation/0.7.0/operations/kafka.md     |   4 +-
 .../0.7.0/deploy-samza-job-from-hdfs.md         |  22 +--
 .../tutorials/0.7.0/remote-debugging-samza.md   |  32 ++---
 .../0.7.0/run-hello-samza-without-internet.md   |  23 ++--
 .../tutorials/0.7.0/run-in-multi-node-yarn.md   |  62 ++++-----
 docs/less/main.less                             |  40 +++---
 docs/startup/download/index.md                  |  86 ++++++------
 docs/startup/hello-samza/0.7.0/index.md         |  54 +++++---
 28 files changed, 654 insertions(+), 478 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/_config.yml
----------------------------------------------------------------------
diff --git a/docs/_config.yml b/docs/_config.yml
index 9ffb45c..2f2f895 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -16,7 +16,7 @@
 # under the License.
 permalink: /:categories/:title
 name: Samza
-pygments: true
+highlighter: pygments
 markdown: redcarpet
 exclude: ['_notes']
 redcarpet:

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/_layouts/default.html
----------------------------------------------------------------------
diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html
index 226e416..5bea47a 100644
--- a/docs/_layouts/default.html
+++ b/docs/_layouts/default.html
@@ -23,6 +23,7 @@
     <link href="/css/bootstrap.min.css" rel="stylesheet"/>
     <link href="/css/font-awesome.min.css" rel="stylesheet"/>
     <link href="/css/main.css" rel="stylesheet"/>
+    <link href="/css/syntax.css" rel="stylesheet"/>
     <link rel="icon" type="image/png" href="/img/samza-icon.png">
   </head>
   <body>

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/contribute/code.md
----------------------------------------------------------------------
diff --git a/docs/contribute/code.md b/docs/contribute/code.md
index b9a3c02..a2e32bc 100644
--- a/docs/contribute/code.md
+++ b/docs/contribute/code.md
@@ -39,4 +39,4 @@ gem install jekyll
 
 Depending on your system, you may also need to install some additional dependencies when you try to run it. Note that some Linux distributions package older versions of Jekyll that treat arguments differently, which may result in changes not being incorporated into the generated site.
 
-The script to commit the updated webpage files is docs/_tools/publish-site.sh
+The script to commit the updated webpage files is `docs/_tools/publish-site.sh`

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/css/main.css
----------------------------------------------------------------------
diff --git a/docs/css/main.css b/docs/css/main.css
index 9dd0b0c..525d4f5 100755
--- a/docs/css/main.css
+++ b/docs/css/main.css
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 /* Non-responsive overrides
  *
  * Utilize the following CSS to disable the responsiveness of the container,
@@ -155,14 +154,13 @@ h4 {
 pre {
   border: 0px !important;
   border-radius: 0px !important;
-  overflow: scroll !important;
-  white-space: pre;
-  overflow-wrap: normal;
-  word-wrap: normal !important;
+  overflow-x: auto;
+  background-color: #f7f7f7;
+  font-size: 12px;
 }
 pre code {
+  overflow-wrap: normal;
   white-space: pre;
-  font-size: 12px;
 }
 th.header {
   cursor: pointer;
@@ -212,29 +210,12 @@ td.key {
 .committer-icon {
   font-size: 16px;
 }
-ul.documentation-list {
-  list-style: none;
-  padding-left: 20px;
-}
 img.diagram-large {
   width: 100%;
 }
-table.documentation {
-  border-collapse: collapse;
-  font-size: 12px;
-  margin: 1em 0;
-}
-table.documentation th, table.documentation td {
-  text-align: left;
-  vertical-align: top;
-  border: 1px solid #888;
-  padding: 5px;
-}
-table.documentation th.nowrap, table.documentation td.nowrap {
-  white-space: nowrap;
-}
-table.documentation th {
-  background-color: #eee;
+ul.documentation-list {
+  list-style: none;
+  padding-left: 20px;
 }
 .footer {
   clear: both;

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/css/syntax.css
----------------------------------------------------------------------
diff --git a/docs/css/syntax.css b/docs/css/syntax.css
new file mode 100644
index 0000000..6a8835f
--- /dev/null
+++ b/docs/css/syntax.css
@@ -0,0 +1,74 @@
+/*
+
+    Name:       Base16 Default Light
+    Author:     Chris Kempson (http://chriskempson.com)
+
+    Pygments template by Jan T. Sott (https://github.com/idleberg)
+    Created with Base16 Builder by Chris Kempson (https://github.com/chriskempson/base16-builder)
+
+*/
+.highlight .hll { background-color: #e0e0e0 }
+.highlight  { background: #f5f5f5; color: #151515 }
+.highlight .c { color: #b0b0b0 } /* Comment */
+.highlight .err { color: #ac4142 } /* Error */
+.highlight .k { color: #aa759f } /* Keyword */
+.highlight .l { color: #d28445 } /* Literal */
+.highlight .n { color: #151515 } /* Name */
+.highlight .o { color: #75b5aa } /* Operator */
+.highlight .p { color: #151515 } /* Punctuation */
+.highlight .cm { color: #b0b0b0 } /* Comment.Multiline */
+.highlight .cp { color: #b0b0b0 } /* Comment.Preproc */
+.highlight .c1 { color: #b0b0b0 } /* Comment.Single */
+.highlight .cs { color: #b0b0b0 } /* Comment.Special */
+.highlight .gd { color: #ac4142 } /* Generic.Deleted */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .gh { color: #151515; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #90a959 } /* Generic.Inserted */
+.highlight .gp { color: #b0b0b0; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #75b5aa; font-weight: bold } /* Generic.Subheading */
+.highlight .kc { color: #aa759f } /* Keyword.Constant */
+.highlight .kd { color: #aa759f } /* Keyword.Declaration */
+.highlight .kn { color: #75b5aa } /* Keyword.Namespace */
+.highlight .kp { color: #aa759f } /* Keyword.Pseudo */
+.highlight .kr { color: #aa759f } /* Keyword.Reserved */
+.highlight .kt { color: #f4bf75 } /* Keyword.Type */
+.highlight .ld { color: #90a959 } /* Literal.Date */
+.highlight .m { color: #d28445 } /* Literal.Number */
+.highlight .s { color: #90a959 } /* Literal.String */
+.highlight .na { color: #6a9fb5 } /* Name.Attribute */
+.highlight .nb { color: #151515 } /* Name.Builtin */
+.highlight .nc { color: #f4bf75 } /* Name.Class */
+.highlight .no { color: #ac4142 } /* Name.Constant */
+.highlight .nd { color: #75b5aa } /* Name.Decorator */
+.highlight .ni { color: #151515 } /* Name.Entity */
+.highlight .ne { color: #ac4142 } /* Name.Exception */
+.highlight .nf { color: #6a9fb5 } /* Name.Function */
+.highlight .nl { color: #151515 } /* Name.Label */
+.highlight .nn { color: #f4bf75 } /* Name.Namespace */
+.highlight .nx { color: #6a9fb5 } /* Name.Other */
+.highlight .py { color: #151515 } /* Name.Property */
+.highlight .nt { color: #75b5aa } /* Name.Tag */
+.highlight .nv { color: #ac4142 } /* Name.Variable */
+.highlight .ow { color: #75b5aa } /* Operator.Word */
+.highlight .w { color: #151515 } /* Text.Whitespace */
+.highlight .mf { color: #d28445 } /* Literal.Number.Float */
+.highlight .mh { color: #d28445 } /* Literal.Number.Hex */
+.highlight .mi { color: #d28445 } /* Literal.Number.Integer */
+.highlight .mo { color: #d28445 } /* Literal.Number.Oct */
+.highlight .sb { color: #90a959 } /* Literal.String.Backtick */
+.highlight .sc { color: #151515 } /* Literal.String.Char */
+.highlight .sd { color: #b0b0b0 } /* Literal.String.Doc */
+.highlight .s2 { color: #90a959 } /* Literal.String.Double */
+.highlight .se { color: #d28445 } /* Literal.String.Escape */
+.highlight .sh { color: #90a959 } /* Literal.String.Heredoc */
+.highlight .si { color: #d28445 } /* Literal.String.Interpol */
+.highlight .sx { color: #90a959 } /* Literal.String.Other */
+.highlight .sr { color: #90a959 } /* Literal.String.Regex */
+.highlight .s1 { color: #90a959 } /* Literal.String.Single */
+.highlight .ss { color: #90a959 } /* Literal.String.Symbol */
+.highlight .bp { color: #151515 } /* Name.Builtin.Pseudo */
+.highlight .vc { color: #ac4142 } /* Name.Variable.Class */
+.highlight .vg { color: #ac4142 } /* Name.Variable.Global */
+.highlight .vi { color: #ac4142 } /* Name.Variable.Instance */
+.highlight .il { color: #d28445 } /* Literal.Number.Integer.Long */

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/api/overview.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/api/overview.md b/docs/learn/documentation/0.7.0/api/overview.md
index 7e06d4e..6712344 100644
--- a/docs/learn/documentation/0.7.0/api/overview.md
+++ b/docs/learn/documentation/0.7.0/api/overview.md
@@ -21,56 +21,62 @@ title: API Overview
 
 When writing a stream processor for Samza, you must implement the [StreamTask](javadocs/org/apache/samza/task/StreamTask.html) interface:
 
-    package com.example.samza;
+{% highlight java %}
+package com.example.samza;
 
-    public class MyTaskClass implements StreamTask {
+public class MyTaskClass implements StreamTask {
 
-      public void process(IncomingMessageEnvelope envelope,
-                          MessageCollector collector,
-                          TaskCoordinator coordinator) {
-        // process message
-      }
-    }
+  public void process(IncomingMessageEnvelope envelope,
+                      MessageCollector collector,
+                      TaskCoordinator coordinator) {
+    // process message
+  }
+}
+{% endhighlight %}
 
 When you run your job, Samza will create several instances of your class (potentially on multiple machines). These task instances process the messages in the input streams.
 
 In your job's configuration you can tell Samza which streams you want to consume. An incomplete example could look like this (see the [configuration documentation](../jobs/configuration.html) for more detail):
 
-    # This is the class above, which Samza will instantiate when the job is run
-    task.class=com.example.samza.MyTaskClass
+{% highlight jproperties %}
+# This is the class above, which Samza will instantiate when the job is run
+task.class=com.example.samza.MyTaskClass
 
-    # Define a system called "kafka" (you can give it any name, and you can define
-    # multiple systems if you want to process messages from different sources)
-    systems.kafka.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
+# Define a system called "kafka" (you can give it any name, and you can define
+# multiple systems if you want to process messages from different sources)
+systems.kafka.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
 
-    # The job consumes a topic called "PageViewEvent" from the "kafka" system
-    task.inputs=kafka.PageViewEvent
+# The job consumes a topic called "PageViewEvent" from the "kafka" system
+task.inputs=kafka.PageViewEvent
 
-    # Define a serializer/deserializer called "json" which parses JSON messages
-    serializers.registry.json.class=org.apache.samza.serializers.JsonSerdeFactory
+# Define a serializer/deserializer called "json" which parses JSON messages
+serializers.registry.json.class=org.apache.samza.serializers.JsonSerdeFactory
 
-    # Use the "json" serializer for messages in the "PageViewEvent" topic
-    systems.kafka.streams.PageViewEvent.samza.msg.serde=json
+# Use the "json" serializer for messages in the "PageViewEvent" topic
+systems.kafka.streams.PageViewEvent.samza.msg.serde=json
+{% endhighlight %}
 
 For each message that Samza receives from the task's input streams, the *process* method is called. The [envelope](javadocs/org/apache/samza/system/IncomingMessageEnvelope.html) contains three things of importance: the message, the key, and the stream that the message came from.
 
-    /** Every message that is delivered to a StreamTask is wrapped
-     * in an IncomingMessageEnvelope, which contains metadata about
-     * the origin of the message. */
-    public class IncomingMessageEnvelope {
-      /** A deserialized message. */
-      Object getMessage() { ... }
+{% highlight java %}
+/** Every message that is delivered to a StreamTask is wrapped
+ * in an IncomingMessageEnvelope, which contains metadata about
+ * the origin of the message. */
+public class IncomingMessageEnvelope {
+  /** A deserialized message. */
+  Object getMessage() { ... }
 
-      /** A deserialized key. */
-      Object getKey() { ... }
+  /** A deserialized key. */
+  Object getKey() { ... }
 
-      /** The stream and partition that this message came from. */
-      SystemStreamPartition getSystemStreamPartition() { ... }
-    }
+  /** The stream and partition that this message came from. */
+  SystemStreamPartition getSystemStreamPartition() { ... }
+}
+{% endhighlight %}
 
 The key and value are declared as Object, and need to be cast to the correct type. If you don't configure a [serializer/deserializer](../container/serialization.html), they are typically Java byte arrays. A deserializer can convert these bytes into any other type, for example the JSON deserializer mentioned above parses the byte array into java.util.Map, java.util.List and String objects.
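
For example, a minimal sketch of such a cast inside process(), assuming the "json" serde above is configured for the input stream and java.util.Map is imported (the field name is hypothetical):

{% highlight java %}
public void process(IncomingMessageEnvelope envelope,
                    MessageCollector collector,
                    TaskCoordinator coordinator) {
  // With the "json" serde, the deserialized message is typically a Map.
  @SuppressWarnings("unchecked")
  Map<String, Object> pageView = (Map<String, Object>) envelope.getMessage();

  // "member-id" is a hypothetical field; it depends on your message schema.
  String memberId = (String) pageView.get("member-id");
}
{% endhighlight %}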
 
-The getSystemStreamPartition() method returns a [SystemStreamPartition](javadocs/org/apache/samza/system/SystemStreamPartition.html) object, which tells you where the message came from. It consists of three parts:
+The `getSystemStreamPartition()` method returns a [SystemStreamPartition](javadocs/org/apache/samza/system/SystemStreamPartition.html) object, which tells you where the message came from. It consists of three parts:
 
 1. The *system*: the name of the system from which the message came, as defined in your job configuration. You can have multiple systems for input and/or output, each with a different name.
 2. The *stream name*: the name of the stream (topic, queue) within the source system. This is also defined in the job configuration.
@@ -78,53 +84,59 @@ The getSystemStreamPartition() method returns a [SystemStreamPartition](javadocs
 
 The API looks like this:
 
-    /** A triple of system name, stream name and partition. */
-    public class SystemStreamPartition extends SystemStream {
+{% highlight java %}
+/** A triple of system name, stream name and partition. */
+public class SystemStreamPartition extends SystemStream {
 
-      /** The name of the system which provides this stream. It is
-          defined in the Samza job's configuration. */
-      public String getSystem() { ... }
+  /** The name of the system which provides this stream. It is
+      defined in the Samza job's configuration. */
+  public String getSystem() { ... }
 
-      /** The name of the stream/topic/queue within the system. */
-      public String getStream() { ... }
+  /** The name of the stream/topic/queue within the system. */
+  public String getStream() { ... }
 
-      /** The partition within the stream. */
-      public Partition getPartition() { ... }
-    }
+  /** The partition within the stream. */
+  public Partition getPartition() { ... }
+}
+{% endhighlight %}
 
 In the example job configuration above, the system name is "kafka" and the stream name is "PageViewEvent". (The name "kafka" isn't special &mdash; you can give your system any name you want.) If you have several input streams feeding into your StreamTask, you can use the SystemStreamPartition to determine what kind of message you've received.
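
For instance, a sketch of branching on the stream name inside process() — the second topic name is an assumption, purely for illustration:

{% highlight java %}
public void process(IncomingMessageEnvelope envelope,
                    MessageCollector collector,
                    TaskCoordinator coordinator) {
  String stream = envelope.getSystemStreamPartition().getStream();

  if ("PageViewEvent".equals(stream)) {
    // handle a page view
  } else if ("AdClickEvent".equals(stream)) {
    // handle a message from a hypothetical second input topic
  }
}
{% endhighlight %}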
 
 What about sending messages? If you take a look at the process() method in StreamTask, you'll see that you get a [MessageCollector](javadocs/org/apache/samza/task/MessageCollector.html).
 
-    /** When a task wishes to send a message, it uses this interface. */
-    public interface MessageCollector {
-      void send(OutgoingMessageEnvelope envelope);
-    }
+{% highlight java %}
+/** When a task wishes to send a message, it uses this interface. */
+public interface MessageCollector {
+  void send(OutgoingMessageEnvelope envelope);
+}
+{% endhighlight %}
 
 To send a message, you create an [OutgoingMessageEnvelope](javadocs/org/apache/samza/system/OutgoingMessageEnvelope.html) object and pass it to the message collector. At a minimum, the envelope specifies the message you want to send, and the system and stream name to send it to. Optionally you can specify the partitioning key and other parameters. See the [javadoc](javadocs/org/apache/samza/system/OutgoingMessageEnvelope.html) for details.
 
-**NOTE:** Please only use the MessageCollector object within the process() method. If you hold on to a MessageCollector instance and use it again later, your messages may not be sent correctly.
+**NOTE:** Please only use the MessageCollector object within the `process()` method. If you hold on to a MessageCollector instance and use it again later, your messages may not be sent correctly.
 
 For example, here's a simple task that splits each input message into words, and emits each word as a separate message:
 
-    public class SplitStringIntoWords implements StreamTask {
+{% highlight java %}
+public class SplitStringIntoWords implements StreamTask {
 
-      // Send outgoing messages to a stream called "words"
-      // in the "kafka" system.
-      private final SystemStream OUTPUT_STREAM =
-        new SystemStream("kafka", "words");
+  // Send outgoing messages to a stream called "words"
+  // in the "kafka" system.
+  private final SystemStream OUTPUT_STREAM =
+    new SystemStream("kafka", "words");
 
-      public void process(IncomingMessageEnvelope envelope,
-                          MessageCollector collector,
-                          TaskCoordinator coordinator) {
-        String message = (String) envelope.getMessage();
+  public void process(IncomingMessageEnvelope envelope,
+                      MessageCollector collector,
+                      TaskCoordinator coordinator) {
+    String message = (String) envelope.getMessage();
 
-        for (String word : message.split(" ")) {
-          // Use the word as the key, and 1 as the value.
-          // A second task can add the 1's to get the word count.
-          collector.send(new OutgoingMessageEnvelope(OUTPUT_STREAM, word, 1));
-        }
-      }
+    for (String word : message.split(" ")) {
+      // Use the word as the key, and 1 as the value.
+      // A second task can add the 1's to get the word count.
+      collector.send(new OutgoingMessageEnvelope(OUTPUT_STREAM, word, 1));
     }
+  }
+}
+{% endhighlight %}
 
 ## [SamzaContainer &raquo;](../container/samza-container.html)

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/checkpointing.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/checkpointing.md b/docs/learn/documentation/0.7.0/container/checkpointing.md
index 434195a..4efcef8 100644
--- a/docs/learn/documentation/0.7.0/container/checkpointing.md
+++ b/docs/learn/documentation/0.7.0/container/checkpointing.md
@@ -39,55 +39,63 @@ This guarantee is called *at-least-once processing*: Samza ensures that your job
 
 For checkpoints to be effective, they need to be written somewhere where they will survive faults. Samza allows you to write checkpoints to the file system (using FileSystemCheckpointManager), but that doesn't help if the machine fails and the container needs to be restarted on another machine. The most common configuration is to use Kafka for checkpointing. You can enable this with the following job configuration:
 
-    # The name of your job determines the name under which checkpoints will be stored
-    job.name=example-job
+{% highlight jproperties %}
+# The name of your job determines the name under which checkpoints will be stored
+job.name=example-job
 
-    # Define a system called "kafka" for consuming and producing to a Kafka cluster
-    systems.kafka.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
+# Define a system called "kafka" for consuming and producing to a Kafka cluster
+systems.kafka.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
 
-    # Declare that we want our job's checkpoints to be written to Kafka
-    task.checkpoint.factory=org.apache.samza.checkpoint.kafka.KafkaCheckpointManagerFactory
-    task.checkpoint.system=kafka
+# Declare that we want our job's checkpoints to be written to Kafka
+task.checkpoint.factory=org.apache.samza.checkpoint.kafka.KafkaCheckpointManagerFactory
+task.checkpoint.system=kafka
 
-    # By default, a checkpoint is written every 60 seconds. You can change this if you like.
-    task.commit.ms=60000
+# By default, a checkpoint is written every 60 seconds. You can change this if you like.
+task.commit.ms=60000
+{% endhighlight %}
 
 In this configuration, Samza writes checkpoints to a separate Kafka topic called \_\_samza\_checkpoint\_&lt;job-name&gt;\_&lt;job-id&gt; (in the example configuration above, the topic would be called \_\_samza\_checkpoint\_example-job\_1). Once per minute, Samza automatically sends a message to this topic, in which the current offsets of the input streams are encoded. When a Samza container starts up, it looks for the most recent offset message in this topic, and loads that checkpoint.
 
 Sometimes it can be useful to use checkpoints only for some input streams, but not for others. In this case, you can tell Samza to ignore any checkpointed offsets for a particular stream name:
 
-    # Ignore any checkpoints for the topic "my-special-topic"
-    systems.kafka.streams.my-special-topic.samza.reset.offset=true
+{% highlight jproperties %}
+# Ignore any checkpoints for the topic "my-special-topic"
+systems.kafka.streams.my-special-topic.samza.reset.offset=true
 
-    # Always start consuming "my-special-topic" at the oldest available offset
-    systems.kafka.streams.my-special-topic.samza.offset.default=oldest
+# Always start consuming "my-special-topic" at the oldest available offset
+systems.kafka.streams.my-special-topic.samza.offset.default=oldest
+{% endhighlight %}
 
 The following table explains the meaning of these configuration parameters:
 
-<table class="documentation">
-  <tr>
-    <th>Parameter name</th>
-    <th>Value</th>
-    <th>Meaning</th>
-  </tr>
-  <tr>
-    <td rowspan="2" class="nowrap">systems.&lt;system&gt;.<br>streams.&lt;stream&gt;.<br>samza.reset.offset</td>
-    <td>false (default)</td>
-    <td>When container starts up, resume processing from last checkpoint</td>
-  </tr>
-  <tr>
-    <td>true</td>
-    <td>Ignore checkpoint (pretend that no checkpoint is present)</td>
-  </tr>
-  <tr>
-    <td rowspan="2" class="nowrap">systems.&lt;system&gt;.<br>streams.&lt;stream&gt;.<br>samza.offset.default</td>
-    <td>upcoming (default)</td>
-    <td>When container starts and there is no checkpoint (or the checkpoint is ignored), only process messages that are published after the job is started, but no old messages</td>
-  </tr>
-  <tr>
-    <td>oldest</td>
-    <td>When container starts and there is no checkpoint (or the checkpoint is ignored), jump back to the oldest available message in the system, and consume all messages from that point onwards (most likely this means repeated processing of messages already seen previously)</td>
-  </tr>
+<table class="table table-condensed table-bordered table-striped">
+  <thead>
+    <tr>
+      <th>Parameter name</th>
+      <th>Value</th>
+      <th>Meaning</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td rowspan="2" class="nowrap">systems.&lt;system&gt;.<br>streams.&lt;stream&gt;.<br>samza.reset.offset</td>
+      <td>false (default)</td>
+      <td>When container starts up, resume processing from last checkpoint</td>
+    </tr>
+    <tr>
+      <td>true</td>
+      <td>Ignore checkpoint (pretend that no checkpoint is present)</td>
+    </tr>
+    <tr>
+      <td rowspan="2" class="nowrap">systems.&lt;system&gt;.<br>streams.&lt;stream&gt;.<br>samza.offset.default</td>
+      <td>upcoming (default)</td>
+      <td>When container starts and there is no checkpoint (or the checkpoint is ignored), only process messages that are published after the job is started, but no old messages</td>
+    </tr>
+    <tr>
+      <td>oldest</td>
+      <td>When container starts and there is no checkpoint (or the checkpoint is ignored), jump back to the oldest available message in the system, and consume all messages from that point onwards (most likely this means repeated processing of messages already seen previously)</td>
+    </tr>
+  </tbody>
 </table>
 
 Note that the example configuration above causes your tasks to start consuming from the oldest offset *every time a container starts up*. This is useful in case you have some in-memory state in your tasks that you need to rebuild from source data in an input stream. If you are using streams in this way, you may also find [bootstrap streams](streams.html) useful.
@@ -98,14 +106,18 @@ If you want to make a one-off change to a job's consumer offsets, for example to
 
 To inspect a job's latest checkpoint, you need to specify your job's config file, so that the tool knows which job it is dealing with:
 
-    samza-example/target/bin/checkpoint-tool.sh \
-      --config-path=file:///path/to/job/config.properties
+{% highlight bash %}
+samza-example/target/bin/checkpoint-tool.sh \
+  --config-path=file:///path/to/job/config.properties
+{% endhighlight %}
 
 This command prints out the latest checkpoint in a properties file format. You can save the output to a file, and edit it as you wish. For example, to jump back to the oldest possible point in time, you can set all the offsets to 0. Then you can feed that properties file back into checkpoint-tool.sh and save the modified checkpoint:
 
-    samza-example/target/bin/checkpoint-tool.sh \
-      --config-path=file:///path/to/job/config.properties \
-      --new-offsets=file:///path/to/new/offsets.properties
+{% highlight bash %}
+samza-example/target/bin/checkpoint-tool.sh \
+  --config-path=file:///path/to/job/config.properties \
+  --new-offsets=file:///path/to/new/offsets.properties
+{% endhighlight %}
 
 Note that Samza only reads checkpoints on container startup. In order for your checkpoint change to take effect, you need to first stop the job, then save the modified offsets, and then start the job again. If you write a checkpoint while the job is running, it will most likely have no effect.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/event-loop.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/event-loop.md b/docs/learn/documentation/0.7.0/container/event-loop.md
index 7ea9fe9..f0f21b0 100644
--- a/docs/learn/documentation/0.7.0/container/event-loop.md
+++ b/docs/learn/documentation/0.7.0/container/event-loop.md
@@ -47,11 +47,13 @@ To receive notifications when such events happen, you can implement the [TaskLif
 
 You can then tell Samza to use your lifecycle listener with the following properties in your job configuration:
 
-    # Define a listener called "my-listener" by giving the factory class name
-    task.lifecycle.listener.my-listener.class=com.example.foo.MyListenerFactory
+{% highlight jproperties %}
+# Define a listener called "my-listener" by giving the factory class name
+task.lifecycle.listener.my-listener.class=com.example.foo.MyListenerFactory
 
-    # Enable it in this job (multiple listeners can be separated by commas)
-    task.lifecycle.listeners=my-listener
+# Enable it in this job (multiple listeners can be separated by commas)
+task.lifecycle.listeners=my-listener
+{% endhighlight %}
 
 The Samza container creates one instance of your [TaskLifecycleListener](../api/javadocs/org/apache/samza/task/TaskLifecycleListener.html). If the container has multiple task instances (processing different input stream partitions), the beforeInit, afterInit, beforeClose and afterClose methods are called for each task instance. The [TaskContext](../api/javadocs/org/apache/samza/task/TaskContext.html) argument of those methods gives you more information about the partitions.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/jmx.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/jmx.md b/docs/learn/documentation/0.7.0/container/jmx.md
index 539ed58..bdd5614 100644
--- a/docs/learn/documentation/0.7.0/container/jmx.md
+++ b/docs/learn/documentation/0.7.0/container/jmx.md
@@ -23,11 +23,13 @@ Samza's containers and YARN ApplicationMaster enable [JMX](http://docs.oracle.co
 
 You can tell Samza to publish its internal [metrics](metrics.html), and any custom metrics you define, as JMX MBeans. To enable this, set the following properties in your job configuration:
 
-    # Define a Samza metrics reporter called "jmx", which publishes to JMX
-    metrics.reporter.jmx.class=org.apache.samza.metrics.reporter.JmxReporterFactory
+{% highlight jproperties %}
+# Define a Samza metrics reporter called "jmx", which publishes to JMX
+metrics.reporter.jmx.class=org.apache.samza.metrics.reporter.JmxReporterFactory
 
-    # Use it (if you have multiple reporters defined, separate them with commas)
-    metrics.reporters=jmx
+# Use it (if you have multiple reporters defined, separate them with commas)
+metrics.reporters=jmx
+{% endhighlight %}
 
 JMX needs to be configured to use a specific port, but in a distributed environment, there is no way of knowing in advance which ports are available on the machines running your containers. Therefore Samza chooses the JMX port randomly. If you need to connect to it, you can find the port by looking in the container's logs, which report the JMX server details as follows:
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/metrics.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/metrics.md b/docs/learn/documentation/0.7.0/container/metrics.md
index c6acb52..8ec7740 100644
--- a/docs/learn/documentation/0.7.0/container/metrics.md
+++ b/docs/learn/documentation/0.7.0/container/metrics.md
@@ -25,47 +25,51 @@ Metrics can be reported in various ways. You can expose them via [JMX](jmx.html)
 
 To set up your job to publish metrics to Kafka, you can use the following configuration:
 
-    # Define a metrics reporter called "snapshot", which publishes metrics
-    # every 60 seconds.
-    metrics.reporters=snapshot
-    metrics.reporter.snapshot.class=org.apache.samza.metrics.reporter.MetricsSnapshotReporterFactory
+{% highlight jproperties %}
+# Define a metrics reporter called "snapshot", which publishes metrics
+# every 60 seconds.
+metrics.reporters=snapshot
+metrics.reporter.snapshot.class=org.apache.samza.metrics.reporter.MetricsSnapshotReporterFactory
 
-    # Tell the snapshot reporter to publish to a topic called "metrics"
-    # in the "kafka" system.
-    metrics.reporter.snapshot.stream=kafka.metrics
+# Tell the snapshot reporter to publish to a topic called "metrics"
+# in the "kafka" system.
+metrics.reporter.snapshot.stream=kafka.metrics
 
-    # Encode metrics data as JSON.
-    serializers.registry.metrics.class=org.apache.samza.serializers.MetricsSnapshotSerdeFactory
-    systems.kafka.streams.metrics.samza.msg.serde=metrics
+# Encode metrics data as JSON.
+serializers.registry.metrics.class=org.apache.samza.serializers.MetricsSnapshotSerdeFactory
+systems.kafka.streams.metrics.samza.msg.serde=metrics
+{% endhighlight %}
 
 With this configuration, the job automatically sends several JSON-encoded messages to the "metrics" topic in Kafka every 60 seconds. The messages look something like this:
 
-    {
-      "header": {
-        "container-name": "samza-container-0",
-        "host": "samza-grid-1234.example.com",
-        "job-id": "1",
-        "job-name": "my-samza-job",
-        "reset-time": 1401729000347,
-        "samza-version": "0.0.1",
-        "source": "Partition-2",
-        "time": 1401729420566,
-        "version": "0.0.1"
-      },
-      "metrics": {
-        "org.apache.samza.container.TaskInstanceMetrics": {
-          "commit-calls": 7,
-          "commit-skipped": 77948,
-          "kafka-input-topic-offset": "1606",
-          "messages-sent": 985,
-          "process-calls": 1093,
-          "send-calls": 985,
-          "send-skipped": 76970,
-          "window-calls": 0,
-          "window-skipped": 77955
-        }
-      }
+{% highlight json %}
+{
+  "header": {
+    "container-name": "samza-container-0",
+    "host": "samza-grid-1234.example.com",
+    "job-id": "1",
+    "job-name": "my-samza-job",
+    "reset-time": 1401729000347,
+    "samza-version": "0.0.1",
+    "source": "Partition-2",
+    "time": 1401729420566,
+    "version": "0.0.1"
+  },
+  "metrics": {
+    "org.apache.samza.container.TaskInstanceMetrics": {
+      "commit-calls": 7,
+      "commit-skipped": 77948,
+      "kafka-input-topic-offset": "1606",
+      "messages-sent": 985,
+      "process-calls": 1093,
+      "send-calls": 985,
+      "send-skipped": 76970,
+      "window-calls": 0,
+      "window-skipped": 77955
     }
+  }
+}
+{% endhighlight %}
 
 There is a separate message for each task instance, and the header tells you the job name, job ID and partition of the task. The metrics allow you to see how many messages have been processed and sent, the current offset in the input stream partition, and other details. There are additional messages which give you metrics about the JVM (heap size, garbage collection information, threads etc.), internal metrics of the Kafka producers and consumers, and more.
 
@@ -73,21 +77,23 @@ It's easy to generate custom metrics in your job, if there's some value you want
 
 You can register your custom metrics through a [MetricsRegistry](../api/javadocs/org/apache/samza/metrics/MetricsRegistry.html). Your stream task needs to implement [InitableTask](../api/javadocs/org/apache/samza/task/InitableTask.html), so that you can get the metrics registry from the [TaskContext](../api/javadocs/org/apache/samza/task/TaskContext.html). This simple example shows how to count the number of messages processed by your task:
 
-    public class MyJavaStreamTask implements StreamTask, InitableTask {
-      private Counter messageCount;
-
-      public void init(Config config, TaskContext context) {
-        this.messageCount = context
-          .getMetricsRegistry()
-          .newCounter(getClass().getName(), "message-count");
-      }
-
-      public void process(IncomingMessageEnvelope envelope,
-                          MessageCollector collector,
-                          TaskCoordinator coordinator) {
-        messageCount.inc();
-      }
-    }
+{% highlight java %}
+public class MyJavaStreamTask implements StreamTask, InitableTask {
+  private Counter messageCount;
+
+  public void init(Config config, TaskContext context) {
+    this.messageCount = context
+      .getMetricsRegistry()
+      .newCounter(getClass().getName(), "message-count");
+  }
+
+  public void process(IncomingMessageEnvelope envelope,
+                      MessageCollector collector,
+                      TaskCoordinator coordinator) {
+    messageCount.inc();
+  }
+}
+{% endhighlight %}
 
 Samza currently supports two kinds of metrics: [counters](../api/javadocs/org/apache/samza/metrics/Counter.html) and [gauges](../api/javadocs/org/apache/samza/metrics/Gauge.html). Use a counter when you want to track how often something occurs, and a gauge when you want to report the level of something, such as the size of a buffer. Each task instance (for each input stream partition) gets its own set of metrics.
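
A gauge can be registered much like the counter above; this sketch assumes the registry exposes a newGauge(group, name, initialValue) method mirroring newCounter():

{% highlight java %}
public class MyJavaStreamTask implements StreamTask, InitableTask {
  private Gauge<Integer> bufferSize;

  public void init(Config config, TaskContext context) {
    // Register a gauge with an initial value of 0.
    this.bufferSize = context
      .getMetricsRegistry()
      .newGauge(getClass().getName(), "buffer-size", 0);
  }

  public void process(IncomingMessageEnvelope envelope,
                      MessageCollector collector,
                      TaskCoordinator coordinator) {
    // Report the current level of whatever you are measuring.
    bufferSize.set(computeCurrentBufferSize());
  }

  private int computeCurrentBufferSize() {
    return 0; // placeholder for your own measurement
  }
}
{% endhighlight %}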
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/samza-container.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/samza-container.md b/docs/learn/documentation/0.7.0/container/samza-container.md
index cdcf8b6..a96ab4a 100644
--- a/docs/learn/documentation/0.7.0/container/samza-container.md
+++ b/docs/learn/documentation/0.7.0/container/samza-container.md
@@ -38,10 +38,12 @@ Let's start in the middle, with the instantiation of a StreamTask. The following
 
 When the container starts, it creates instances of the [task class](../api/overview.html) that you've written. If the task class implements the [InitableTask](../api/javadocs/org/apache/samza/task/InitableTask.html) interface, the SamzaContainer will also call the init() method.
 
-    /** Implement this if you want a callback when your task starts up. */
-    public interface InitableTask {
-      void init(Config config, TaskContext context);
-    }
+{% highlight java %}
+/** Implement this if you want a callback when your task starts up. */
+public interface InitableTask {
+  void init(Config config, TaskContext context);
+}
+{% endhighlight %}
 
 How many instances of your task class are created depends on the number of partitions in the job's input streams. If your Samza job has ten partitions, there will be ten instantiations of your task class: one for each partition. The first task instance will receive all messages for partition one, the second instance will receive all messages for partition two, and so on.
 
@@ -67,12 +69,27 @@ Any [state](state-management.html) in your job belongs to a task instance, not t
 
 If your job has multiple input streams, Samza provides a simple but powerful mechanism for joining data from different streams: each task instance receives messages from one partition of *each* of the input streams. For example, say you have two input streams, A and B, each with four partitions. Samza creates four task instances to process them, and assigns the partitions as follows:
 
-<table class="documentation">
-<tr><th>Task instance</th><th>Consumes stream partitions</th></tr>
-<tr><td>0</td><td>stream A partition 0, stream B partition 0</td></tr>
-<tr><td>1</td><td>stream A partition 1, stream B partition 1</td></tr>
-<tr><td>2</td><td>stream A partition 2, stream B partition 2</td></tr>
-<tr><td>3</td><td>stream A partition 3, stream B partition 3</td></tr>
+<table class="table table-condensed table-bordered table-striped">
+  <thead>
+    <tr>
+      <th>Task instance</th>
+      <th>Consumes stream partitions</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>0</td><td>stream A partition 0, stream B partition 0</td>
+    </tr>
+    <tr>
+      <td>1</td><td>stream A partition 1, stream B partition 1</td>
+    </tr>
+    <tr>
+      <td>2</td><td>stream A partition 2, stream B partition 2</td>
+    </tr>
+    <tr>
+      <td>3</td><td>stream A partition 3, stream B partition 3</td>
+    </tr>
+  </tbody>
 </table>
 
 Thus, if you want two events in different streams to be processed by the same task instance, you need to ensure they are sent to the same partition number. You can achieve this by using the same partitioning key when [sending the messages](../api/overview.html). Joining streams is discussed in detail in the [state management](state-management.html) section.
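
For example, a rough sketch of sending two related events with the same partitioning key, assuming an OutgoingMessageEnvelope constructor that accepts a partition key as described in its javadoc (the stream names and payloads are illustrative):

{% highlight java %}
// Both envelopes use the same partition key (the user ID), so the two events
// land in the same partition number and reach the same task instance.
String userId = "user-42";

collector.send(new OutgoingMessageEnvelope(
    new SystemStream("kafka", "PageViewEvent"), userId, userId, "page-view-payload"));
collector.send(new OutgoingMessageEnvelope(
    new SystemStream("kafka", "AdClickEvent"), userId, userId, "ad-click-payload"));
{% endhighlight %}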

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/serialization.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/serialization.md b/docs/learn/documentation/0.7.0/container/serialization.md
index 1bc0946..ff7d8b9 100644
--- a/docs/learn/documentation/0.7.0/container/serialization.md
+++ b/docs/learn/documentation/0.7.0/container/serialization.md
@@ -27,29 +27,31 @@ Every message that is read from or written to a [stream](streams.html) or a [per
 
 You can use whatever makes sense for your job; Samza doesn't impose any particular data model or serialization scheme on you. However, the cleanest solution is usually to use Samza's serde layer. The following configuration example shows how to use it.
 
-    # Define a system called "kafka"
-    systems.kafka.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
-
-    # The job is going to consume a topic called "PageViewEvent" from the "kafka" system
-    task.inputs=kafka.PageViewEvent
-
-    # Define a serde called "json" which parses/serializes JSON objects
-    serializers.registry.json.class=org.apache.samza.serializers.JsonSerdeFactory
-
-    # Define a serde called "integer" which encodes an integer as 4 binary bytes (big-endian)
-    serializers.registry.integer.class=org.apache.samza.serializers.IntegerSerdeFactory
-
-    # For messages in the "PageViewEvent" topic, the key (the ID of the user viewing the page)
-    # is encoded as a binary integer, and the message is encoded as JSON.
-    systems.kafka.streams.PageViewEvent.samza.key.serde=integer
-    systems.kafka.streams.PageViewEvent.samza.msg.serde=json
-
-    # Define a key-value store which stores the most recent page view for each user ID.
-    # Again, the key is an integer user ID, and the value is JSON.
-    stores.LastPageViewPerUser.factory=org.apache.samza.storage.kv.KeyValueStorageEngineFactory
-    stores.LastPageViewPerUser.changelog=kafka.last-page-view-per-user
-    stores.LastPageViewPerUser.key.serde=integer
-    stores.LastPageViewPerUser.msg.serde=json
+{% highlight jproperties %}
+# Define a system called "kafka"
+systems.kafka.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
+
+# The job is going to consume a topic called "PageViewEvent" from the "kafka" system
+task.inputs=kafka.PageViewEvent
+
+# Define a serde called "json" which parses/serializes JSON objects
+serializers.registry.json.class=org.apache.samza.serializers.JsonSerdeFactory
+
+# Define a serde called "integer" which encodes an integer as 4 binary bytes (big-endian)
+serializers.registry.integer.class=org.apache.samza.serializers.IntegerSerdeFactory
+
+# For messages in the "PageViewEvent" topic, the key (the ID of the user viewing the page)
+# is encoded as a binary integer, and the message is encoded as JSON.
+systems.kafka.streams.PageViewEvent.samza.key.serde=integer
+systems.kafka.streams.PageViewEvent.samza.msg.serde=json
+
+# Define a key-value store which stores the most recent page view for each user ID.
+# Again, the key is an integer user ID, and the value is JSON.
+stores.LastPageViewPerUser.factory=org.apache.samza.storage.kv.KeyValueStorageEngineFactory
+stores.LastPageViewPerUser.changelog=kafka.last-page-view-per-user
+stores.LastPageViewPerUser.key.serde=integer
+stores.LastPageViewPerUser.msg.serde=json
+{% endhighlight %}
 
 Each serde is defined with a factory class. Samza comes with several built-in serdes for UTF-8 strings, binary-encoded integers, JSON (requires the samza-serializers dependency) and more. You can also create your own serializer by implementing the [SerdeFactory](../api/javadocs/org/apache/samza/serializers/SerdeFactory.html) interface.
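
As a rough sketch, a custom serde might look like the following, assuming the Serde interface exposes toBytes()/fromBytes() and the factory's getSerde() takes a name and a Config:

{% highlight java %}
public class MyStringSerdeFactory implements SerdeFactory<String> {
  public Serde<String> getSerde(String name, Config config) {
    return new Serde<String>() {
      public byte[] toBytes(String message) {
        // Encode the message as UTF-8 bytes.
        return message.getBytes(java.nio.charset.StandardCharsets.UTF_8);
      }

      public String fromBytes(byte[] bytes) {
        // Decode UTF-8 bytes back into a string.
        return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
      }
    };
  }
}
{% endhighlight %}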
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/state-management.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/state-management.md b/docs/learn/documentation/0.7.0/container/state-management.md
index d021588..e54739c 100644
--- a/docs/learn/documentation/0.7.0/container/state-management.md
+++ b/docs/learn/documentation/0.7.0/container/state-management.md
@@ -134,47 +134,53 @@ Samza includes an additional in-memory caching layer in front of LevelDB, which
 
 To use a key-value store in your job, add the following to your job config:
 
-    # Use the key-value store implementation for a store called "my-store"
-    stores.my-store.factory=org.apache.samza.storage.kv.KeyValueStorageEngineFactory
+{% highlight jproperties %}
+# Use the key-value store implementation for a store called "my-store"
+stores.my-store.factory=org.apache.samza.storage.kv.KeyValueStorageEngineFactory
 
-    # Use the Kafka topic "my-store-changelog" as the changelog stream for this store.
-    # This enables automatic recovery of the store after a failure. If you don't
-    # configure this, no changelog stream will be generated.
-    stores.my-store.changelog=kafka.my-store-changelog
+# Use the Kafka topic "my-store-changelog" as the changelog stream for this store.
+# This enables automatic recovery of the store after a failure. If you don't
+# configure this, no changelog stream will be generated.
+stores.my-store.changelog=kafka.my-store-changelog
 
-    # Encode keys and values in the store as UTF-8 strings.
-    serializers.registry.string.class=org.apache.samza.serializers.StringSerdeFactory
-    stores.my-store.key.serde=string
-    stores.my-store.msg.serde=string
+# Encode keys and values in the store as UTF-8 strings.
+serializers.registry.string.class=org.apache.samza.serializers.StringSerdeFactory
+stores.my-store.key.serde=string
+stores.my-store.msg.serde=string
+{% endhighlight %}
 
 See the [serialization section](serialization.html) for more information on the *serde* options.
 
 Here is a simple example that writes every incoming message to the store:
 
-    public class MyStatefulTask implements StreamTask, InitableTask {
-      private KeyValueStore<String, String> store;
-      
-      public void init(Config config, TaskContext context) {
-        this.store = (KeyValueStore<String, String>) context.getStore("my-store");
-      }
+{% highlight java %}
+public class MyStatefulTask implements StreamTask, InitableTask {
+  private KeyValueStore<String, String> store;
 
-      public void process(IncomingMessageEnvelope envelope,
-                          MessageCollector collector,
-                          TaskCoordinator coordinator) {
-        store.put((String) envelope.getKey(), (String) envelope.getMessage());
-      }
-    }
+  public void init(Config config, TaskContext context) {
+    this.store = (KeyValueStore<String, String>) context.getStore("my-store");
+  }
+
+  public void process(IncomingMessageEnvelope envelope,
+                      MessageCollector collector,
+                      TaskCoordinator coordinator) {
+    store.put((String) envelope.getKey(), (String) envelope.getMessage());
+  }
+}
+{% endhighlight %}
 
 Here is the complete key-value store API:
 
-    public interface KeyValueStore<K, V> {
-      V get(K key);
-      void put(K key, V value);
-      void putAll(List<Entry<K,V>> entries);
-      void delete(K key);
-      KeyValueIterator<K,V> range(K from, K to);
-      KeyValueIterator<K,V> all();
-    }
+{% highlight java %}
+public interface KeyValueStore<K, V> {
+  V get(K key);
+  void put(K key, V value);
+  void putAll(List<Entry<K,V>> entries);
+  void delete(K key);
+  KeyValueIterator<K,V> range(K from, K to);
+  KeyValueIterator<K,V> all();
+}
+{% endhighlight %}
 
 Additional configuration properties for the key-value store are documented in the [configuration reference](../jobs/configuration-table.html#keyvalue).
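
To make the range() and all() calls above a little more concrete, here is a small, hypothetical helper that counts the entries between two keys. It assumes the store was obtained via context.getStore() as in the earlier example; closing the iterator when you are done releases the underlying store resources.

{% highlight java %}
import org.apache.samza.storage.kv.KeyValueIterator;
import org.apache.samza.storage.kv.KeyValueStore;

public class StoreScanner {
  // Counts the entries whose keys fall in the given range, using a range scan.
  public static int countRange(KeyValueStore<String, String> store, String from, String to) {
    KeyValueIterator<String, String> iterator = store.range(from, to);
    int count = 0;
    try {
      while (iterator.hasNext()) {
        iterator.next();
        count++;
      }
    } finally {
      iterator.close();
    }
    return count;
  }
}
{% endhighlight %}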
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/streams.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/streams.md b/docs/learn/documentation/0.7.0/container/streams.md
index f17ba99..59e0855 100644
--- a/docs/learn/documentation/0.7.0/container/streams.md
+++ b/docs/learn/documentation/0.7.0/container/streams.md
@@ -21,47 +21,49 @@ title: Streams
 
 The [samza container](samza-container.html) reads and writes messages using the [SystemConsumer](../api/javadocs/org/apache/samza/system/SystemConsumer.html) and [SystemProducer](../api/javadocs/org/apache/samza/system/SystemProducer.html) interfaces. You can integrate any message broker with Samza by implementing these two interfaces.
 
-    public interface SystemConsumer {
-      void start();
+{% highlight java %}
+public interface SystemConsumer {
+  void start();
 
-      void stop();
+  void stop();
 
-      void register(
-          SystemStreamPartition systemStreamPartition,
-          String lastReadOffset);
+  void register(
+      SystemStreamPartition systemStreamPartition,
+      String lastReadOffset);
 
-      List<IncomingMessageEnvelope> poll(
-          Map<SystemStreamPartition, Integer> systemStreamPartitions,
-          long timeout)
-        throws InterruptedException;
-    }
+  List<IncomingMessageEnvelope> poll(
+      Map<SystemStreamPartition, Integer> systemStreamPartitions,
+      long timeout)
+    throws InterruptedException;
+}
 
-    public class IncomingMessageEnvelope {
-      public Object getMessage() { ... }
+public class IncomingMessageEnvelope {
+  public Object getMessage() { ... }
 
-      public Object getKey() { ... }
+  public Object getKey() { ... }
 
-      public SystemStreamPartition getSystemStreamPartition() { ... }
-    }
+  public SystemStreamPartition getSystemStreamPartition() { ... }
+}
 
-    public interface SystemProducer {
-      void start();
+public interface SystemProducer {
+  void start();
 
-      void stop();
+  void stop();
 
-      void register(String source);
+  void register(String source);
 
-      void send(String source, OutgoingMessageEnvelope envelope);
+  void send(String source, OutgoingMessageEnvelope envelope);
 
-      void flush(String source);
-    }
+  void flush(String source);
+}
 
-    public class OutgoingMessageEnvelope {
-      ...
-      public Object getKey() { ... }
+public class OutgoingMessageEnvelope {
+  ...
+  public Object getKey() { ... }
 
-      public Object getMessage() { ... }
-    }
+  public Object getMessage() { ... }
+}
+{% endhighlight %}
 
 Out of the box, Samza supports Kafka (KafkaSystemConsumer and KafkaSystemProducer). However, any message bus system can be plugged in, as long as it can provide the semantics required by Samza, as described in the [javadoc](../api/javadocs/org/apache/samza/system/SystemConsumer.html).
 
@@ -81,7 +83,9 @@ When a Samza container has several incoming messages on different stream partiti
 
 To plug in your own message chooser, you need to implement the [MessageChooserFactory](../api/javadocs/org/apache/samza/system/chooser/MessageChooserFactory.html) interface, and set the "task.chooser.class" configuration to the fully-qualified class name of your implementation:
 
-    task.chooser.class=com.example.samza.YourMessageChooserFactory
+{% highlight jproperties %}
+task.chooser.class=com.example.samza.YourMessageChooserFactory
+{% endhighlight %}
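
As a very rough sketch of such a factory (the exact MessageChooser and MessageChooserFactory method signatures should be taken from the javadocs linked above, and BaseMessageChooser is assumed here as a convenience base class), a chooser that always prefers one particular stream might look something like this:

{% highlight java %}
import java.util.ArrayDeque;
import java.util.Queue;

import org.apache.samza.config.Config;
import org.apache.samza.metrics.MetricsRegistry;
import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.chooser.BaseMessageChooser;
import org.apache.samza.system.chooser.MessageChooser;
import org.apache.samza.system.chooser.MessageChooserFactory;

public class YourMessageChooserFactory implements MessageChooserFactory {
  public MessageChooser getChooser(Config config, MetricsRegistry registry) {
    return new BaseMessageChooser() {
      // Envelopes from the preferred stream are handed out before anything else.
      private final Queue<IncomingMessageEnvelope> preferred = new ArrayDeque<IncomingMessageEnvelope>();
      private final Queue<IncomingMessageEnvelope> others = new ArrayDeque<IncomingMessageEnvelope>();

      public void update(IncomingMessageEnvelope envelope) {
        if ("my-real-time-stream".equals(envelope.getSystemStreamPartition().getStream())) {
          preferred.add(envelope);
        } else {
          others.add(envelope);
        }
      }

      public IncomingMessageEnvelope choose() {
        IncomingMessageEnvelope envelope = preferred.poll();
        return envelope != null ? envelope : others.poll();
      }
    };
  }
}
{% endhighlight %}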
 
 #### Prioritizing input streams
 
@@ -89,8 +93,10 @@ There are certain times when messages from one stream should be processed with h
 
 Samza provides a mechanism to prioritize one stream over another by setting this configuration parameter: systems.&lt;system&gt;.streams.&lt;stream&gt;.samza.priority=&lt;number&gt;. For example:
 
-    systems.kafka.streams.my-real-time-stream.samza.priority=2
-    systems.kafka.streams.my-batch-stream.samza.priority=1
+{% highlight jproperties %}
+systems.kafka.streams.my-real-time-stream.samza.priority=2
+systems.kafka.streams.my-batch-stream.samza.priority=1
+{% endhighlight %}
 
 This declares that my-real-time-stream's messages should be processed with higher priority than my-batch-stream's messages. If my-real-time-stream has any messages available, they are processed first. Only if there are no messages currently waiting on my-real-time-stream does the Samza job continue processing my-batch-stream.
 
@@ -108,9 +114,11 @@ Another difference between a bootstrap stream and a high-priority stream is that
 
 To configure a stream called "my-bootstrap-stream" to be a fully-consumed bootstrap stream, use the following settings:
 
-    systems.kafka.streams.my-bootstrap-stream.samza.bootstrap=true
-    systems.kafka.streams.my-bootstrap-stream.samza.reset.offset=true
-    systems.kafka.streams.my-bootstrap-stream.samza.offset.default=oldest
+{% highlight jproperties %}
+systems.kafka.streams.my-bootstrap-stream.samza.bootstrap=true
+systems.kafka.streams.my-bootstrap-stream.samza.reset.offset=true
+systems.kafka.streams.my-bootstrap-stream.samza.offset.default=oldest
+{% endhighlight %}
 
 The bootstrap=true parameter enables the bootstrap behavior (prioritization over other streams). The combination of reset.offset=true and offset.default=oldest tells Samza to always start reading the stream from the oldest offset, every time a container starts up (rather than starting to read from the most recent checkpoint).
 
@@ -122,7 +130,9 @@ In some cases, you can improve performance by consuming several messages from th
 
 For example, if you want to read 100 messages in a row from each stream partition (regardless of the MessageChooser), you can use this configuration parameter:
 
-    task.consumer.batch.size=100
+{% highlight jproperties %}
+task.consumer.batch.size=100
+{% endhighlight %}
 
 With this setting, Samza tries to read a message from the most recently used [SystemStreamPartition](../api/javadocs/org/apache/samza/system/SystemStreamPartition.html). This behavior continues either until no more messages are available for that SystemStreamPartition, or until the batch size has been reached. When that happens, Samza defers to the MessageChooser to determine the next message to process. It then tries to keep consuming from the chosen message's SystemStreamPartition until the batch size is reached.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/container/windowing.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/container/windowing.md b/docs/learn/documentation/0.7.0/container/windowing.md
index 312a493..b10e5d4 100644
--- a/docs/learn/documentation/0.7.0/container/windowing.md
+++ b/docs/learn/documentation/0.7.0/container/windowing.md
@@ -23,32 +23,36 @@ Sometimes a stream processing job needs to do something in regular time interval
 
 Samza's *windowing* feature provides a way for tasks to do something in regular time intervals, for example once per minute. To enable windowing, you just need to set one property in your job configuration:
 
-    # Call the window() method every 60 seconds
-    task.window.ms=60000
+{% highlight jproperties %}
+# Call the window() method every 60 seconds
+task.window.ms=60000
+{% endhighlight %}
 
 Next, your stream task needs to implement the [WindowableTask](../api/javadocs/org/apache/samza/task/WindowableTask.html) interface. This interface defines a window() method which is called by Samza in the regular interval that you configured.
 
 For example, this is how you would implement a basic per-minute event counter:
 
-    public class EventCounterTask implements StreamTask, WindowableTask {
+{% highlight java %}
+public class EventCounterTask implements StreamTask, WindowableTask {
 
-      public static final SystemStream OUTPUT_STREAM =
-        new SystemStream("kafka", "events-per-minute");
+  public static final SystemStream OUTPUT_STREAM =
+    new SystemStream("kafka", "events-per-minute");
 
-      private int eventsSeen = 0;
+  private int eventsSeen = 0;
 
-      public void process(IncomingMessageEnvelope envelope,
-                          MessageCollector collector,
-                          TaskCoordinator coordinator) {
-        eventsSeen++;
-      }
+  public void process(IncomingMessageEnvelope envelope,
+                      MessageCollector collector,
+                      TaskCoordinator coordinator) {
+    eventsSeen++;
+  }
 
-      public void window(MessageCollector collector,
-                         TaskCoordinator coordinator) {
-        collector.send(new OutgoingMessageEnvelope(OUTPUT_STREAM, eventsSeen));
-        eventsSeen = 0;
-      }
-    }
+  public void window(MessageCollector collector,
+                     TaskCoordinator coordinator) {
+    collector.send(new OutgoingMessageEnvelope(OUTPUT_STREAM, eventsSeen));
+    eventsSeen = 0;
+  }
+}
+{% endhighlight %}
 
 If you need to send messages to output streams, you can use the [MessageCollector](../api/javadocs/org/apache/samza/task/MessageCollector.html) object passed to the window() method. Please only use that MessageCollector object for sending messages, and don't use it outside of the call to window().
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/introduction/architecture.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/introduction/architecture.md b/docs/learn/documentation/0.7.0/introduction/architecture.md
index 00a5e89..46987e5 100644
--- a/docs/learn/documentation/0.7.0/introduction/architecture.md
+++ b/docs/learn/documentation/0.7.0/introduction/architecture.md
@@ -91,7 +91,9 @@ The Samza client uses YARN to run a Samza job: YARN starts and supervises one or
 
 Let's take a look at a real example: suppose we want to count the number of page views. In SQL, you would write something like:
 
-    SELECT user_id, COUNT(*) FROM PageViewEvent GROUP BY user_id
+{% highlight sql %}
+SELECT user_id, COUNT(*) FROM PageViewEvent GROUP BY user_id
+{% endhighlight %}
 
 Although Samza doesn't support SQL right now, the idea is the same. Two jobs are required to calculate this query: one to group messages by user ID, and the other to do the counting.
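
As a rough, hypothetical sketch of what the second (counting) job could look like as a Samza task, the example below keeps an in-memory count per user and periodically emits the counts. It assumes the first job has already repartitioned messages by user ID; the class, stream, and topic names are invented for illustration, and durable state would normally live in Samza's key-value store rather than a plain HashMap.

{% highlight java %}
import java.util.HashMap;
import java.util.Map;

import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.OutgoingMessageEnvelope;
import org.apache.samza.system.SystemStream;
import org.apache.samza.task.MessageCollector;
import org.apache.samza.task.StreamTask;
import org.apache.samza.task.TaskCoordinator;
import org.apache.samza.task.WindowableTask;

public class PageViewCounterTask implements StreamTask, WindowableTask {
  private static final SystemStream OUTPUT_STREAM =
    new SystemStream("kafka", "page-view-counts");

  // Running count per user ID since the last window() call.
  private final Map<String, Integer> counts = new HashMap<String, Integer>();

  public void process(IncomingMessageEnvelope envelope,
                      MessageCollector collector,
                      TaskCoordinator coordinator) {
    String userId = (String) envelope.getKey();
    Integer current = counts.get(userId);
    counts.put(userId, current == null ? 1 : current + 1);
  }

  public void window(MessageCollector collector, TaskCoordinator coordinator) {
    // Emit the current count for each user, keyed by user ID.
    for (Map.Entry<String, Integer> entry : counts.entrySet()) {
      collector.send(new OutgoingMessageEnvelope(OUTPUT_STREAM, entry.getKey(), entry.getValue()));
    }
  }
}
{% endhighlight %}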
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/jobs/configuration.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/jobs/configuration.md b/docs/learn/documentation/0.7.0/jobs/configuration.md
index 7ed6402..2ed3ea7 100644
--- a/docs/learn/documentation/0.7.0/jobs/configuration.md
+++ b/docs/learn/documentation/0.7.0/jobs/configuration.md
@@ -21,7 +21,7 @@ title: Configuration
 
 All Samza jobs have a configuration file that defines the job. A very basic configuration file looks like this:
 
-```
+{% highlight jproperties %}
 # Job
 job.factory.class=samza.job.local.LocalJobFactory
 job.name=hello-world
@@ -38,7 +38,7 @@ serializers.registry.string.class=org.apache.samza.serializers.StringSerdeFactor
 systems.example-system.samza.factory=samza.stream.example.ExampleConsumerFactory
 systems.example-system.samza.key.serde=string
 systems.example-system.samza.msg.serde=json
-```
+{% endhighlight %}
 
 There are four major sections to a configuration file:
 
@@ -51,10 +51,10 @@ There are four major sections to a configuration file:
 
 Configuration keys that absolutely must be defined for a Samza job are:
 
-* job.factory.class
-* job.name
-* task.class
-* task.inputs
+* `job.factory.class`
+* `job.name`
+* `task.class`
+* `task.inputs`
 
 ### Configuration Keys
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/jobs/job-runner.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/jobs/job-runner.md b/docs/learn/documentation/0.7.0/jobs/job-runner.md
index 7fef2ec..55c9114 100644
--- a/docs/learn/documentation/0.7.0/jobs/job-runner.md
+++ b/docs/learn/documentation/0.7.0/jobs/job-runner.md
@@ -21,25 +21,25 @@ title: JobRunner
 
 Samza jobs are started using a script called run-job.sh.
 
-```
+{% highlight bash %}
 samza-example/target/bin/run-job.sh \
   --config-factory=samza.config.factories.PropertiesConfigFactory \
   --config-path=file://$PWD/config/hello-world.properties
-```
+{% endhighlight %}
 
 You provide two parameters to the run-job.sh script. One is the config location, and the other is a factory class that is used to read your configuration file. The run-job.sh script is actually executing a Samza class called JobRunner. The JobRunner uses your ConfigFactory to get a Config object from the config path.
 
-```
+{% highlight java %}
 public interface ConfigFactory {
   Config getConfig(URI configUri);
 }
-```
+{% endhighlight %}
 
 The Config object is just a wrapper around Map<String, String>, with some nice helper methods. Out of the box, Samza ships with the PropertiesConfigFactory, but developers can implement any kind of ConfigFactory they wish.
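
As a rough sketch of a custom ConfigFactory (the class name is invented, and MapConfig is assumed here to be the Map-backed Config implementation that Samza ships with), one that loads a standard Java properties file from a URI might look like this:

{% highlight java %}
import java.io.InputStream;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.samza.config.Config;
import org.apache.samza.config.ConfigFactory;
import org.apache.samza.config.MapConfig;

public class UrlPropertiesConfigFactory implements ConfigFactory {
  public Config getConfig(URI configUri) {
    try {
      // Read a java.util.Properties file from wherever the URI points.
      InputStream in = configUri.toURL().openStream();
      Properties props = new Properties();
      try {
        props.load(in);
      } finally {
        in.close();
      }

      Map<String, String> map = new HashMap<String, String>();
      for (String key : props.stringPropertyNames()) {
        map.put(key, props.getProperty(key));
      }
      return new MapConfig(map);
    } catch (Exception e) {
      throw new RuntimeException("Could not load config from " + configUri, e);
    }
  }
}
{% endhighlight %}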
 
 Once the JobRunner gets your configuration, it passes the configuration to the StreamJobFactory class defined by the "job.factory.class" property. Samza ships with two job factory implementations: LocalJobFactory and YarnJobFactory. The StreamJobFactory's responsibility is to give the JobRunner a job that it can run.
 
-```
+{% highlight java %}
 public interface StreamJob {
   StreamJob submit();
 
@@ -51,7 +51,7 @@ public interface StreamJob {
 
   ApplicationStatus getStatus();
 }
-```
+{% endhighlight %}
 
 Once the JobRunner gets a job, it calls submit() on the job. This method is what tells the StreamJob implementation to start the SamzaContainer. In the case of LocalJobRunner, it uses a run-container.sh script to execute the SamzaContainer in a separate process, which will start one SamzaContainer locally on the machine that you ran run-job.sh on.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/jobs/logging.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/jobs/logging.md b/docs/learn/documentation/0.7.0/jobs/logging.md
index b56bd67..5b78d7f 100644
--- a/docs/learn/documentation/0.7.0/jobs/logging.md
+++ b/docs/learn/documentation/0.7.0/jobs/logging.md
@@ -25,12 +25,14 @@ Samza uses [SLF4J](http://www.slf4j.org/) for all of its logging. By default, Sa
 
 The [hello-samza](/startup/hello-samza/0.7.0) project shows how to use [log4j](http://logging.apache.org/log4j/1.2/) with Samza. To turn on log4j logging, you just need to make sure slf4j-log4j12 is in your SamzaContainer's classpath. In Maven, this can be done by adding the following dependency to your Samza package project.
 
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>runtime</scope>
-      <version>1.6.2</version>
-    </dependency>
+{% highlight xml %}
+<dependency>
+  <groupId>org.slf4j</groupId>
+  <artifactId>slf4j-log4j12</artifactId>
+  <scope>runtime</scope>
+  <version>1.6.2</version>
+</dependency>
+{% endhighlight %}
 
 If you're not using Maven, just make sure that slf4j-log4j12 ends up in your Samza package's lib directory.
 
@@ -38,15 +40,19 @@ If you're not using Maven, just make sure that slf4j-log4j12 ends up in your Sam
 
 Samza's [run-class.sh](packaging.html) script will automatically set the following setting if log4j.xml exists in your [Samza package's](packaging.html) lib directory.
 
-    -Dlog4j.configuration=file:$base_dir/lib/log4j.xml
+{% highlight bash %}
+-Dlog4j.configuration=file:$base_dir/lib/log4j.xml
+{% endhighlight %}
 
 The [run-class.sh](packaging.html) script will also set the following Java system properties:
 
-    -Dsamza.log.dir=$SAMZA_LOG_DIR -Dsamza.container.name=$SAMZA_CONTAINER_NAME
+{% highlight bash %}
+-Dsamza.log.dir=$SAMZA_LOG_DIR -Dsamza.container.name=$SAMZA_CONTAINER_NAME
+{% endhighlight %}
 
 These settings are very useful if you're using a file-based appender. For example, you can use a daily rolling appender by configuring log4j.xml like this:
 
-```
+{% highlight xml %}
 <appender name="RollingAppender" class="org.apache.log4j.DailyRollingFileAppender">
    <param name="File" value="${samza.log.dir}/${samza.container.name}.log" />
    <param name="DatePattern" value="'.'yyyy-MM-dd" />
@@ -54,21 +60,23 @@ These settings are very useful if you're using a file-based appender. For exampl
     <param name="ConversionPattern" value="%d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n" />
    </layout>
 </appender>
-```
+{% endhighlight %}
 
 Setting up a file-based appender is recommended as a better alternative to using standard out. Standard out log files (see below) don't roll, and can get quite large if used for logging.
 
-**NOTE:** If you use the task.opts configuration property, the log configuration is disrupted. This is a known bug; please see [SAMZA-109](https://issues.apache.org/jira/browse/SAMZA-109) for a workaround.
+**NOTE:** If you use the `task.opts` configuration property, the log configuration is disrupted. This is a known bug; please see [SAMZA-109](https://issues.apache.org/jira/browse/SAMZA-109) for a workaround.
 
 ### Log Directory
 
-Samza will look for the _SAMZA_\__LOG_\__DIR_ environment variable when it executes. If this variable is defined, all logs will be written to this directory. If the environment variable is empty, or not defined, then Samza will use /tmp. This environment variable can also be referenced inside log4j.xml files (see above).
+Samza will look for the `SAMZA_LOG_DIR` environment variable when it executes. If this variable is defined, all logs will be written to this directory. If the environment variable is empty, or not defined, then Samza will use /tmp. This environment variable can also be referenced inside log4j.xml files (see above).
 
 ### Garbage Collection Logging
 
-Samza's will automatically set the following garbage collection logging setting, and will output it to _$SAMZA_\__LOG_\__DIR_/gc.log.
+Samza will automatically set the following garbage collection logging setting, and will output it to `$SAMZA_LOG_DIR/gc.log`.
 
-    -XX:+PrintGCDateStamps -Xloggc:$SAMZA_LOG_DIR/gc.log
+{% highlight bash %}
+-XX:+PrintGCDateStamps -Xloggc:$SAMZA_LOG_DIR/gc.log
+{% endhighlight %}
 
 #### Rotation
 
@@ -76,7 +84,7 @@ In older versions of Java, it is impossible to have GC logs roll over based on t
 
 ### YARN
 
-When a Samza job executes on a YARN grid, the _$SAMZA_\__LOG_\__DIR_ environment variable will point to a directory that is secured such that only the user executing the Samza job can read and write to it, if YARN is [securely configured](http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html).
+When a Samza job executes on a YARN grid, the `$SAMZA_LOG_DIR` environment variable will point to a directory that is secured such that only the user executing the Samza job can read and write to it, if YARN is [securely configured](http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html).
 
 #### STDOUT
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/jobs/yarn-jobs.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/jobs/yarn-jobs.md b/docs/learn/documentation/0.7.0/jobs/yarn-jobs.md
index 12f5063..58ca50d 100644
--- a/docs/learn/documentation/0.7.0/jobs/yarn-jobs.md
+++ b/docs/learn/documentation/0.7.0/jobs/yarn-jobs.md
@@ -19,13 +19,13 @@ title: YARN Jobs
    limitations under the License.
 -->
 
-When you define job.factory.class=org.apache.samza.job.yarn.YarnJobFactory in your job's configuration, Samza will use YARN to execute your job. The YarnJobFactory will use the YARN_HOME environment variable on the machine that run-job.sh is executed on to get the appropriate YARN configuration, which will define where the YARN resource manager is. The YarnJob will work with the resource manager to get your job started on the YARN cluster.
+When you define `job.factory.class=org.apache.samza.job.yarn.YarnJobFactory` in your job's configuration, Samza will use YARN to execute your job. The YarnJobFactory will use the YARN_HOME environment variable on the machine that run-job.sh is executed on to get the appropriate YARN configuration, which will define where the YARN resource manager is. The YarnJob will work with the resource manager to get your job started on the YARN cluster.
 
 If you want to use YARN to run your Samza job, you'll also need to define the location of your Samza job's package. For example, you might say:
 
-```
+{% highlight jproperties %}
 yarn.package.path=http://my.http.server/jobs/ingraphs-package-0.0.55.tgz
-```
+{% endhighlight %}
 
 This .tgz file follows the conventions outlined on the [Packaging](packaging.html) page (it has bin/run-am.sh and bin/run-container.sh). YARN NodeManagers will take responsibility for downloading this .tgz file to the appropriate machines and untarring it. From there, YARN will execute run-am.sh or run-container.sh for the Samza Application Master and SamzaContainer, respectively.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/documentation/0.7.0/operations/kafka.md
----------------------------------------------------------------------
diff --git a/docs/learn/documentation/0.7.0/operations/kafka.md b/docs/learn/documentation/0.7.0/operations/kafka.md
index b426fdb..29833e4 100644
--- a/docs/learn/documentation/0.7.0/operations/kafka.md
+++ b/docs/learn/documentation/0.7.0/operations/kafka.md
@@ -29,4 +29,6 @@ Kafka has a great [operations wiki](http://kafka.apache.org/08/ops.html), which
 
 Kafka brokers should be configured to automatically create topics. Without this, it's going to be very cumbersome to run Samza jobs, since jobs will write to arbitrary (and sometimes new) topics.
 
-    auto.create.topics.enable=true
+{% highlight jproperties %}
+auto.create.topics.enable=true
+{% endhighlight %}

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/tutorials/0.7.0/deploy-samza-job-from-hdfs.md
----------------------------------------------------------------------
diff --git a/docs/learn/tutorials/0.7.0/deploy-samza-job-from-hdfs.md b/docs/learn/tutorials/0.7.0/deploy-samza-job-from-hdfs.md
index 63bbf07..145d51c 100644
--- a/docs/learn/tutorials/0.7.0/deploy-samza-job-from-hdfs.md
+++ b/docs/learn/tutorials/0.7.0/deploy-samza-job-from-hdfs.md
@@ -27,33 +27,33 @@ Build a new Samza job package to include the hadoop-hdfs-version.jar.
 
 * Add dependency statement in pom.xml of samza-job-package
 
-```
+{% highlight xml %}
 <dependency>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs</artifactId>
   <version>2.2.0</version>
 </dependency>
-```
+{% endhighlight %}
 
 * Add the following code to src/main/assembly/src.xml in samza-job-package.
 
-```
+{% highlight xml %}
 <include>org.apache.hadoop:hadoop-hdfs</include>
-```
+{% endhighlight %}
 
 * Create .tar.gz package
 
-```
+{% highlight bash %}
 mvn clean package
-```
+{% endhighlight %}
 
 * Make sure hadoop-common-version.jar has the same version as your hadoop-hdfs-version.jar. Otherwise, you may still have errors.
 
 ### Upload the package
 
-```
+{% highlight bash %}
 hadoop fs -put ./samza-job-package/target/samza-job-package-0.7.0-dist.tar.gz /path/for/tgz
-```
+{% endhighlight %}
 
 ### Add HDFS configuration
 
@@ -63,9 +63,9 @@ Put the hdfs-site.xml file of your cluster into ~/.samza/conf directory. (The sa
 
 Change the yarn.package.path in the properties file to your HDFS location.
 
-```
+{% highlight jproperties %}
 yarn.package.path=hdfs://<hdfs name node ip>:<hdfs name node port>/path/to/tgz
-```
+{% endhighlight %}
 
 Then you should be able to run the Samza job as described in [hello-samza](../../../startup/hello-samza/0.7.0/).
-   
+

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/tutorials/0.7.0/remote-debugging-samza.md
----------------------------------------------------------------------
diff --git a/docs/learn/tutorials/0.7.0/remote-debugging-samza.md b/docs/learn/tutorials/0.7.0/remote-debugging-samza.md
index c3623d8..89d0856 100644
--- a/docs/learn/tutorials/0.7.0/remote-debugging-samza.md
+++ b/docs/learn/tutorials/0.7.0/remote-debugging-samza.md
@@ -25,30 +25,30 @@ Let's use Eclipse to attach a remote debugger to a Samza container. If you're an
 
 Start by checking out Samza, so we have access to the source.
 
-```
+{% highlight bash %}
 git clone http://git-wip-us.apache.org/repos/asf/incubator-samza.git
-```
+{% endhighlight %}
 
 Next, grab hello-samza.
 
-```
+{% highlight bash %}
 git clone git://git.apache.org/incubator-samza-hello-samza.git
-```
+{% endhighlight %}
 
 ### Setup the Environment
 
 Now, let's setup the Eclipse project files.
 
-```
+{% highlight bash %}
 cd incubator-samza
 ./gradlew eclipse
-```
+{% endhighlight %}
 
 Let's also release Samza to Maven's local repository, so hello-samza has access to the JARs that it needs.
 
-```
+{% highlight bash %}
 ./gradlew -PscalaVersion=2.9.2 clean publishToMavenLocal
-```
+{% endhighlight %}
 
 Next, open Eclipse, and import the Samza source code into your workspace: "File" &gt; "Import" &gt; "Existing Projects into Workspace" &gt; "Browse". Select 'incubator-samza' folder, and hit 'finish'.
 
@@ -56,9 +56,9 @@ Next, open Eclipse, and import the Samza source code into your workspace: "File"
 
 Now, go back to the hello-samza project, and edit ./samza-job-package/src/main/config/wikipedia-feed.properties to add the following line:
 
-```
+{% highlight jproperties %}
 task.opts=-agentlib:jdwp=transport=dt_socket,address=localhost:9009,server=y,suspend=y
-```
+{% endhighlight %}
 
 The [task.opts](../../documentation/0.7.0/jobs/configuration-table.html) configuration parameter is a way to override Java parameters at runtime for your Samza containers. In this example, we're setting the agentlib parameter to enable remote debugging on localhost, port 9009. In a more realistic environment, you might also set Java heap settings (-Xmx, -Xms, etc), as well as garbage collection and logging settings.
 
@@ -68,24 +68,24 @@ The [task.opts](../../documentation/0.7.0/jobs/configuration-table.html) configu
 
 Now that the Samza job has been setup to enable remote debugging when a Samza container starts, let's start the ZooKeeper, Kafka, and YARN.
 
-```
+{% highlight bash %}
 bin/grid
-```
+{% endhighlight %}
 
 If you get a complaint that JAVA_HOME is not set, then you'll need to set it. This can be done on OSX by running:
 
-```
+{% highlight bash %}
 export JAVA_HOME=$(/usr/libexec/java_home)
-```
+{% endhighlight %}
 
 Once the grid starts, you can start the wikipedia-feed Samza job.
 
-```
+{% highlight bash %}
 mvn clean package
 mkdir -p deploy/samza
 tar -xvf ./samza-job-package/target/samza-job-package-0.7.0-dist.tar.gz -C deploy/samza
 deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
-```
+{% endhighlight %}
 
 When the wikipedia-feed job starts up, a single Samza container will be created to process all incoming messages. This is the container that we'll want to connect to from the remote debugger.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/tutorials/0.7.0/run-hello-samza-without-internet.md
----------------------------------------------------------------------
diff --git a/docs/learn/tutorials/0.7.0/run-hello-samza-without-internet.md b/docs/learn/tutorials/0.7.0/run-hello-samza-without-internet.md
index 8ff5f1d..a5503ef 100644
--- a/docs/learn/tutorials/0.7.0/run-hello-samza-without-internet.md
+++ b/docs/learn/tutorials/0.7.0/run-hello-samza-without-internet.md
@@ -25,9 +25,9 @@ This tutorial is to help you run [Hello Samza](../../../startup/hello-samza/0.7.
 
 Ping irc.wikimedia.org. Sometimes the firewall in your company blocks this service.
 
-```
+{% highlight bash %}
 telnet irc.wikimedia.org 6667
-```
+{% endhighlight %}
 
 You should see something like this:
 
@@ -47,30 +47,31 @@ Otherwise, you may have the connection problem.
 
 We provide an alternative to get wikipedia feed data. Instead of running
 
-```
+{% highlight bash %}
 deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
-```
+{% endhighlight %}
 
 You will run
-```
+
+{% highlight bash %}
 bin/produce-wikipedia-raw-data.sh
-``` 
+{% endhighlight %}
 
 This script reads wikipedia feed data from a local file and produces it to the Kafka broker. By default, it produces to localhost:9092 as the Kafka broker and uses localhost:2181 as the ZooKeeper address. You can override them:
 
-```
+{% highlight bash %}
 bin/produce-wikipedia-raw-data.sh -b yourKafkaBrokerAddress -z yourZookeeperAddress
-```
+{% endhighlight %}
 
 Now you can go back to the Generate Wikipedia Statistics section in [Hello Samza](../../../startup/hello-samza/0.7.0/) and follow the remaining steps.
 
 ### A Little Explanation
 
-The goal of 
+The goal of
 
-```
+{% highlight bash %}
 deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
-```
+{% endhighlight %}
 
 is to deploy a Samza job which listens to the wikipedia API, receives the feed in real time, and produces it to the Kafka topic wikipedia-raw. The alternative in this tutorial reads a local wikipedia feed file in an infinite loop and produces the data to the same wikipedia-raw topic. The follow-up job, wikipedia-parser, gets its data from the Kafka topic wikipedia-raw, so as long as we have correct data in that topic, we are fine. All Samza jobs are connected by Kafka and do not depend on each other.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/learn/tutorials/0.7.0/run-in-multi-node-yarn.md
----------------------------------------------------------------------
diff --git a/docs/learn/tutorials/0.7.0/run-in-multi-node-yarn.md b/docs/learn/tutorials/0.7.0/run-in-multi-node-yarn.md
index dc70790..c079233 100644
--- a/docs/learn/tutorials/0.7.0/run-in-multi-node-yarn.md
+++ b/docs/learn/tutorials/0.7.0/run-in-multi-node-yarn.md
@@ -29,36 +29,36 @@ If you already have a multi-node YARN cluster (such as CDH5 cluster), you can sk
 
 1\. Download [YARN 2.3](http://mirror.symnds.com/software/Apache/hadoop/common/hadoop-2.3.0/hadoop-2.3.0.tar.gz) to /tmp and untar it.
 
-```
+{% highlight bash %}
 cd /tmp
 tar -xvf hadoop-2.3.0.tar.gz
 cd hadoop-2.3.0
-```
+{% endhighlight %}
 
 2\. Set up environment variables.
 
-```
+{% highlight bash %}
 export HADOOP_YARN_HOME=$(pwd)
 mkdir conf
 export HADOOP_CONF_DIR=$HADOOP_YARN_HOME/conf
-```
+{% endhighlight %}
 
 3\. Configure YARN setting file.
 
-```
+{% highlight bash %}
 cp ./etc/hadoop/yarn-site.xml conf
 vi conf/yarn-site.xml
-```
+{% endhighlight %}
 
 Add the following property to yarn-site.xml:
 
-```
+{% highlight xml %}
 <property>
     <name>yarn.resourcemanager.hostname</name>
     <!-- hostname that is accessible from all NMs -->
     <value>yourHostname</value>
 </property>
-```
+{% endhighlight %}
 
 Download and add capacity-scheduler.xml.
 
@@ -66,35 +66,35 @@ Download and add capacity-schedule.xml.
 curl http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/capacity-scheduler.xml?view=co > conf/capacity-scheduler.xml
 ```
 
-###Set Up Http Filesystem for YARN
+### Set Up Http Filesystem for YARN
 
 The goal of these steps is to configure YARN to read the http filesystem, because we will use an HTTP server to deploy the Samza job package. If you want to use HDFS to deploy the Samza job package, you can skip steps 4-6 and follow [Deploying a Samza Job from HDFS](deploy-samza-job-from-hdfs.html) instead.
 
 4\. Download Scala package and untar it.
 
-```
+{% highlight bash %}
 cd /tmp
 curl http://www.scala-lang.org/files/archive/scala-2.10.3.tgz > scala-2.10.3.tgz
 tar -xvf scala-2.10.3.tgz
-```
+{% endhighlight %}
 
 5\. Add Scala and its log jars.
 
-```
+{% highlight bash %}
 cp /tmp/scala-2.10.3/lib/scala-compiler.jar $HADOOP_YARN_HOME/share/hadoop/hdfs/lib
 cp /tmp/scala-2.10.3/lib/scala-library.jar $HADOOP_YARN_HOME/share/hadoop/hdfs/lib
 curl http://search.maven.org/remotecontent?filepath=org/clapper/grizzled-slf4j_2.10/1.0.1/grizzled-slf4j_2.10-1.0.1.jar > $HADOOP_YARN_HOME/share/hadoop/hdfs/lib/grizzled-slf4j_2.10-1.0.1.jar
-```
+{% endhighlight %}
 
 6\. Add http configuration in core-site.xml (create the core-site.xml file and add content).
 
-```
+{% highlight bash %}
 vi $HADOOP_YARN_HOME/conf/core-site.xml
-```
+{% endhighlight %}
 
 Add the following code:
 
-```
+{% highlight xml %}
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <configuration>
     <property>
@@ -102,17 +102,17 @@ Add the following code:
       <value>org.apache.samza.util.hadoop.HttpFileSystem</value>
     </property>
 </configuration>
-```
+{% endhighlight %}
 
 ### Distribute Hadoop File to Slaves
 
 7\. Basically, you copy the Hadoop files from your host machine to the slave machines (172.21.100.35, in my case):
 
-```
+{% highlight bash %}
 scp -r . 172.21.100.35:/tmp/hadoop-2.3.0
 echo 172.21.100.35 > conf/slaves
 sbin/start-yarn.sh
-```
+{% endhighlight %}
 
 * If you get "172.21.100.35: Error: JAVA_HOME is not set and could not be found.", you'll need to add a conf/hadoop-env.sh file to the machine with the failure (172.21.100.35, in this case), which has "export JAVA_HOME=/export/apps/jdk/JDK-1_6_0_27" (or wherever your JAVA_HOME actually is).
 
@@ -124,49 +124,49 @@ Some of the following steps are exactlly identical to what you have seen in [hel
 
 1\. Download Samza and publish it to the local Maven repository.
 
-```
+{% highlight bash %}
 cd /tmp
 git clone http://git-wip-us.apache.org/repos/asf/incubator-samza.git
 cd incubator-samza
 ./gradlew clean publishToMavenLocal
 cd ..
-```
+{% endhighlight %}
 
 2\. Download hello-samza project and change the job properties file.
 
-```
+{% highlight bash %}
 git clone git://github.com/linkedin/hello-samza.git
 cd hello-samza
 vi samza-job-package/src/main/config/wikipedia-feed.properties
-```
+{% endhighlight %}
 
 Change the yarn.package.path property to be:
 
-```
+{% highlight jproperties %}
 yarn.package.path=http://yourHostname:8000/samza-job-package/target/samza-job-package-0.7.0-dist.tar.gz
-```
+{% endhighlight %}
 
 3\. Compile hello-samza.
 
-```
+{% highlight bash %}
 mvn clean package
 mkdir -p deploy/samza
 tar -xvf ./samza-job-package/target/samza-job-package-0.7.0-dist.tar.gz -C deploy/samza
-```
+{% endhighlight %}
 
 4\. Deploy the Samza job package to the HTTP server.
 
 Open a new terminal, and run:
 
-```
+{% highlight bash %}
 cd /tmp/hello-samza && python -m SimpleHTTPServer
-```
+{% endhighlight %}
 
 Go back to the original terminal (not the one running the HTTP server):
 
-```
+{% highlight bash %}
 deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
-```
+{% endhighlight %}
 
 Go to http://yourHostname:8088 and find the wikipedia-feed job. Click on the ApplicationMaster link to see that it's running.
 

http://git-wip-us.apache.org/repos/asf/incubator-samza/blob/d913037a/docs/less/main.less
----------------------------------------------------------------------
diff --git a/docs/less/main.less b/docs/less/main.less
index 8c7cea6..60b41ac 100644
--- a/docs/less/main.less
+++ b/docs/less/main.less
@@ -1,19 +1,21 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
 
 @import "variables.less";
 @import "non-responsive.less";
@@ -90,6 +92,8 @@ pre {
   border: 0px !important;
   border-radius: 0px !important;
   overflow-x: auto;
+  background-color: #f7f7f7;
+  font-size: 12px;
   code {
     overflow-wrap: normal;
     white-space: pre;
@@ -162,6 +166,10 @@ td.key {
   font-size: 16px;
 }
 
+img.diagram-large {
+  width: 100%;
+}
+
 ul.documentation-list {
   list-style: none;
   padding-left: 20px;