Posted to commits@zeppelin.apache.org by mo...@apache.org on 2017/09/29 03:41:56 UTC

svn commit: r1810049 [2/3] - in /zeppelin/site/docs/0.8.0-SNAPSHOT: ./ assets/themes/zeppelin/img/ui-img/ development/ development/contribution/ development/helium/ interpreter/ quickstart/ setup/basics/ setup/deployment/ setup/operation/ setup/securit...

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/search_data.json
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/search_data.json?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/search_data.json (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/search_data.json Fri Sep 29 03:41:54 2017
@@ -116,7 +116,7 @@
 
     "/interpreter/beam.html": {
       "title": "Beam interpreter in Apache Zeppelin",
-      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Beam interpreter for Apache ZeppelinOverviewApache Beam is an open source unified platform for data processing pipelines. A pipeline can be built using one of the Beam SDKs.The execution of the pipeline is done by different Runners. Currently, Beam supports Apache Flink Runner, Apache Spark Runner, and Google Dataflow Runner.How to useBasically, you can write normal Beam Java code where you can determine the Runner. You should writ
 e the main method inside a class because the interpreter invokes this main method to execute the pipeline. Unlike Zeppelin's normal pattern, each paragraph is considered a separate job; there isn't any relation to any other paragraph.The following is a demonstration of a word count example with data represented in an array of stringsBut it can read data from files by replacing Create.of(SENTENCES).withCoder(StringUtf8Coder.of()) with TextIO.Read.from("path/to/filename.txt")%beam// most used importsimport org.apache.beam.sdk.coders.StringUtf8Coder;import org.apache.beam.sdk.transforms.Create;import java.io.Serializable;import java.util.Arrays;import java.util.List;import java.util.ArrayList;import org.apache.spark.api.java.*;import org.apache.spark.api.java.function.Function;import org.apache.spark.SparkConf;import org.apache.spark.streaming.*;import org.apache.spark.SparkContext;import org.apache.beam.runners.direct.*;import org.apache.beam.sdk.runners.*;import org.a
 pache.beam.sdk.options.*;import org.apache.beam.runners.spark.*;import org.apache.beam.runners.spark.io.ConsoleIO;import org.apache.beam.runners.flink.*;import org.apache.beam.runners.flink.examples.WordCount.Options;import org.apache.beam.sdk.Pipeline;import org.apache.beam.sdk.io.TextIO;import org.apache.beam.sdk.options.PipelineOptionsFactory;import org.apache.beam.sdk.transforms.Count;import org.apache.beam.sdk.transforms.DoFn;import org.apache.beam.sdk.transforms.MapElements;import org.apache.beam.sdk.transforms.ParDo;import org.apache.beam.sdk.transforms.SimpleFunction;import org.apache.beam.sdk.values.KV;import org.apache.beam.sdk.options.PipelineOptions;public class MinimalWordCount {  static List<String> s = new ArrayList<>();  static final String[] SENTENCES_ARRAY = new String[] {    "Hadoop is the Elephant King!",    "A yellow and elegant thing.",    "He never forgets",    "Useful d
 ata, or lets",    "An extraneous element cling!",    "A wonderful king is Hadoop.",    "The elephant plays well with Sqoop.",    "But what helps him to thrive",    "Are Impala, and Hive,",    "And HDFS in the group.",    "Hadoop is an elegant fellow.",    "An elephant gentle and mellow.",    "He never gets mad,",    "Or does anything bad,",    "Because, at his core, he is yellow",    };    static final List<String> SENTENCES = Arrays.asList(SENTENCES_ARRAY);  public static void main(String[] args) {    Options options = PipelineOptionsFactory.create().as(Options.class);    options.setRunner(FlinkRunner.class);    Pipeline p = Pipeline.create(options);    p.apply(Create.of(SENTENCES).withCoder(StringUtf8Coder.of()))         .apply("ExtractWords", Pa
 rDo.of(new DoFn<String, String>() {           @Override           public void processElement(ProcessContext c) {             for (String word : c.element().split("[^a-zA-Z']+")) {               if (!word.isEmpty()) {                 c.output(word);               }             }           }         }))        .apply(Count.<String> perElement())        .apply("FormatResults", ParDo.of(new DoFn<KV<String, Long>, String>() {          @Override          public void processElement(DoFn<KV<String, Long>, String>.ProcessContext arg0)            throws Exception {            s.add("n" + arg0.element().getKey() + "t" + arg0.element().getValue());            }        }));    p.run();    System.out.println("%table wordtcount");    for (int i = 0; i < s.size(); i++) {      System.out.print(s.get(i));    }  }}"
 ,
+      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Beam interpreter for Apache ZeppelinOverviewApache Beam is an open source unified platform for data processing pipelines. A pipeline can be built using one of the Beam SDKs.The execution of the pipeline is done by different Runners. Currently, Beam supports Apache Flink Runner, Apache Spark Runner, and Google Dataflow Runner.How to useBasically, you can write normal Beam Java code where you can determine the Runner. You should writ
 e the main method inside a class because the interpreter invokes this main method to execute the pipeline. Unlike Zeppelin's normal pattern, each paragraph is considered a separate job; there isn't any relation to any other paragraph.The following is a demonstration of a word count example with data represented in an array of stringsBut it can read data from files by replacing Create.of(SENTENCES).withCoder(StringUtf8Coder.of()) with TextIO.Read.from("path/to/filename.txt")%beam// most used importsimport org.apache.beam.sdk.coders.StringUtf8Coder;import org.apache.beam.sdk.transforms.Create;import java.io.Serializable;import java.util.Arrays;import java.util.List;import java.util.ArrayList;import org.apache.beam.runners.direct.*;import org.apache.beam.sdk.runners.*;import org.apache.beam.sdk.options.*;import org.apache.beam.runners.flink.*;import org.apache.beam.sdk.Pipeline;import org.apache.beam.sdk.io.TextIO;import org.apache.beam.sdk.options.PipelineOptionsFactor
 y;import org.apache.beam.sdk.transforms.Count;import org.apache.beam.sdk.transforms.DoFn;import org.apache.beam.sdk.transforms.MapElements;import org.apache.beam.sdk.transforms.ParDo;import org.apache.beam.sdk.transforms.SimpleFunction;import org.apache.beam.sdk.values.KV;import org.apache.beam.sdk.options.PipelineOptions;public class MinimalWordCount {  static List<String> s = new ArrayList<>();  static final String[] SENTENCES_ARRAY = new String[] {    "Hadoop is the Elephant King!",    "A yellow and elegant thing.",    "He never forgets",    "Useful data, or lets",    "An extraneous element cling!",    "A wonderful king is Hadoop.",    "The elephant plays well with Sqoop.",    "But what helps him to thrive",    "Are Impala, and Hive,",    "And HDFS in the group.",    &quo
 t;Hadoop is an elegant fellow.",    "An elephant gentle and mellow.",    "He never gets mad,",    "Or does anything bad,",    "Because, at his core, he is yellow",    };    static final List<String> SENTENCES = Arrays.asList(SENTENCES_ARRAY);  public static void main(String[] args) {    PipelineOptions options = PipelineOptionsFactory.create().as(PipelineOptions.class);    options.setRunner(FlinkRunner.class);    Pipeline p = Pipeline.create(options);    p.apply(Create.of(SENTENCES).withCoder(StringUtf8Coder.of()))         .apply("ExtractWords", ParDo.of(new DoFn<String, String>() {           @ProcessElement           public void processElement(ProcessContext c) {             for (String word : c.element().split("[^a-zA-Z']+")) {               if (!word.isEmpty()) {                 c.output(word);               }             }      
      }         }))        .apply(Count.<String> perElement())        .apply("FormatResults", ParDo.of(new DoFn<KV<String, Long>, String>() {          @ProcessElement          public void processElement(DoFn<KV<String, Long>, String>.ProcessContext arg0)            throws Exception {            s.add("n" + arg0.element().getKey() + "t" + arg0.element().getValue());            }        }));    p.run();    System.out.println("%table wordtcount");    for (int i = 0; i < s.size(); i++) {      System.out.print(s.get(i));    }  }}",
       "url": " /interpreter/beam.html",
       "group": "interpreter",
       "excerpt": "Apache Beam is an open source, unified programming model that you can use to create a data processing pipeline."
@@ -138,7 +138,7 @@
 
     "/interpreter/cassandra.html": {
       "title": "Cassandra CQL Interpreter for Apache Zeppelin",
-      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Cassandra CQL Interpreter for Apache Zeppelin      Name    Class    Description        %cassandra    CassandraInterpreter    Provides interpreter for Apache Cassandra CQL query language  Enabling Cassandra InterpreterIn a notebook, to enable the Cassandra interpreter, click on the Gear icon and select Cassandra  Using the Cassandra InterpreterIn a paragraph, use %cassandra to select the Cassandra interpreter and then input all comm
 ands.To access the interactive help, type HELP;    Interpreter CommandsThe Cassandra interpreter accepts the following commands            Command Type      Command Name      Description              Help command      HELP      Display the interactive help menu              Schema commands      DESCRIBE KEYSPACE, DESCRIBE CLUSTER, DESCRIBE TABLES ...      Custom commands to describe the Cassandra schema              Option commands      @consistency, @retryPolicy, @fetchSize ...      Inject runtime options to all statements in the paragraph              Prepared statement commands      @prepare, @bind, @remove_prepared      Let you register a prepared command and re-use it later by injecting bound values              Native CQL statements      All CQL-compatible statements (SELECT, INSERT, CREATE ...)      All CQL statements are executed directly against the Cassandra server      CQL statementsThis interpreter is compatible with any CQL statement supported by Cassandra. Ex:INSERT IN
 TO users(login,name) VALUES('jdoe','John DOE');SELECT * FROM users WHERE login='jdoe';Each statement should be separated by a semi-colon ( ; ) except the special commands below:@prepare@bind@remove_prepare@consistency@serialConsistency@timestamp@retryPolicy@fetchSize@requestTimeOutMulti-line statements as well as multiple statements on the same line are also supported as long as they are separated by a semi-colon. Ex:USE spark_demo;SELECT * FROM albums_by_country LIMIT 1; SELECT * FROM countries LIMIT 1;SELECT *FROM artistsWHERE login='jlennon';Batch statements are supported and can span multiple lines, as well as DDL(CREATE/ALTER/DROP) statements:BEGIN BATCH    INSERT INTO users(login,name) VALUES('jdoe','John DOE');    INSERT INTO users_preferences(login,account_type) VALUES('jdoe','BASIC');APPLY BATCH;CREATE TABLE IF NOT EXISTS test(    key int PRIMARY K
 EY,    value text);CQL statements are case-insensitive (except for column names and values). This means that the following statements are equivalent and valid:INSERT INTO users(login,name) VALUES('jdoe','John DOE');Insert into users(login,name) vAlues('hsue','Helen SUE');The complete list of all CQL statements and versions can be found below:         Cassandra Version     Documentation Link           3.x             <a target="_blank"          href="http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html">          http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html                        2.2             <a target="_blank"          href="http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html">          http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html                        2.1 & 2.0             <a target="_blank"          href="http://docs
 .datastax.com/en/cql/3.1/cql/cql_intro_c.html">          http://docs.datastax.com/en/cql/3.1/cql/cqlintroc.html                        1.2             <a target="_blank"          href="http://docs.datastax.com/en/cql/3.0/cql/aboutCQL.html">          http://docs.datastax.com/en/cql/3.0/cql/aboutCQL.html                 Comments in statementsIt is possible to add comments between statements. Single line comments start with the hash sign (#) or double slashes (//). Multi-line comments are enclosed between /** and **/. Ex:#Single line comment style 1INSERT INTO users(login,name) VALUES('jdoe','John DOE');//Single line comment style 2/** Multi line comments **/Insert into users(login,name) vAlues('hsue','Helen SUE');Syntax ValidationThe interpreter is shipped with a built-in syntax validator. This validator only checks for basic syntax errors.All CQL-related syntax validation is delegated d
 irectly to CassandraMost of the time, syntax errors are due to missing semi-colons between statements or typos.Schema commandsTo make schema discovery easier and more interactive, the following commands are supported:         Command     Description           DESCRIBE CLUSTER;     Show the current cluster name and its partitioner           DESCRIBE KEYSPACES;     List all existing keyspaces in the cluster and their configuration (replication factor, durable write ...)           DESCRIBE TABLES;     List all existing keyspaces in the cluster and for each, all the table names           DESCRIBE TYPES;     List all existing keyspaces in the cluster and for each, all the user-defined type names           DESCRIBE FUNCTIONS;     List all existing keyspaces in the cluster and for each, all the function names           DESCRIBE AGGREGATES;     List all existing keyspaces in the cluster and for each, all the aggregate names           DESCRIBE MATERIALIZED VIEWS;     List all existing 
 keyspaces in the cluster and for each, all the materialized view names           DESCRIBE KEYSPACE <keyspacename>;     Describe the given keyspace configuration and all its table details (name, columns, ...)           DESCRIBE TABLE (<keyspacename>).<tablename>;             Describe the given table. If the keyspace is not provided, the current logged in keyspace is used.        If there is no logged in keyspace, the default system keyspace is used.        If no table is found, an error message is raised                DESCRIBE TYPE (<keyspacename>).<typename>;             Describe the given type (UDT). If the keyspace is not provided, the current logged in keyspace is used.        If there is no logged in keyspace, the default system keyspace is used.        If no type is found, an error message is raised                DESCRIBE FUNCTION (<keyspacename>).<functionname>;     Describe the 
 given function. If the keyspace is not provided, the current logged in keyspace is used.         If there is no logged in keyspace, the default system keyspace is used.         If no function is found, an error message is raised                DESCRIBE AGGREGATE (<keyspacename>).<aggregatename>;     Describe the given aggregate. If the keyspace is not provided, the current logged in keyspace is used.         If there is no logged in keyspace, the default system keyspace is used.         If no aggregate is found, an error message is raised                DESCRIBE MATERIALIZED VIEW (<keyspacename>).<view_name>;     Describe the given view. If the keyspace is not provided, the current logged in keyspace is used.         If there is no logged in keyspace, the default system keyspace is used.         If no view is found, an error message is raised         The schema objects (cluster, keyspace, table, type, function and aggregate) ar
 e displayed in a tabular format.There is a drop-down menu on the top left corner to expand objects details. On the top right menu is shown the Icon legend.  Runtime ParametersSometimes you want to be able to pass runtime query parameters to your statements.Those parameters are not part of the CQL specs and are specific to the interpreter.Below is the list of all parameters:         Parameter     Syntax     Description           Consistency Level     @consistency=value     Apply the given consistency level to all queries in the paragraph           Serial Consistency Level     @serialConsistency=value     Apply the given serial consistency level to all queries in the paragraph           Timestamp     @timestamp=long value             Apply the given timestamp to all queries in the paragraph.        Please note that timestamp value passed directly in CQL statement will override this value                 Retry Policy     @retryPolicy=value     Apply the given retry policy to all querie
 s in the paragraph           Fetch Size     @fetchSize=integer value     Apply the given fetch size to all queries in the paragraph           Request Time Out     @requestTimeOut=integer value     Apply the given request timeout in millisecs to all queries in the paragraph    Some parameters only accept restricted values:         Parameter     Possible Values           Consistency Level     ALL, ANY, ONE, TWO, THREE, QUORUM, LOCALONE, LOCALQUORUM, EACHQUORUM           Serial Consistency Level     SERIAL, LOCALSERIAL           Timestamp     Any long value           Retry Policy     DEFAULT, DOWNGRADINGCONSISTENCY, FALLTHROUGH, LOGGINGDEFAULT, LOGGINGDOWNGRADING, LOGGINGFALLTHROUGH           Fetch Size     Any integer value    Please note that you should not add semi-colon ( ; ) at the end of each parameter statementSome examples:CREATE TABLE IF NOT EXISTS spark_demo.ts(    key int PRIMARY KEY,    value text);TRUNCATE spark_demo.ts;// Timestamp in the past@timestamp=10// Force timesta
 mp directly in the first insertINSERT INTO spark_demo.ts(key,value) VALUES(1,'first insert') USING TIMESTAMP 100;// Select some data to make the clock turnSELECT * FROM spark_demo.albums LIMIT 100;// Now insert using the timestamp parameter set at the beginning(10)INSERT INTO spark_demo.ts(key,value) VALUES(1,'second insert');// Check for the result. You should see 'first insert'SELECT value FROM spark_demo.ts WHERE key=1;Some remarks about query parameters:many query parameters can be set in the same paragraphif the same query parameter is set many times with different values, the interpreter only takes into account the first valueeach query parameter applies to all CQL statements in the same paragraph, unless you override the option using plain CQL text (like forcing timestamp with the USING clause)the order of each query parameter with regard to CQL statement does not matterSupport for Prepared StatementsFor performance reasons, it is 
 better to prepare statements before-hand and reuse them later by providing bound values.This interpreter provides 3 commands to handle prepared and bound statements:@prepare@bind@remove_preparedExample:@prepare[statement-name]=...@bind[statement-name]=’text’, 1223, ’2015-07-30 12:00:01’, null, true, [‘list_item1’, ’list_item2’]@bind[statement-name-with-no-bound-value]@remove_prepare[statement-name]@prepareYou can use the syntax "@prepare[statement-name]=SELECT..." to create a prepared statement.The statement-name is mandatory because the interpreter prepares the given statement with the Java driver andsaves the generated prepared statement in an internal hash map, using the provided statement-name as search key.Please note that this internal prepared statement map is shared with all notebooks and all paragraphs becausethere is only one instance of the interpreter for CassandraIf the interpreter encounters many @prepare for th
 e same statement-name (key), only the first statement will be taken into account.Example:@prepare[select]=SELECT * FROM spark_demo.albums LIMIT ?@prepare[select]=SELECT * FROM spark_demo.artists LIMIT ?For the above example, the prepared statement is SELECT * FROM spark_demo.albums LIMIT ?.SELECT * FROM spark_demo.artists LIMIT ? is ignored because an entry already exists in the prepared statements map with the key select.In the context of Zeppelin, a notebook can be scheduled to be executed at a regular interval,thus it is necessary to avoid re-preparing the same statement many times (considered an anti-pattern).@bindOnce the statement is prepared (possibly in a separate notebook/paragraph), you can bind values to it:@bind[select_first]=10Bound values are not mandatory for the @bind statement. However, if you provide bound values, they need to comply with some syntax:String values should be enclosed between simple quotes ( ‘ )Date values should be enclosed between simple quotes ( 
 ‘ ) and respect the formats:yyyy-MM-dd HH:MM:ssyyyy-MM-dd HH:MM:ss.SSSnull is parsed as-isboolean (true|false) are parsed as-iscollection values must follow the standard CQL syntax:list: [‘listitem1’, ’listitem2’, ...]set: {‘setitem1’, ‘setitem2’, …}map: {‘key1’: ‘val1’, ‘key2’: ‘val2’, …}tuple values should be enclosed between parentheses (see Tuple CQL syntax): (‘text’, 123, true)udt values should be enclosed between brackets (see UDT CQL syntax): {streetname: ‘Beverly Hills’, number: 104, zipcode: 90020, state: ‘California’, …}It is possible to use the @bind statement inside a batch:BEGIN BATCH   @bind[insert_user]='jdoe','John DOE'   UPDATE users SET age = 27 WHERE login='hsue';APPLY BATCH;@remove_prepareTo prevent a prepared statement from staying forever in the prepared statement map, you can use the@remov
 e_prepare[statement-name] syntax to remove it.Removing a non-existing prepared statement yields no error.Using Dynamic FormsInstead of hard-coding your CQL queries, it is possible to use [Zeppelin dynamic form] syntax to inject simple values or multiple-choice forms.The legacy mustache syntax ( {{ }} ) to bind input text and select form is still supported but is deprecated and will be removed in future releases.LegacyThe syntax for a simple parameter is: {{input_Label=default value}}. The default value is mandatory because the first time the paragraph is executed,we launch the CQL query before rendering the form so at least one value should be provided.The syntax for a multiple-choice parameter is: {{input_Label=value1 | value2 | … | valueN }}. By default the first choice is used for CQL querythe first time the paragraph is executed.Example:#Secondary index on performer styleSELECT name, country, performerFROM spark_demo.performersWHERE name='${performer=Sheryl Crow|Doof|F
 anfarlo|Los Paranoia}'AND styles CONTAINS '${style=Rock}';In the above example, the first CQL query will be executed for performer='Sheryl Crow' AND style='Rock'.For subsequent queries, you can change the value directly using the form.Please note that we enclosed the ${ } block between simple quotes ( ' ) because Cassandra expects a String here.We could also have used the ${style='Rock'} syntax but this time, the value displayed on the form is 'Rock' and not Rock.It is also possible to use dynamic forms for prepared statements:@bind[select]='${performer=Sheryl Crow|Doof|Fanfarlo|Los Paranoia}', '${style=Rock}'Shared statesIt is possible to execute many paragraphs in parallel. However, at the back-end side, we’re still using synchronous queries.Asynchronous execution is only possible when it is possible to return a Future value in the InterpreterResult.
 It may be an interesting proposal for the Zeppelin project.Recently, Zeppelin allows you to choose the level of isolation for your interpreters (see [Interpreter Binding Mode] ).Long story short, you have 3 available bindings:shared : same JVM and same Interpreter instance for all notesscoped : same JVM but different Interpreter instances, one for each noteisolated: different JVM running a single Interpreter instance, one JVM for each noteUsing the shared binding, the same com.datastax.driver.core.Session object is used for all notes and paragraphs.Consequently, if you use the USE keyspace name; statement to log into a keyspace, it will change the keyspace forall current users of the Cassandra interpreter because we only create 1 com.datastax.driver.core.Session objectper instance of Cassandra interpreter.The same remark does apply to the prepared statement hash map, it is shared by all users using the same instance of Cassandra interpreter.When using scoped binding, in the same JVM
  Zeppelin will create multiple instances of the Cassandra interpreter, thus multiple com.datastax.driver.core.Session objects. Beware of resource and memory usage using this binding! The isolated mode is the most extreme and will create as many JVM/com.datastax.driver.core.Session objects as there are distinct notes.Interpreter ConfigurationTo configure the Cassandra interpreter, go to the Interpreter menu and scroll down to change the parameters.The Cassandra interpreter is using the official Cassandra Java Driver and most of the parameters are usedto configure the Java driverBelow are the configuration parameters and their default value.        Property Name     Description     Default Value           cassandra.cluster     Name of the Cassandra cluster to connect to     Test Cluster           cassandra.compression.protocol     On wire compression. Possible values are: NONE, SNAPPY, LZ4     NONE           cassandra.credentials.username     If security is enabled, provide the login  
    none           cassandra.credentials.password     If security is enabled, provide the password     none           cassandra.hosts             Comma-separated Cassandra hosts (DNS name or IP address).                Ex: '192.168.0.12,node2,node3'           localhost           cassandra.interpreter.parallelism     Number of concurrent paragraphs(queries block) that can be executed     10           cassandra.keyspace             Default keyspace to connect to.                  It is strongly recommended to keep the default value          and prefix the table name with the actual keyspace          in all of your queries                  system           cassandra.load.balancing.policy             Load balancing policy. Default = new TokenAwarePolicy(new DCAwareRoundRobinPolicy())        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)    
       DEFAULT           cassandra.max.schema.agreement.wait.second     Cassandra max schema agreement wait in second     10           cassandra.pooling.core.connection.per.host.local     Protocol V2 and below default = 2. Protocol V3 and above default = 1     2           cassandra.pooling.core.connection.per.host.remote     Protocol V2 and below default = 1. Protocol V3 and above default = 1     1           cassandra.pooling.heartbeat.interval.seconds     Cassandra pool heartbeat interval in secs     30           cassandra.pooling.idle.timeout.seconds     Cassandra idle time out in seconds     120           cassandra.pooling.max.connection.per.host.local     Protocol V2 and below default = 8. Protocol V3 and above default = 1     8           cassandra.pooling.max.connection.per.host.remote     Protocol V2 and below default = 2. Protocol V3 and above default = 1     2           cassandra.pooling.max.request.per.connection.local     Protocol V2 and below default = 128. Protocol V3 and
  above default = 1024     128           cassandra.pooling.max.request.per.connection.remote     Protocol V2 and below default = 128. Protocol V3 and above default = 256     128           cassandra.pooling.new.connection.threshold.local     Protocol V2 and below default = 100. Protocol V3 and above default = 800     100           cassandra.pooling.new.connection.threshold.remote     Protocol V2 and below default = 100. Protocol V3 and above default = 200     100           cassandra.pooling.pool.timeout.millisecs     Cassandra pool time out in millisecs     5000           cassandra.protocol.version     Cassandra binary protocol version     4           cassandra.query.default.consistency           Cassandra query default consistency level            Available values: ONE, TWO, THREE, QUORUM, LOCALONE, LOCALQUORUM, EACHQUORUM, ALL          ONE           cassandra.query.default.fetchSize     Cassandra query default fetch size     5000           cassandra.query.default.serial.consistency 
           Cassandra query default serial consistency level            Available values: SERIAL, LOCALSERIAL          SERIAL           cassandra.reconnection.policy             Cassandra Reconnection Policy.        Default = new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000)        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)          DEFAULT           cassandra.retry.policy             Cassandra Retry Policy.        Default = DefaultRetryPolicy.INSTANCE        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)          DEFAULT           cassandra.socket.connection.timeout.millisecs     Cassandra socket default connection timeout in millisecs     500           cassandra.socket.read.timeout.millisecs     Cassandra 
 socket read timeout in millisecs     12000           cassandra.socket.tcp.no_delay     Cassandra socket TCP no delay     true           cassandra.speculative.execution.policy             Cassandra Speculative Execution Policy.        Default = NoSpeculativeExecutionPolicy.INSTANCE        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)          DEFAULT    Change Log3.0 (Zeppelin 0.8.0-SNAPSHOT) :Update documentationUpdate interactive documentationAdd support for binary protocol V4Implement new @requestTimeOut runtime optionUpgrade Java driver version to 3.0.1Allow interpreter to add dynamic forms programmatically when using FormType.SIMPLEAllow dynamic form using default Zeppelin syntaxFixing typo on FallThroughPolicyLook for data in AngularObjectRegistry before creating dynamic formAdd missing support for ALTER statements2.0 (Zeppelin 0.8.0-SNAPSHOT)
  :Update help menu and add changelogAdd Support for User Defined Functions, User Defined Aggregates and Materialized ViewsUpgrade Java driver version to 3.0.0-rc11.0 (Zeppelin 0.5.5-incubating) :Initial versionBugs & ContactsIf you encounter a bug for this interpreter, please create a JIRA ticket and ping me on Twitter at @doanduyhaiZeppelin Dynamic FormInterpreter Binding Mode",
+      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Cassandra CQL Interpreter for Apache Zeppelin      Name    Class    Description        %cassandra    CassandraInterpreter    Provides interpreter for Apache Cassandra CQL query language  Enabling Cassandra InterpreterIn a notebook, to enable the Cassandra interpreter, click on the Gear icon and select Cassandra  Using the Cassandra InterpreterIn a paragraph, use %cassandra to select the Cassandra interpreter and then input all comm
 ands.To access the interactive help, type HELP;    Interpreter CommandsThe Cassandra interpreter accepts the following commands            Command Type      Command Name      Description              Help command      HELP      Display the interactive help menu              Schema commands      DESCRIBE KEYSPACE, DESCRIBE CLUSTER, DESCRIBE TABLES ...      Custom commands to describe the Cassandra schema              Option commands      @consistency, @retryPolicy, @fetchSize ...      Inject runtime options to all statements in the paragraph              Prepared statement commands      @prepare, @bind, @remove_prepared      Let you register a prepared command and re-use it later by injecting bound values              Native CQL statements      All CQL-compatible statements (SELECT, INSERT, CREATE ...)      All CQL statements are executed directly against the Cassandra server      CQL statementsThis interpreter is compatible with any CQL statement supported by Cassandra. Ex:INSERT IN
 TO users(login,name) VALUES('jdoe','John DOE');SELECT * FROM users WHERE login='jdoe';Each statement should be separated by a semi-colon ( ; ) except the special commands below:@prepare@bind@remove_prepare@consistency@serialConsistency@timestamp@retryPolicy@fetchSize@requestTimeOutMulti-line statements as well as multiple statements on the same line are also supported as long as they are separated by a semi-colon. Ex:USE spark_demo;SELECT * FROM albums_by_country LIMIT 1; SELECT * FROM countries LIMIT 1;SELECT *FROM artistsWHERE login='jlennon';Batch statements are supported and can span multiple lines, as well as DDL(CREATE/ALTER/DROP) statements:BEGIN BATCH    INSERT INTO users(login,name) VALUES('jdoe','John DOE');    INSERT INTO users_preferences(login,account_type) VALUES('jdoe','BASIC');APPLY BATCH;CREATE TABLE IF NOT EXISTS test(    key int PRIMARY K
 EY,    value text);CQL statements are case-insensitive (except for column names and values). This means that the following statements are equivalent and valid:INSERT INTO users(login,name) VALUES('jdoe','John DOE');Insert into users(login,name) vAlues('hsue','Helen SUE');The complete list of all CQL statements and versions can be found below:         Cassandra Version     Documentation Link           3.x             <a target="_blank"          href="http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html">          http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html                        2.2             <a target="_blank"          href="http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html">          http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html                        2.1 & 2.0             <a target="_blank"          href="http://docs
 .datastax.com/en/cql/3.1/cql/cql_intro_c.html">          http://docs.datastax.com/en/cql/3.1/cql/cqlintroc.html                        1.2             <a target="_blank"          href="http://docs.datastax.com/en/cql/3.0/cql/aboutCQL.html">          http://docs.datastax.com/en/cql/3.0/cql/aboutCQL.html                 Comments in statementsIt is possible to add comments between statements. Single line comments start with the hash sign (#) or double slashes (//). Multi-line comments are enclosed between /** and **/. Ex:#Single line comment style 1INSERT INTO users(login,name) VALUES('jdoe','John DOE');//Single line comment style 2/** Multi line comments **/Insert into users(login,name) vAlues('hsue','Helen SUE');Syntax ValidationThe interpreter is shipped with a built-in syntax validator. This validator only checks for basic syntax errors.All CQL-related syntax validation is delegated d
 irectly to CassandraMost of the time, syntax errors are due to missing semi-colons between statements or typos.Schema commandsTo make schema discovery easier and more interactive, the following commands are supported:         Command     Description           DESCRIBE CLUSTER;     Show the current cluster name and its partitioner           DESCRIBE KEYSPACES;     List all existing keyspaces in the cluster and their configuration (replication factor, durable write ...)           DESCRIBE TABLES;     List all existing keyspaces in the cluster and for each, all the table names           DESCRIBE TYPES;     List all existing keyspaces in the cluster and for each, all the user-defined type names           DESCRIBE FUNCTIONS;     List all existing keyspaces in the cluster and for each, all the function names           DESCRIBE AGGREGATES;     List all existing keyspaces in the cluster and for each, all the aggregate names           DESCRIBE MATERIALIZED VIEWS;     List all existing 
 keyspaces in the cluster and for each, all the materialized view names           DESCRIBE KEYSPACE <keyspacename>;     Describe the given keyspace configuration and all its table details (name, columns, ...)           DESCRIBE TABLE (<keyspacename>).<tablename>;             Describe the given table. If the keyspace is not provided, the current logged in keyspace is used.        If there is no logged in keyspace, the default system keyspace is used.        If no table is found, an error message is raised                DESCRIBE TYPE (<keyspacename>).<typename>;             Describe the given type (UDT). If the keyspace is not provided, the current logged in keyspace is used.        If there is no logged in keyspace, the default system keyspace is used.        If no type is found, an error message is raised                DESCRIBE FUNCTION (<keyspacename>).<functionname>;     Describe the 
 given function. If the keyspace is not provided, the current logged in keyspace is used.         If there is no logged in keyspace, the default system keyspace is used.         If no function is found, an error message is raised                DESCRIBE AGGREGATE (<keyspacename>).<aggregatename>;     Describe the given aggregate. If the keyspace is not provided, the current logged in keyspace is used.         If there is no logged in keyspace, the default system keyspace is used.         If no aggregate is found, an error message is raised                DESCRIBE MATERIALIZED VIEW (<keyspacename>).<view_name>;     Describe the given view. If the keyspace is not provided, the current logged in keyspace is used.         If there is no logged in keyspace, the default system keyspace is used.         If no view is found, an error message is raised         The schema objects (cluster, keyspace, table, type, function and aggregate) ar
 e displayed in a tabular format.There is a drop-down menu on the top left corner to expand objects details. On the top right menu is shown the Icon legend.  Runtime ParametersSometimes you want to be able to pass runtime query parameters to your statements.Those parameters are not part of the CQL specs and are specific to the interpreter.Below is the list of all parameters:         Parameter     Syntax     Description           Consistency Level     @consistency=value     Apply the given consistency level to all queries in the paragraph           Serial Consistency Level     @serialConsistency=value     Apply the given serial consistency level to all queries in the paragraph           Timestamp     @timestamp=long value             Apply the given timestamp to all queries in the paragraph.        Please note that timestamp value passed directly in CQL statement will override this value                 Retry Policy     @retryPolicy=value     Apply the given retry policy to all querie
 s in the paragraph           Fetch Size     @fetchSize=integer value     Apply the given fetch size to all queries in the paragraph           Request Time Out     @requestTimeOut=integer value     Apply the given request timeout in millisecs to all queries in the paragraph    Some parameters only accept restricted values:         Parameter     Possible Values           Consistency Level     ALL, ANY, ONE, TWO, THREE, QUORUM, LOCALONE, LOCALQUORUM, EACHQUORUM           Serial Consistency Level     SERIAL, LOCALSERIAL           Timestamp     Any long value           Retry Policy     DEFAULT, DOWNGRADINGCONSISTENCY, FALLTHROUGH, LOGGINGDEFAULT, LOGGINGDOWNGRADING, LOGGINGFALLTHROUGH           Fetch Size     Any integer value    Please note that you should not add semi-colon ( ; ) at the end of each parameter statementSome examples:CREATE TABLE IF NOT EXISTS spark_demo.ts(    key int PRIMARY KEY,    value text);TRUNCATE spark_demo.ts;// Timestamp in the past@timestamp=10// Force timesta
 mp directly in the first insertINSERT INTO spark_demo.ts(key,value) VALUES(1,'first insert') USING TIMESTAMP 100;// Select some data to make the clock turnSELECT * FROM spark_demo.albums LIMIT 100;// Now insert using the timestamp parameter set at the beginning(10)INSERT INTO spark_demo.ts(key,value) VALUES(1,'second insert');// Check for the result. You should see 'first insert'SELECT value FROM spark_demo.ts WHERE key=1;Some remarks about query parameters:many query parameters can be set in the same paragraphif the same query parameter is set many times with different values, the interpreter only takes into account the first valueeach query parameter applies to all CQL statements in the same paragraph, unless you override the option using plain CQL text (like forcing timestamp with the USING clause)the order of each query parameter with regard to CQL statement does not matterSupport for Prepared StatementsFor performance reasons, it is 
 better to prepare statements before-hand and reuse them later by providing bound values.This interpreter provides 3 commands to handle prepared and bound statements:@prepare@bind@remove_preparedExample:@prepare[statement-name]=...@bind[statement-name]=’text’, 1223, ’2015-07-30 12:00:01’, null, true, [‘list_item1’, ’list_item2’]@bind[statement-name-with-no-bound-value]@remove_prepare[statement-name]@prepareYou can use the syntax "@prepare[statement-name]=SELECT..." to create a prepared statement.The statement-name is mandatory because the interpreter prepares the given statement with the Java driver andsaves the generated prepared statement in an internal hash map, using the provided statement-name as search key.Please note that this internal prepared statement map is shared with all notebooks and all paragraphs becausethere is only one instance of the interpreter for CassandraIf the interpreter encounters many @prepare for th
 e same statement-name (key), only the first statement will be taken into account.Example:@prepare[select]=SELECT * FROM spark_demo.albums LIMIT ?@prepare[select]=SELECT * FROM spark_demo.artists LIMIT ?For the above example, the prepared statement is SELECT * FROM spark_demo.albums LIMIT ?.SELECT * FROM spark_demo.artists LIMIT ? is ignored because an entry already exists in the prepared statements map with the key select.In the context of Zeppelin, a notebook can be scheduled to be executed at a regular interval,thus it is necessary to avoid re-preparing the same statement many times (considered an anti-pattern).@bindOnce the statement is prepared (possibly in a separate notebook/paragraph), you can bind values to it:@bind[select_first]=10Bound values are not mandatory for the @bind statement. However, if you provide bound values, they need to comply with some syntax:String values should be enclosed between simple quotes ( ‘ )Date values should be enclosed between simple quotes ( 
 ‘ ) and respect the formats:yyyy-MM-dd HH:MM:ssyyyy-MM-dd HH:MM:ss.SSSnull is parsed as-isboolean (true|false) are parsed as-iscollection values must follow the standard CQL syntax:list: [‘listitem1’, ’listitem2’, ...]set: {‘setitem1’, ‘setitem2’, …}map: {‘key1’: ‘val1’, ‘key2’: ‘val2’, …}tuple values should be enclosed between parentheses (see Tuple CQL syntax): (‘text’, 123, true)udt values should be enclosed between brackets (see UDT CQL syntax): {streetname: ‘Beverly Hills’, number: 104, zipcode: 90020, state: ‘California’, …}It is possible to use the @bind statement inside a batch:BEGIN BATCH   @bind[insert_user]='jdoe','John DOE'   UPDATE users SET age = 27 WHERE login='hsue';APPLY BATCH;@remove_prepareTo prevent a prepared statement from staying forever in the prepared statement map, you can use the@remov
 e_prepare[statement-name] syntax to remove it.Removing a non-existing prepared statement yields no error.Using Dynamic FormsInstead of hard-coding your CQL queries, it is possible to use [Zeppelin dynamic form] syntax to inject simple values or multiple-choice forms.The legacy mustache syntax ( {{ }} ) to bind input text and select form is still supported but is deprecated and will be removed in future releases.LegacyThe syntax for a simple parameter is: {{input_Label=default value}}. The default value is mandatory because the first time the paragraph is executed,we launch the CQL query before rendering the form so at least one value should be provided.The syntax for a multiple-choice parameter is: {{input_Label=value1 | value2 | … | valueN }}. By default the first choice is used for CQL querythe first time the paragraph is executed.Example:#Secondary index on performer styleSELECT name, country, performerFROM spark_demo.performersWHERE name='${performer=Sheryl Crow|Doof|F
 anfarlo|Los Paranoia}'AND styles CONTAINS '${style=Rock}';In the above example, the first CQL query will be executed for performer='Sheryl Crow' AND style='Rock'.For subsequent queries, you can change the value directly using the form.Please note that we enclosed the ${ } block between simple quotes ( ' ) because Cassandra expects a String here.We could also have used the ${style='Rock'} syntax but this time, the value displayed on the form is 'Rock' and not Rock.It is also possible to use dynamic forms for prepared statements:@bind[select]='${performer=Sheryl Crow|Doof|Fanfarlo|Los Paranoia}', '${style=Rock}'Shared statesIt is possible to execute many paragraphs in parallel. However, at the back-end side, we’re still using synchronous queries.Asynchronous execution is only possible when it is possible to return a Future value in the InterpreterResult.
 It may be an interesting proposal for the Zeppelin project.Recently, Zeppelin allows you to choose the level of isolation for your interpreters (see [Interpreter Binding Mode] ).Long story short, you have 3 available bindings:shared : same JVM and same Interpreter instance for all notesscoped : same JVM but different Interpreter instances, one for each noteisolated: different JVM running a single Interpreter instance, one JVM for each noteUsing the shared binding, the same com.datastax.driver.core.Session object is used for all notes and paragraphs.Consequently, if you use the USE keyspace name; statement to log into a keyspace, it will change the keyspace forall current users of the Cassandra interpreter because we only create 1 com.datastax.driver.core.Session objectper instance of Cassandra interpreter.The same remark does apply to the prepared statement hash map, it is shared by all users using the same instance of Cassandra interpreter.When using scoped binding, in the same JVM
  Zeppelin will create multiple instances of the Cassandra interpreter, thus multiple com.datastax.driver.core.Session objects. Beware of resource and memory usage using this binding! The isolated mode is the most extreme and will create as many JVM/com.datastax.driver.core.Session objects as there are distinct notes.Interpreter ConfigurationTo configure the Cassandra interpreter, go to the Interpreter menu and scroll down to change the parameters.The Cassandra interpreter is using the official Cassandra Java Driver and most of the parameters are usedto configure the Java driverBelow are the configuration parameters and their default value.        Property Name     Description     Default Value           cassandra.cluster     Name of the Cassandra cluster to connect to     Test Cluster           cassandra.compression.protocol     On wire compression. Possible values are: NONE, SNAPPY, LZ4     NONE           cassandra.credentials.username     If security is enabled, provide the login  
    none           cassandra.credentials.password     If security is enabled, provide the password     none           cassandra.hosts             Comma-separated Cassandra hosts (DNS name or IP address).                Ex: '192.168.0.12,node2,node3'           localhost           cassandra.interpreter.parallelism     Number of concurrent paragraphs(queries block) that can be executed     10           cassandra.keyspace             Default keyspace to connect to.                  It is strongly recommended to keep the default value          and prefix the table name with the actual keyspace          in all of your queries                  system           cassandra.load.balancing.policy             Load balancing policy. Default = new TokenAwarePolicy(new DCAwareRoundRobinPolicy())        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)    
       DEFAULT           cassandra.max.schema.agreement.wait.second     Cassandra max schema agreement wait in second     10           cassandra.pooling.core.connection.per.host.local     Protocol V2 and below default = 2. Protocol V3 and above default = 1     2           cassandra.pooling.core.connection.per.host.remote     Protocol V2 and below default = 1. Protocol V3 and above default = 1     1           cassandra.pooling.heartbeat.interval.seconds     Cassandra pool heartbeat interval in secs     30           cassandra.pooling.idle.timeout.seconds     Cassandra idle time out in seconds     120           cassandra.pooling.max.connection.per.host.local     Protocol V2 and below default = 8. Protocol V3 and above default = 1     8           cassandra.pooling.max.connection.per.host.remote     Protocol V2 and below default = 2. Protocol V3 and above default = 1     2           cassandra.pooling.max.request.per.connection.local     Protocol V2 and below default = 128. Protocol V3 and
  above default = 1024     128           cassandra.pooling.max.request.per.connection.remote     Protocol V2 and below default = 128. Protocol V3 and above default = 256     128           cassandra.pooling.new.connection.threshold.local     Protocol V2 and below default = 100. Protocol V3 and above default = 800     100           cassandra.pooling.new.connection.threshold.remote     Protocol V2 and below default = 100. Protocol V3 and above default = 200     100           cassandra.pooling.pool.timeout.millisecs     Cassandra pool time out in millisecs     5000           cassandra.protocol.version     Cassandra binary protocol version     4           cassandra.query.default.consistency           Cassandra query default consistency level            Available values: ONE, TWO, THREE, QUORUM, LOCALONE, LOCALQUORUM, EACHQUORUM, ALL          ONE           cassandra.query.default.fetchSize     Cassandra query default fetch size     5000           cassandra.query.default.serial.consistency 
           Cassandra query default serial consistency level            Available values: SERIAL, LOCALSERIAL          SERIAL           cassandra.reconnection.policy             Cassandra Reconnection Policy.        Default = new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000)        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)          DEFAULT           cassandra.retry.policy             Cassandra Retry Policy.        Default = DefaultRetryPolicy.INSTANCE        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)          DEFAULT           cassandra.socket.connection.timeout.millisecs     Cassandra socket default connection timeout in millisecs     500           cassandra.socket.read.timeout.millisecs     Cassandra 
 socket read timeout in millisecs     12000           cassandra.socket.tcp.no_delay     Cassandra socket TCP no delay     true           cassandra.speculative.execution.policy             Cassandra Speculative Execution Policy.        Default = NoSpeculativeExecutionPolicy.INSTANCE        To specify your own policy, provide the fully qualified class name (FQCN) of your policy.        At runtime the interpreter will instantiate the policy using        Class.forName(FQCN)          DEFAULT           cassandra.ssl.enabled             Enable support for connecting to Cassandra configured with SSL.        To connect to Cassandra configured with SSL, use true        and provide a truststore file and password with the following options.          false           cassandra.ssl.truststore.path             Filepath for the truststore file to use for connection to Cassandra with SSL.                     cassandra.ssl.truststore.password             Password for the truststore file to use for connect
 ion to Cassandra with SSL.              Change Log3.0 (Zeppelin 0.8.0-SNAPSHOT) :Update documentationUpdate interactive documentationAdd support for binary protocol V4Implement new @requestTimeOut runtime optionUpgrade Java driver version to 3.0.1Allow interpreter to add dynamic forms programmatically when using FormType.SIMPLEAllow dynamic form using default Zeppelin syntaxFixing typo on FallThroughPolicyLook for data in AngularObjectRegistry before creating dynamic formAdd missing support for ALTER statements2.0 (Zeppelin 0.8.0-SNAPSHOT) :Update help menu and add changelogAdd Support for User Defined Functions, User Defined Aggregates and Materialized ViewsUpgrade Java driver version to 3.0.0-rc11.0 (Zeppelin 0.5.5-incubating) :Initial versionBugs & ContactsIf you encounter a bug for this interpreter, please create a JIRA ticket and ping me on Twitter at @doanduyhaiZeppelin Dynamic FormInterpreter Binding Mode",
       "url": " /interpreter/cassandra.html",
       "group": "interpreter",
       "excerpt": "Apache Cassandra database is the right choice when you need scalability and high availability without compromising performance."
@@ -325,7 +325,7 @@
 
     "/interpreter/python.html": {
       "title": "Python 2 & 3 Interpreter for Apache Zeppelin",
-      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Python 2 & 3 Interpreter for Apache ZeppelinConfiguration      Property    Default    Description        zeppelin.python    python    Path of the already installed Python binary (could be python2 or python3).    If python is not in your $PATH you can set the absolute path (example: /usr/bin/python)            zeppelin.python.maxResult    1000    Max number of dataframe rows to display.  Enabling Python InterpreterIn a
 notebook, to enable the Python interpreter, click on the Gear icon and select PythonUsing the Python InterpreterIn a paragraph, use %python to select the Python interpreter and then input all commands.The interpreter can only work if you already have python installed (the interpreter doesn't bring its own python binaries).To access the help, type help()Python environmentsDefaultBy default, PythonInterpreter will use the python command defined in the zeppelin.python property to run the python process.The interpreter can use all modules already installed (with pip, easy_install...)CondaConda is a package management system and environment management system for python.%python.conda interpreter lets you change between environments.Usageget the Conda information: %python.conda infolist the Conda environments: %python.conda env listcreate a conda environment: %python.conda create --name [ENV NAME]activate an environment (python interpreter will be restarted): %python.conda activate [ENV NAME]d
 eactivate%python.conda deactivateget installed package list inside the current environment%python.conda listinstall package%python.conda install [PACKAGE NAME]uninstall package%python.conda uninstall [PACKAGE NAME]Docker%python.docker interpreter allows PythonInterpreter creates python process in a specified docker container.Usageactivate an environment%python.docker activate [Repository]%python.docker activate [Repository:Tag]%python.docker activate [Image Id]deactivate%python.docker deactivateHere is an example# activate latest tensorflow image as a python environment%python.docker activate gcr.io/tensorflow/tensorflow:latestUsing Zeppelin Dynamic FormsYou can leverage Zeppelin Dynamic Form inside your Python code.Zeppelin Dynamic Form can only be used if py4j Python library is installed in your system. If not, you can install it with pip install py4j.Example : %python### Input formprint (z.input("f1","defaultValue"))### Select formprint (z.sele
 ct("f1",[("o1","1"),("o2","2")],"2"))### Checkbox formprint("".join(z.checkbox("f3", [("o1","1"), ("o2","2")],["1"])))Matplotlib integrationThe python interpreter can display matplotlib figures inline automatically using the pyplot module:%pythonimport matplotlib.pyplot as pltplt.plot([1, 2, 3])This is the recommended method for using matplotlib from within a Zeppelin notebook. The output of this command will by default be converted to HTML by implicitly making use of the %html magic. Additional configuration can be achieved using the builtin z.configure_mpl() method. For example, z.configure_mpl(width=400, height=300, fmt='svg')plt.plot([1, 2, 3])Will produce a 400x300 image in SVG format, which by default are normally 600x400 and PNG r
 espectively. In the future, another option called angular can be used to make it possible to update a plot produced from one paragraph directly from another (the output will be %angular instead of %html). However, this feature is already available in the pyspark interpreter. More details can be found in the included "Zeppelin Tutorial: Python - matplotlib basic" tutorial notebook. If Zeppelin cannot find the matplotlib backend files (which should usually be found in $ZEPPELIN_HOME/interpreter/lib/python) in your PYTHONPATH, then the backend will automatically be set to agg, and the (otherwise deprecated) instructions below can be used for more limited inline plotting.If you are unable to load the inline backend, use z.show(plt): python%pythonimport matplotlib.pyplot as pltplt.figure()(.. ..)z.show(plt)plt.close()The z.show() function can take optional parameters to adapt graph dimensions (width and height) as well as output format (png or optionally svg).%pythonz.s
 how(plt, width='50px')z.show(plt, height='150px', fmt='svg')Pandas integrationApache Zeppelin Table Display System provides built-in data visualization capabilities. Python interpreter leverages it to visualize Pandas DataFrames though similar z.show() API, same as with Matplotlib integration.Example:import pandas as pdrates = pd.read_csv("bank.csv", sep=";")z.show(rates)SQL over Pandas DataFramesThere is a convenience %python.sql interpreter that matches Apache Spark experience in Zeppelin and enables usage of SQL language to query Pandas DataFrames and visualization of results though built-in Table Display System.Pre-requestsPandas pip install pandasPandaSQL pip install -U pandasqlIn case default binded interpreter is Python (first in the interpreter list, under the Gear Icon), you can just use it as %sql i.efirst paragraphimport pandas as pdrates = pd.read_csv("bank.csv", sep=&am
 p;quot;;")next paragraph%sqlSELECT * FROM rates WHERE age < 40Otherwise it can be referred to as %python.sqlTechnical descriptionFor in-depth technical details on current implementation please refer to python/README.md.Some features not yet implemented in the Python InterpreterInterrupt a paragraph execution (cancel() method) is currently only supported in Linux and MacOs. If interpreter runs in another operating system (for instance MS Windows) , interrupt a paragraph will close the whole interpreter. A JIRA ticket (ZEPPELIN-893) is opened to implement this feature in a next release of the interpreter.Progression bar in webUI  (getProgress() method) is currently not implemented.Code-completion is currently not implemented.",
+      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Python 2 & 3 Interpreter for Apache ZeppelinConfiguration      Property    Default    Description        zeppelin.python    python    Path of the already installed Python binary (could be python2 or python3).    If python is not in your $PATH you can set the absolute directory (example : /usr/bin/python)            zeppelin.python.maxResult    1000    Max number of dataframe rows to display.  Enabling Python InterpreterIn a
  notebook, to enable the Python interpreter, click on the Gear icon and select PythonUsing the Python InterpreterIn a paragraph, use %python to select the Python interpreter and then input all commands.The interpreter can only work if you already have python installed (the interpreter doesn't bring it own python binaries).To access the help, type help()Python environmentsDefaultBy default, PythonInterpreter will use python command defined in zeppelin.python property to run python process.The interpreter can use all modules already installed (with pip, easy_install...)CondaConda is an package management system and environment management system for python.%python.conda interpreter lets you change between environments.Usageget the Conda Infomation: %python.conda infolist the Conda environments: %python.conda env listcreate a conda enviornment: %python.conda create --name [ENV NAME]activate an environment (python interpreter will be restarted): %python.conda activate [ENV NAME]d
 eactivate%python.conda deactivateget installed package list inside the current environment%python.conda listinstall package%python.conda install [PACKAGE NAME]uninstall package%python.conda uninstall [PACKAGE NAME]Docker%python.docker interpreter allows PythonInterpreter creates python process in a specified docker container.Usageactivate an environment%python.docker activate [Repository]%python.docker activate [Repository:Tag]%python.docker activate [Image Id]deactivate%python.docker deactivateHere is an example# activate latest tensorflow image as a python environment%python.docker activate gcr.io/tensorflow/tensorflow:latestUsing Zeppelin Dynamic FormsYou can leverage Zeppelin Dynamic Form inside your Python code.Zeppelin Dynamic Form can only be used if py4j Python library is installed in your system. If not, you can install it with pip install py4j.Example : %python### Input formprint (z.input("f1","defaultValue"))### Select formprint (z.sele
 ct("f1",[("o1","1"),("o2","2")],"2"))### Checkbox formprint("".join(z.checkbox("f3", [("o1","1"), ("o2","2")],["1"])))Matplotlib integrationThe python interpreter can display matplotlib figures inline automatically using the pyplot module:%pythonimport matplotlib.pyplot as pltplt.plot([1, 2, 3])This is the recommended method for using matplotlib from within a Zeppelin notebook. The output of this command will by default be converted to HTML by implicitly making use of the %html magic. Additional configuration can be achieved using the builtin z.configure_mpl() method. For example, z.configure_mpl(width=400, height=300, fmt='svg')plt.plot([1, 2, 3])Will produce a 400x300 image in SVG format, which by default are normally 600x400 and PNG r
 espectively. In the future, another option called angular can be used to make it possible to update a plot produced from one paragraph directly from another (the output will be %angular instead of %html). However, this feature is already available in the pyspark interpreter. More details can be found in the included "Zeppelin Tutorial: Python - matplotlib basic" tutorial notebook. If Zeppelin cannot find the matplotlib backend files (which should usually be found in $ZEPPELIN_HOME/interpreter/lib/python) in your PYTHONPATH, then the backend will automatically be set to agg, and the (otherwise deprecated) instructions below can be used for more limited inline plotting.If you are unable to load the inline backend, use z.show(plt): python%pythonimport matplotlib.pyplot as pltplt.figure()(.. ..)z.show(plt)plt.close()The z.show() function can take optional parameters to adapt graph dimensions (width and height) as well as output format (png or optionally svg).%pythonz.s
 how(plt, width='50px')z.show(plt, height='150px', fmt='svg')Pandas integrationApache Zeppelin Table Display System provides built-in data visualization capabilities. Python interpreter leverages it to visualize Pandas DataFrames though similar z.show() API, same as with Matplotlib integration.Example:import pandas as pdrates = pd.read_csv("bank.csv", sep=";")z.show(rates)SQL over Pandas DataFramesThere is a convenience %python.sql interpreter that matches Apache Spark experience in Zeppelin and enables usage of SQL language to query Pandas DataFrames and visualization of results though built-in Table Display System.Pre-requestsPandas pip install pandasPandaSQL pip install -U pandasqlIn case default binded interpreter is Python (first in the interpreter list, under the Gear Icon), you can just use it as %sql i.efirst paragraphimport pandas as pdrates = pd.read_csv("bank.csv", sep=&am
 p;quot;;")next paragraph%sqlSELECT * FROM rates WHERE age < 40Otherwise it can be referred to as %python.sqlIPython SupportIPython is more powerful than the default python interpreter with extra functionality. You can use IPython with Python2 or Python3 which depends on which python you set zeppelin.python.Pre-requests- Jupyter `pip install jupyter`- grpcio `pip install grpcio`If you already install anaconda, then you just need to install grpcio as Jupyter is already included in anaconda.In addition to all basic functions of the python interpreter, you can use all the IPython advanced features as you use it in Jupyter Notebook.e.g. Use IPython magic%python.ipython#python helprange?#timeit%timeit range(100)Use matplotlib %python.ipython%matplotlib inlineimport matplotlib.pyplot as pltprint("hello world")data=[1,2,3,4]plt.figure()plt.plot(data)We also make ZeppelinContext available in IPython Interpreter. You can use ZeppelinContext to create dynamic 
 forms and display pandas DataFrame.e.g.Create dynamic formz.input(name='my_name', defaultValue='hello')Show pandas dataframeimport pandas as pddf = pd.DataFrame({'id':[1,2,3], 'name':['a','b','c']})z.show(df)By default, we would use IPython in %python.python if IPython is available. Otherwise it would fall back to the original Python implementation.If you don't want to use IPython, then you can set zeppelin.python.useIPython as false in interpreter setting.Technical descriptionFor in-depth technical details on current implementation please refer to python/README.md.Some features not yet implemented in the Python InterpreterInterrupt a paragraph execution (cancel() method) is currently only supported in Linux and MacOs. If interpreter runs in another operating system (for instance MS Windows) , interrupt a paragraph will close the whole interpreter. A JIRA ticket (Z
 EPPELIN-893) is opened to implement this feature in a next release of the interpreter.Progression bar in webUI  (getProgress() method) is currently not implemented.Code-completion is currently not implemented.",
       "url": " /interpreter/python.html",
       "group": "interpreter",
       "excerpt": "Python is a programming language that lets you work quickly and integrate systems more effectively."
@@ -369,7 +369,7 @@
 
     "/interpreter/shell.html": {
       "title": "Shell interpreter for Apache Zeppelin",
-      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Shell interpreter for Apache ZeppelinOverviewShell interpreter uses Apache Commons Exec to execute external processes. In Zeppelin notebook, you can use %sh in the beginning of a paragraph to invoke system shell and run commands.Note : Currently each command runs as the user Zeppelin server is running as.ConfigurationAt the "Interpreters" menu in Zeppelin dropdown menu, you can set the property value for Shell int
 erpreter.      Name    Value    Description        shell.command.timeout.millisecs    60000    Shell command time out in millisecs        zeppelin.shell.auth.type        Types of authentications' methods supported are SIMPLE, and KERBEROS        zeppelin.shell.principal        The principal name to load from the keytab        zeppelin.shell.keytab.location        The path to the keytab file  ExampleThe following example demonstrates the basic usage of Shell in a Zeppelin notebook.If you need further information about Zeppelin Interpreter Setting for using Shell interpreter, please read What is interpreter setting? section first.Kerberos refresh intervalFor changing the default behavior of when to renew Kerberos ticket following changes can be made in conf/zeppelin-env.sh.# Change Kerberos refresh interval (default value is 1d). Allowed postfix are ms, s, m, min, h, and d.export LAUNCH_KERBEROS_REFRESH_INTERVAL=4h# Change kinit number retries (default value is 5), which means if 
 the kinit command fails for 5 retries consecutively it will close the interpreter. export KINIT_FAIL_THRESHOLD=10",
+      "content"  : "<!--Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.-->Shell interpreter for Apache ZeppelinOverviewShell interpreter uses Apache Commons Exec to execute external processes. In Zeppelin notebook, you can use %sh in the beginning of a paragraph to invoke system shell and run commands.Note : Currently each command runs as the user Zeppelin server is running as.ConfigurationAt the "Interpreters" menu in Zeppelin dropdown menu, you can set the property value for Shell int
 erpreter.      Name    Value    Description        shell.command.timeout.millisecs    60000    Shell command time out in millisecs        shell.working.directory.user.home    false    If this set to true, the shell's working directory will be set to user home        zeppelin.shell.auth.type        Types of authentications' methods supported are SIMPLE, and KERBEROS        zeppelin.shell.principal        The principal name to load from the keytab        zeppelin.shell.keytab.location        The path to the keytab file  ExampleThe following example demonstrates the basic usage of Shell in a Zeppelin notebook.If you need further information about Zeppelin Interpreter Setting for using Shell interpreter, please read What is interpreter setting? section first.Kerberos refresh intervalFor changing the default behavior of when to renew Kerberos ticket following changes can be made in conf/zeppelin-env.sh.# Change Kerberos refresh interval (default value is 1d). Allowed postfix are 
 ms, s, m, min, h, and d.export LAUNCH_KERBEROS_REFRESH_INTERVAL=4h# Change kinit number retries (default value is 5), which means if the kinit command fails for 5 retries consecutively it will close the interpreter. export KINIT_FAIL_THRESHOLD=10",
       "url": " /interpreter/shell.html",
       "group": "interpreter",
       "excerpt": "Shell interpreter uses Apache Commons Exec to execute external processes."
@@ -380,7 +380,7 @@
 
     "/interpreter/spark.html": {
       "title": "Apache Spark Interpreter for Apache Zeppelin",

[... 64 lines stripped ...]
Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/how_to_build.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/how_to_build.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/how_to_build.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/how_to_build.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>
@@ -402,6 +403,29 @@ mvn clean package -Pspark-1.5 -Pmapr50 -
 <p>Scalding Interpreter</p>
 <div class="highlight"><pre><code class="bash language-bash" data-lang="bash">mvn clean package -Pscalding -DskipTests
 </code></pre></div>
+<h3>Optional configurations</h3>
+
+<p>Here are additional configurations that can optionally be tuned using trailing <code>-D</code> options on the maven commands above:</p>
+
+<p>Spark package</p>
+<div class="highlight"><pre><code class="bash language-bash" data-lang="bash">spark.archive <span class="c"># default spark-${spark.version}</span>
+spark.src.download.url <span class="c"># default http://d3kbcqa49mib13.cloudfront.net/${spark.archive}.tgz</span>
+spark.bin.download.url <span class="c"># default http://d3kbcqa49mib13.cloudfront.net/${spark.archive}-bin-without-hadoop.tgz</span>
+</code></pre></div>
+<p>Py4J package</p>
+<div class="highlight"><pre><code class="bash language-bash" data-lang="bash">python.py4j.version <span class="c"># default 0.9.2</span>
+pypi.repo.url <span class="c"># default https://pypi.python.org/packages</span>
+python.py4j.repo.folder <span class="c"># default /64/5c/01e13b68e8caafece40d549f232c9b5677ad1016071a48d04cc3895acaa3</span>
+</code></pre></div>
+<p>The final URL for the Py4J package will be produced as follows:</p>
+
+<p><code>${pypi.repo.url}${python.py4j.repo.folder}py4j-${python.py4j.version}.zip</code></p>
+
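+<p>For illustration, a minimal Python sketch of how these three properties expand into the final download URL (the values are the defaults listed above; this only demonstrates the documented expansion and is not part of the build):</p>
+<div class="highlight"><pre><code class="python language-python" data-lang="python"># compose ${pypi.repo.url}${python.py4j.repo.folder}py4j-${python.py4j.version}.zip
+# using the documented default values verbatim
+pypi_repo_url = "https://pypi.python.org/packages"
+py4j_repo_folder = "/64/5c/01e13b68e8caafece40d549f232c9b5677ad1016071a48d04cc3895acaa3"
+py4j_version = "0.9.2"
+
+url = "{}{}py4j-{}.zip".format(pypi_repo_url, py4j_repo_folder, py4j_version)
+print(url)
+</code></pre></div>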
+<p>Frontend Maven Plugin configurations</p>
+<div class="highlight"><pre><code class="text language-text" data-lang="text">plugin.frontend.nodeDownloadRoot # default https://nodejs.org/dist/
+plugin.frontend.npmDownloadRoot # default http://registry.npmjs.org/npm/-/
+plugin.frontend.yarnDownloadRoot # default https://github.com/yarnpkg/yarn/releases/download/
+</code></pre></div>
 <h2>Build requirements</h2>
 
 <h3>Install requirements</h3>

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/multi_user_support.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/multi_user_support.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/multi_user_support.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/basics/multi_user_support.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/cdh.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/cdh.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/cdh.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/cdh.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/docker.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/docker.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/docker.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/docker.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/flink_and_spark_cluster.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/flink_and_spark_cluster.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/flink_and_spark_cluster.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/flink_and_spark_cluster.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/spark_cluster_mode.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/spark_cluster_mode.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/spark_cluster_mode.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/spark_cluster_mode.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>

Modified: zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/virtual_machine.html
URL: http://svn.apache.org/viewvc/zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/virtual_machine.html?rev=1810049&r1=1810048&r2=1810049&view=diff
==============================================================================
--- zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/virtual_machine.html (original)
+++ zeppelin/site/docs/0.8.0-SNAPSHOT/setup/deployment/virtual_machine.html Fri Sep 29 03:41:54 2017
@@ -141,6 +141,7 @@
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/shiro_authentication.html">Shiro Authentication</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/notebook_authorization.html">Notebook Authorization</a></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/datasource_authorization.html">Data Source Authorization</a></li>
+                <li><a href="/docs/0.8.0-SNAPSHOT/setup/security/http_security_headers.html">HTTP Security Headers</a></li>
                 <li role="separator" class="divider"></li>
                 <li class="title"><span>Notebook Storage</span></li>
                 <li><a href="/docs/0.8.0-SNAPSHOT/setup/storage/storage.html#notebook-storage-in-local-git-repository">Git Storage</a></li>