Posted to commits@carbondata.apache.org by ra...@apache.org on 2019/02/11 03:36:45 UTC

[carbondata-site] branch asf-site updated: Added 1.5.2 version information

This is an automated email from the ASF dual-hosted git repository.

raghunandan pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/carbondata-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new e94a0cc  Added 1.5.2 version information
e94a0cc is described below

commit e94a0ccf9c64dbc3f51ea8deb0b75ecc098b0132
Author: Raghunandan S <ca...@gmail.com>
AuthorDate: Wed Feb 6 19:29:04 2019 +0530

    Added 1.5.2 version information
---
 content/CSDK-guide.html                            | 628 ++++++++------
 content/WEB-INF/classes/META-INF/NOTICE            |   2 +-
 content/WEB-INF/classes/application.conf           |   5 +-
 .../alluxio-guide.html                             | 298 +++----
 content/bloomfilter-datamap-guide.html             |  46 +-
 content/carbon-as-spark-datasource-guide.html      |   6 +-
 content/configuration-parameters.html              |  28 +-
 content/datamap-developer-guide.html               |  17 +-
 content/datamap-management.html                    |  20 +-
 content/ddl-of-carbondata.html                     | 406 +++++----
 content/dml-of-carbondata.html                     |  89 +-
 content/documentation.html                         |  14 +-
 content/faq.html                                   |  16 +-
 content/file-structure-of-carbondata.html          |   6 +-
 .../s3-guide.html => content/hive-guide.html       | 135 +--
 .../how-to-contribute-to-apache-carbondata.html    |   6 +-
 content/index.html                                 |  19 +-
 content/introduction.html                          |  16 +-
 content/language-manual.html                       |   6 +-
 content/lucene-datamap-guide.html                  |   6 +-
 content/performance-tuning.html                    |   6 +-
 content/preaggregate-datamap-guide.html            |   6 +-
 .../{quick-start-guide.html => presto-guide.html}  | 499 +++--------
 content/quick-start-guide.html                     | 152 ++--
 content/release-guide.html                         |   6 +-
 content/s3-guide.html                              |  15 +-
 content/sdk-guide.html                             | 947 ++++++++++----------
 content/security.html                              |   6 +-
 content/segment-management-on-carbondata.html      |   6 +-
 content/streaming-guide.html                       |  20 +-
 content/supported-data-types-in-carbondata.html    |   6 +-
 content/timeseries-datamap-guide.html              |  49 +-
 content/usecases.html                              |   9 +-
 content/videogallery.html                          |   3 +
 src/main/resources/application.conf                |   5 +-
 src/main/scala/html/header.html                    |   6 +-
 src/main/scala/scripts/alluxio-guide               |   4 +
 src/main/scala/scripts/hive-guide                  |   4 +
 src/main/scala/scripts/presto-guide                |   4 +
 src/main/webapp/CSDK-guide.html                    | 628 ++++++++------
 ...ilter-datamap-guide.html => alluxio-guide.html} | 298 +++----
 src/main/webapp/bloomfilter-datamap-guide.html     |  46 +-
 .../webapp/carbon-as-spark-datasource-guide.html   |   6 +-
 src/main/webapp/configuration-parameters.html      |  28 +-
 src/main/webapp/datamap-developer-guide.html       |  17 +-
 src/main/webapp/datamap-management.html            |  20 +-
 src/main/webapp/ddl-of-carbondata.html             | 406 +++++----
 src/main/webapp/dml-of-carbondata.html             |  89 +-
 src/main/webapp/documentation.html                 |  14 +-
 src/main/webapp/faq.html                           |  16 +-
 src/main/webapp/file-structure-of-carbondata.html  |   6 +-
 src/main/webapp/{s3-guide.html => hive-guide.html} | 135 +--
 .../how-to-contribute-to-apache-carbondata.html    |   6 +-
 src/main/webapp/index.html                         |   6 +-
 src/main/webapp/introduction.html                  |  16 +-
 src/main/webapp/language-manual.html               |   6 +-
 src/main/webapp/lucene-datamap-guide.html          |   6 +-
 src/main/webapp/performance-tuning.html            |   6 +-
 src/main/webapp/preaggregate-datamap-guide.html    |   6 +-
 .../main/webapp/presto-guide.html                  | 499 +++--------
 src/main/webapp/quick-start-guide.html             | 152 ++--
 src/main/webapp/release-guide.html                 |   6 +-
 src/main/webapp/s3-guide.html                      |  15 +-
 src/main/webapp/sdk-guide.html                     | 947 ++++++++++----------
 src/main/webapp/security.html                      |   6 +-
 .../webapp/segment-management-on-carbondata.html   |   6 +-
 src/main/webapp/streaming-guide.html               |  20 +-
 .../webapp/supported-data-types-in-carbondata.html |   6 +-
 src/main/webapp/timeseries-datamap-guide.html      |  49 +-
 src/main/webapp/usecases.html                      |   9 +-
 src/main/webapp/videogallery.html                  |   3 +
 src/site/markdown/CSDK-guide.md                    | 596 ++++++++-----
 src/site/markdown/alluxio-guide.md                 | 136 +++
 src/site/markdown/bloomfilter-datamap-guide.md     |  40 +-
 src/site/markdown/configuration-parameters.md      |  14 +-
 src/site/markdown/datamap-developer-guide.md       |  25 +-
 src/site/markdown/datamap-management.md            |  14 +-
 src/site/markdown/ddl-of-carbondata.md             | 408 +++++----
 src/site/markdown/dml-of-carbondata.md             |  62 +-
 src/site/markdown/documentation.md                 |   6 +-
 src/site/markdown/faq.md                           |  33 +-
 src/site/markdown/hive-guide.md                    | 102 +++
 src/site/markdown/introduction.md                  |  21 +-
 src/site/markdown/presto-guide.md                  | 287 +++++++
 src/site/markdown/quick-start-guide.md             | 241 +++---
 src/site/markdown/s3-guide.md                      |   9 +-
 src/site/markdown/sdk-guide.md                     | 949 +++++++++++----------
 src/site/markdown/streaming-guide.md               |  14 +-
 src/site/markdown/timeseries-datamap-guide.md      |  43 +-
 src/site/markdown/usecases.md                      |  17 +
 90 files changed, 5552 insertions(+), 4466 deletions(-)

diff --git a/content/CSDK-guide.html b/content/CSDK-guide.html
index 73e1d67..e7809e8 100644
--- a/content/CSDK-guide.html
+++ b/content/CSDK-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -241,94 +241,96 @@ release the memory and destroy JVM.</p>
 <a id="api-list" class="anchor" href="#api-list" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>API List</h2>
 <h3>
 <a id="carbonreader" class="anchor" href="#carbonreader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonReader</h3>
-<pre><code>    /**
-     * create a CarbonReaderBuilder object for building carbonReader,
-     * CarbonReaderBuilder object  can configure different parameter
-     *
-     * @param env JNIEnv
-     * @param path data store path
-     * @param tableName table name
-     * @return CarbonReaderBuilder object
-     */
-    jobject builder(JNIEnv *env, char *path, char *tableName);
-</code></pre>
-<pre><code>    /**
-     * create a CarbonReaderBuilder object for building carbonReader,
-     * CarbonReaderBuilder object  can configure different parameter
-     *
-     * @param env JNIEnv
-     * @param path data store path
-     * */
-    void builder(JNIEnv *env, char *path);
-</code></pre>
-<pre><code>    /**
-     * Configure the projection column names of carbon reader
-     *
-     * @param argc argument counter
-     * @param argv argument vector
-     * @return CarbonReaderBuilder object
-     */
-    jobject projection(int argc, char *argv[]);
-</code></pre>
-<pre><code>    /**
-     *  build carbon reader with argument vector
-     *  it support multiple parameter
-     *  like: key=value
-     *  for example: fs.s3a.access.key=XXXX, XXXX is user's access key value
-     *
-     * @param argc argument counter
-     * @param argv argument vector
-     * @return CarbonReaderBuilder object
-     **/
-    jobject withHadoopConf(int argc, char *argv[]);
-</code></pre>
-<pre><code>   /**
-     * Sets the batch size of records to read
-     *
-     * @param batch batch size
-     * @return CarbonReaderBuilder object
-     */
-    void withBatch(int batch);
-</code></pre>
-<pre><code>    /**
-     * Configure Row Record Reader for reading.
-     */
-    void withRowRecordReader();
-</code></pre>
-<pre><code>    /**
-     * build carbonReader object for reading data
-     * it support read data from load disk
-     *
-     * @return carbonReader object
-     */
-    jobject build();
-</code></pre>
-<pre><code>    /**
-     * Whether it has next row data
-     *
-     * @return boolean value, if it has next row, return true. if it hasn't next row, return false.
-     */
-    jboolean hasNext();
-</code></pre>
-<pre><code>    /**
-     * read next carbonRow from data
-     * @return carbonRow object of one row
-     */
-     jobject readNextRow();
-</code></pre>
-<pre><code>    /**
-     * read Next Batch Row
-     *
-     * @return rows
-     */
-    jobjectArray readNextBatchRow();
-</code></pre>
-<pre><code>    /**
-     * close the carbon reader
-     *
-     * @return  boolean value
-     */
-    jboolean close();
+<pre><code>/**
+ * Create a CarbonReaderBuilder object for building a carbonReader;
+ * the CarbonReaderBuilder object can configure different parameters
+ *
+ * @param env JNIEnv
+ * @param path data store path
+ * @param tableName table name
+ * @return CarbonReaderBuilder object
+ */
+jobject builder(JNIEnv *env, char *path, char *tableName);
+</code></pre>
+<pre><code>/**
+ * Create a CarbonReaderBuilder object for building a carbonReader;
+ * the CarbonReaderBuilder object can configure different parameters
+ *
+ * @param env JNIEnv
+ * @param path data store path
+ */
+void builder(JNIEnv *env, char *path);
+</code></pre>
+<pre><code>/**
+ * Configure the projection column names of carbon reader
+ *
+ * @param argc argument counter
+ * @param argv argument vector
+ * @return CarbonReaderBuilder object
+ */
+jobject projection(int argc, char *argv[]);
+</code></pre>
+<pre><code>/**
+ * Build carbon reader with argument vector;
+ * it supports multiple parameters of the form key=value,
+ * for example: fs.s3a.access.key=XXXX, where XXXX is the user's access key value
+ *
+ * @param argc argument counter
+ * @param argv argument vector
+ * @return CarbonReaderBuilder object
+ */
+jobject withHadoopConf(int argc, char *argv[]);
+</code></pre>
+<pre><code>/**
+ * Sets the batch size of records to read
+ *
+ * @param batch batch size
+ * @return CarbonReaderBuilder object
+ */
+void withBatch(int batch);
+</code></pre>
+<pre><code>/**
+ * Configure Row Record Reader for reading.
+ */
+void withRowRecordReader();
+</code></pre>
+<pre><code>/**
+ * Build carbonReader object for reading data;
+ * it supports reading data from local disk
+ *
+ * @return carbonReader object
+ */
+jobject build();
+</code></pre>
+<pre><code>/**
+ * Whether it has next row data
+ *
+ * @return true if there is a next row, false otherwise
+ */
+jboolean hasNext();
+</code></pre>
+<pre><code>/**
+ * Read next carbonRow from data
+ * @return carbonRow object of one row
+ */
+jobject readNextRow();
+</code></pre>
+<pre><code>/**
+ * Read the next batch of rows
+ *
+ * @return rows
+ */
+jobjectArray readNextBatchRow();
+</code></pre>
+<pre><code>/**
+ * Close the carbon reader
+ *
+ * @return boolean value
+ */
+jboolean close();
 </code></pre>
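+<p>A minimal usage sketch (illustrative only, not part of this commit): assuming the declarations above are
+methods of the C++ SDK's <code>CarbonReader</code> class and that <code>env</code> is a valid
+<code>JNIEnv*</code> from an already-created JVM, a read loop could look like this; the path and table
+name are placeholders:</p>
+<pre><code>CarbonReader reader;
+reader.builder(env, (char *) "/tmp/carbon-data", (char *) "test_table");  // configure the builder
+char *cols[] = {(char *) "name", (char *) "age"};
+reader.projection(2, cols);                     // read only two projection columns
+reader.build();                                 // build the reader
+while (reader.hasNext()) {                      // iterate over rows
+    jobject row = reader.readNextRow();
+    // extract column values from 'row' via JNI calls (elided)
+}
+reader.close();                                 // release reader resources
+</code></pre>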
 <h1>
 <a id="c-sdk-writer" class="anchor" href="#c-sdk-writer" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>C++ SDK Writer</h1>
@@ -348,172 +350,302 @@ release the memory and destroy JVM.</p>
 <a id="api-list-1" class="anchor" href="#api-list-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>API List</h2>
 <h3>
 <a id="carbonwriter" class="anchor" href="#carbonwriter" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonWriter</h3>
-<pre><code>    /**
-     * create a CarbonWriterBuilder object for building carbonWriter,
-     * CarbonWriterBuilder object  can configure different parameter
-     *
-     * @param env JNIEnv
-     * @return CarbonWriterBuilder object
-     */
-    void builder(JNIEnv *env);
-</code></pre>
-<pre><code>    /**
-     * Sets the output path of the writer builder
-     *
-     * @param path is the absolute path where output files are written
-     * This method must be called when building CarbonWriterBuilder
-     * @return updated CarbonWriterBuilder
-     */
-    void outputPath(char *path);
-</code></pre>
-<pre><code>    /**
-     * configure the schema with json style schema
-     *
-     * @param jsonSchema json style schema
-     * @return updated CarbonWriterBuilder
-     */
-    void withCsvInput(char *jsonSchema);
-</code></pre>
-<pre><code>    /**
-    * Updates the hadoop configuration with the given key value
-    *
-    * @param key key word
-    * @param value value
-    * @return CarbonWriterBuilder object
-    */
-    void withHadoopConf(char *key, char *value);
-</code></pre>
-<pre><code>    /**
-     * @param appName appName which is writing the carbondata files
-     */
-    void writtenBy(char *appName);
-</code></pre>
-<pre><code>    /**
-     * build carbonWriter object for writing data
-     * it support write data from load disk
-     *
-     * @return carbonWriter object
-     */
-    void build();
-</code></pre>
-<pre><code>    /**
-     * Write an object to the file, the format of the object depends on the
-     * implementation.
-     * Note: This API is not thread safe
-     */
-    void write(jobject obj);
-</code></pre>
-<pre><code>    /**
-     * close the carbon Writer
-     */
-    void close();
+<pre><code>/**
+ * Create a CarbonWriterBuilder object for building a carbonWriter;
+ * the CarbonWriterBuilder object can configure different parameters
+ *
+ * @param env JNIEnv
+ * @return CarbonWriterBuilder object
+ */
+void builder(JNIEnv *env);
+</code></pre>
+<pre><code>/**
+ * Sets the output path of the writer builder
+ *
+ * @param path is the absolute path where output files are written
+ * This method must be called when building CarbonWriterBuilder
+ * @return updated CarbonWriterBuilder
+ */
+void outputPath(char *path);
+</code></pre>
+<pre><code>/**
+ * Sets the list of columns that need to be in sorted order
+ *
+ * @param argc argument counter, the number of sort columns
+ * @param argv string array of columns that need to be sorted.
+ *             If it is null or not given, all dimensions are selected for sorting;
+ *             if it is an empty array, no columns are sorted
+ */
+void sortBy(int argc, char *argv[]);
+</code></pre>
+<pre><code>/**
+ * Configure the schema with json style schema
+ *
+ * @param jsonSchema json style schema
+ * @return updated CarbonWriterBuilder
+ */
+void withCsvInput(char *jsonSchema);
+</code></pre>
+<pre><code>/**
+ * Updates the hadoop configuration with the given key value
+ *
+ * @param key key word
+ * @param value value
+ * @return CarbonWriterBuilder object
+ */
+void withHadoopConf(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * To support the table properties for writer
+ *
+ * @param key properties key
+ * @param value properties value
+ */
+void withTableProperty(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * To support the load options for C++ sdk writer
+ *
+ * @param options key,value pair of load options.
+ * supported keys values are
+ * a. bad_records_logger_enable -- true (write into separate logs), false
+ * b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
+ * c. bad_record_path -- path
+ * d. dateformat -- same as JAVA SimpleDateFormat
+ * e. timestampformat -- same as JAVA SimpleDateFormat
+ * f. complex_delimiter_level_1 -- value to Split the complexTypeData
+ * g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
+ * h. quotechar
+ * i. escapechar
+ *
+ * Default values are as follows.
+ *
+ * a. bad_records_logger_enable -- "false"
+ * b. bad_records_action -- "FAIL"
+ * c. bad_record_path -- ""
+ * d. dateformat -- "" , uses from carbon.properties file
+ * e. timestampformat -- "", uses from carbon.properties file
+ * f. complex_delimiter_level_1 -- "$"
+ * g. complex_delimiter_level_2 -- ":"
+ * h. quotechar -- "\""
+ * i. escapechar -- "\\"
+ *
+ * @return updated CarbonWriterBuilder
+ */
+void withLoadOption(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * Sets the taskNo for the writer. Concurrently running CSDK writers
+ * should set taskNo in order to avoid conflicts in file names during write.
+ *
+ * @param taskNo is the TaskNo the user wants to specify.
+ *               By default it is the system time in nanoseconds.
+ */
+void taskNo(long taskNo);
+</code></pre>
+<pre><code>/**
+ * Set the timestamp in the carbondata and carbonindex files
+ *
+ * @param timestamp is a timestamp to be used in the carbondata and carbonindex files.
+ * By default it is set to zero.
+ * @return updated CarbonWriterBuilder
+ */
+void uniqueIdentifier(long timestamp);
+</code></pre>
+<pre><code>/**
+ * To make the C++ SDK writer thread safe.
+ *
+ * @param numOfThreads number of threads from which the writer is called in a multi-thread scenario.
+ *                     By default the C++ SDK writer is not thread safe and
+ *                     one writer instance can be used in one thread only.
+ */
+void withThreadSafe(short numOfThreads);
+</code></pre>
+<pre><code>/**
+ * To set the carbondata file size in MB, between 1 MB and 2048 MB
+ *
+ * @param blockSize is the size in MB, between 1 MB and 2048 MB;
+ * default value is 1024 MB
+ */
+void withBlockSize(int blockSize);
+</code></pre>
+<pre><code>/**
+ * To set the blocklet size of CarbonData file
+ *
+ * @param blockletSize is blocklet size in MB
+ *        default value is 64 MB
+ * @return updated CarbonWriterBuilder
+ */
+void withBlockletSize(int blockletSize);
+</code></pre>
+<pre><code>/**
+ * @param localDictionaryThreshold is localDictionaryThreshold, default is 10000
+ * @return updated CarbonWriterBuilder
+ */
+void localDictionaryThreshold(int localDictionaryThreshold);
+</code></pre>
+<pre><code>/**
+ * @param enableLocalDictionary enable local dictionary, default is false
+ * @return updated CarbonWriterBuilder
+ */
+void enableLocalDictionary(bool enableLocalDictionary);
+</code></pre>
+<pre><code>/**
+ * @param appName appName which is writing the carbondata files
+ */
+void writtenBy(char *appName);
+</code></pre>
+<pre><code>/**
+ * Build carbonWriter object for writing data;
+ * it supports writing data to local disk
+ *
+ * @return carbonWriter object
+ */
+void build();
+</code></pre>
+<pre><code>/**
+ * Write an object to the file, the format of the object depends on the
+ * implementation.
+ * Note: This API is not thread safe
+ */
+void write(jobject obj);
+</code></pre>
+<pre><code>/**
+ * Close the carbon writer
+ */
+void close();
 </code></pre>
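+<p>A minimal write flow (illustrative only, not part of this commit): assuming the declarations above are
+methods of a <code>CarbonWriter</code> class and <code>env</code> is a valid <code>JNIEnv*</code>, the
+builder calls chain as follows; the output path, schema string, and app name are placeholders:</p>
+<pre><code>CarbonWriter writer;
+writer.builder(env);
+writer.outputPath((char *) "/tmp/carbon-out");                              // where files are written
+writer.withCsvInput((char *) "[{\"name\":\"string\"},{\"age\":\"int\"}]");  // JSON-style schema
+writer.writtenBy((char *) "csdk-example");                                  // app name recorded in files
+writer.build();
+// Build one row as a JNI object (e.g. a jobjectArray of strings), then:
+// writer.write(row);
+writer.close();
+</code></pre>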
 <h3>
 <a id="carbonschemareader" class="anchor" href="#carbonschemareader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonSchemaReader</h3>
-<pre><code>    /**
-     * constructor with jni env
-     *
-     * @param env  jni env
-     */
-    CarbonSchemaReader(JNIEnv *env);
-</code></pre>
-<pre><code>    /**
-     * read schema from path,
-     * path can be folder path, carbonindex file path, and carbondata file path
-     * and will not check all files schema
-     *
-     * @param path file/folder path
-     * @return schema
-     */
-    jobject readSchema(char *path);
-</code></pre>
-<pre><code>    /**
-     *  read schema from path,
-     *  path can be folder path, carbonindex file path, and carbondata file path
-     *  and user can decide whether check all files schema
-     *
-     * @param path carbon data path
-     * @param validateSchema whether check all files schema
-     * @return schema
-     */
-    jobject readSchema(char *path, bool validateSchema);
+<pre><code>/**
+ * Constructor with jni env
+ *
+ * @param env  jni env
+ */
+CarbonSchemaReader(JNIEnv *env);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and will not check all files' schemas
+ *
+ * @param path file/folder path
+ * @return schema
+ */
+jobject readSchema(char *path);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and the user can decide whether to check all files' schemas
+ *
+ * @param path carbon data path
+ * @param validateSchema whether to check all files' schemas
+ * @return schema
+ */
+jobject readSchema(char *path, bool validateSchema);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and will not check all files' schemas
+ *
+ * @param path file/folder path
+ * @param conf           configuration support; can set the s3a access key, secret key,
+ *                       endpoint and other configuration with it
+ * @return schema
+ */
+jobject readSchema(char *path, Configuration conf);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and the user can decide whether to check all files' schemas
+ *
+ * @param path carbon data path
+ * @param validateSchema whether to check all files' schemas
+ * @param conf           configuration support; can set the s3a access key, secret key,
+ *                       endpoint and other configuration with it
+ * @return schema
+ */
+jobject readSchema(char *path, bool validateSchema, Configuration conf);
 </code></pre>
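+<p>A brief sketch (illustrative, not part of this commit): assuming <code>env</code> is a valid
+<code>JNIEnv*</code> and the path below is a placeholder store path, reading a schema could look like:</p>
+<pre><code>CarbonSchemaReader schemaReader(env);
+// Read the schema from the given path without validating every file's schema.
+jobject schema = schemaReader.readSchema((char *) "/tmp/carbon-data");
+</code></pre>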
 <h3>
 <a id="schema" class="anchor" href="#schema" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Schema</h3>
-<pre><code> /**
-     * constructor with jni env and carbon schema data
-     *
-     * @param env jni env
-     * @param schema  carbon schema data
-     */
-    Schema(JNIEnv *env, jobject schema);
-</code></pre>
-<pre><code>    /**
-     * get fields length of schema
-     *
-     * @return fields length
-     */
-    int getFieldsLength();
-</code></pre>
-<pre><code>    /**
-     * get field name by ordinal
-     *
-     * @param ordinal the data index of carbon schema
-     * @return ordinal field name
-     */
-    char *getFieldName(int ordinal);
-</code></pre>
-<pre><code>    /**
-     * get  field data type name by ordinal
-     *
-     * @param ordinal the data index of carbon schema
-     * @return ordinal field data type name
-     */
-    char *getFieldDataTypeName(int ordinal);
-</code></pre>
-<pre><code>    /**
-     * get  array child element data type name by ordinal
-     *
-     * @param ordinal the data index of carbon schema
-     * @return ordinal array child element data type name
-     */
-    char *getArrayElementTypeName(int ordinal);
+<pre><code>/**
+ * Constructor with jni env and carbon schema data
+ *
+ * @param env jni env
+ * @param schema  carbon schema data
+ */
+Schema(JNIEnv *env, jobject schema);
+</code></pre>
+<pre><code>/**
+ * Get fields length of schema
+ *
+ * @return fields length
+ */
+int getFieldsLength();
+</code></pre>
+<pre><code>/**
+ * Get field name by ordinal
+ *
+ * @param ordinal the data index of carbon schema
+ * @return ordinal field name
+ */
+char *getFieldName(int ordinal);
+</code></pre>
+<pre><code>/**
+ * Get field data type name by ordinal
+ *
+ * @param ordinal the data index of carbon schema
+ * @return ordinal field data type name
+ */
+char *getFieldDataTypeName(int ordinal);
+</code></pre>
+<pre><code>/**
+ * Get array child element data type name by ordinal
+ *
+ * @param ordinal the data index of carbon schema
+ * @return ordinal array child element data type name
+ */
+char *getArrayElementTypeName(int ordinal);
 </code></pre>
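+<p>Continuing the sketch above (illustrative, not part of this commit): the <code>jobject</code> returned by
+<code>readSchema</code> can be wrapped in a <code>Schema</code> to inspect its fields:</p>
+<pre><code>Schema carbonSchema(env, schema);
+for (int i = 0; i &lt; carbonSchema.getFieldsLength(); i++) {
+    // Print each field's name and data type name (printf from &lt;cstdio&gt;).
+    printf("%s: %s\n", carbonSchema.getFieldName(i), carbonSchema.getFieldDataTypeName(i));
+}
+</code></pre>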
 <h3>
 <a id="carbonproperties" class="anchor" href="#carbonproperties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonProperties</h3>
-<pre><code>  /**
-     * Constructor of CarbonProperties
-     *
-     * @param env JNI env
-     */
-    CarbonProperties(JNIEnv *env);
-</code></pre>
-<pre><code>    /**
-     * This method will be used to add a new property
-     * 
-     * @param key property key
-     * @param value property value
-     * @return CarbonProperties object
-     */
-    jobject addProperty(char *key, char *value);
-</code></pre>
-<pre><code>    /**
-     * This method will be used to get the properties value
-     *
-     * @param key  property key
-     * @return  property value
-     */
-    char *getProperty(char *key);
-</code></pre>
-<pre><code>    /**
-     * This method will be used to get the properties value
-     * if property is not present then it will return the default value
-     *
-     * @param key  property key
-     * @param defaultValue  property default Value
-     * @return
-     */
-    char *getProperty(char *key, char *defaultValue);
+<pre><code>/**
+ * Constructor of CarbonProperties
+ *
+ * @param env JNI env
+ */
+CarbonProperties(JNIEnv *env);
+</code></pre>
+<pre><code>/**
+ * This method will be used to add a new property
+ * 
+ * @param key property key
+ * @param value property value
+ * @return CarbonProperties object
+ */
+jobject addProperty(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * This method will be used to get the properties value
+ *
+ * @param key property key
+ * @return property value
+ */
+char *getProperty(char *key);
+</code></pre>
+<pre><code>/**
+ * This method will be used to get the properties value
+ * if property is not present then it will return the default value
+ *
+ * @param key  property key
+ * @param defaultValue  property default value
+ * @return property value if present, otherwise the default value
+ */
+char *getProperty(char *key, char *defaultValue);
 </code></pre>
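+<p>A brief sketch (illustrative, not part of this commit) of a <code>CarbonProperties</code> round trip;
+the property key shown is only an example:</p>
+<pre><code>CarbonProperties properties(env);
+properties.addProperty((char *) "carbon.unsafe.working.memory.in.mb", (char *) "1024");
+// getProperty with a default falls back to it when the key is absent.
+char *value = properties.getProperty((char *) "carbon.unsafe.working.memory.in.mb", (char *) "512");
+</code></pre>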
 <script>
 $(function() {
diff --git a/content/WEB-INF/classes/META-INF/NOTICE b/content/WEB-INF/classes/META-INF/NOTICE
index 531cd4e..0d64141 100644
--- a/content/WEB-INF/classes/META-INF/NOTICE
+++ b/content/WEB-INF/classes/META-INF/NOTICE
@@ -1,6 +1,6 @@
 
 Apache CarbonData :: Website
-Copyright 2018 The Apache Software Foundation
+Copyright 2019 The Apache Software Foundation
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).
diff --git a/content/WEB-INF/classes/application.conf b/content/WEB-INF/classes/application.conf
index 2f1b695..430fa47 100644
--- a/content/WEB-INF/classes/application.conf
+++ b/content/WEB-INF/classes/application.conf
@@ -18,7 +18,10 @@ fileList=["configuration-parameters",
   "introduction",
   "usecases",
   "csdk-guide",
-  "carbon-as-spark-datasource-guide"
+  "carbon-as-spark-datasource-guide",
+  "alluxio-guide",
+  "hive-guide",
+  "presto-guide"
   ]
 dataMapFileList=[
   "bloomfilter-datamap-guide",
diff --git a/src/main/webapp/bloomfilter-datamap-guide.html b/content/alluxio-guide.html
similarity index 58%
copy from src/main/webapp/bloomfilter-datamap-guide.html
copy to content/alluxio-guide.html
index aab8dc0..037f29d 100644
--- a/src/main/webapp/bloomfilter-datamap-guide.html
+++ b/content/alluxio-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -219,161 +219,163 @@
                                 <div class="col-sm-12  col-md-12">
                                     <div>
 <h1>
-<a id="carbondata-bloomfilter-datamap" class="anchor" href="#carbondata-bloomfilter-datamap" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonData BloomFilter DataMap</h1>
+<a id="alluxio-guide" class="anchor" href="#alluxio-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Alluxio guide</h1>
+<p>This tutorial provides a brief introduction to using CarbonData with Alluxio.</p>
+<ul>
+<li>How to use Alluxio in CarbonData?
 <ul>
-<li><a href="#datamap-management">DataMap Management</a></li>
-<li><a href="#bloomfilter-datamap-introduction">BloomFilter Datamap Introduction</a></li>
-<li><a href="#loading-data">Loading Data</a></li>
-<li><a href="#querying-data">Querying Data</a></li>
-<li><a href="#data-management-with-bloomfilter-datamap">Data Management</a></li>
-<li><a href="#useful-tips">Useful Tips</a></li>
+<li><a href="#running-alluxio-example-in-carbondata-project-by-idea">Running alluxio example in CarbonData project by IDEA</a></li>
+<li><a href="#carbondata-supports-alluxio-by-spark-shell">CarbonData supports alluxio by spark-shell</a></li>
+<li><a href="#carbondata-supports-alluxio-by-spark-submit">CarbonData supports alluxio by spark-submit</a></li>
 </ul>
-<h4>
-<a id="datamap-management" class="anchor" href="#datamap-management" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DataMap Management</h4>
-<p>Creating BloomFilter DataMap</p>
-<pre><code>CREATE DATAMAP [IF NOT EXISTS] datamap_name
-ON TABLE main_table
-USING 'bloomfilter'
-DMPROPERTIES ('index_columns'='city, name', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')
-</code></pre>
-<p>Dropping specified datamap</p>
-<pre><code>DROP DATAMAP [IF EXISTS] datamap_name
-ON TABLE main_table
+</li>
+</ul>
+<h2>
+<a id="running-alluxio-example-in-carbondata-project-by-idea" class="anchor" href="#running-alluxio-example-in-carbondata-project-by-idea" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running alluxio example in CarbonData project by IDEA</h2>
+<h3>
+<a id="building-carbondata" class="anchor" href="#building-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a><a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>
+</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>.</li>
+<li>Users need to install IDEA and the Scala plugin, and import the CarbonData project.</li>
+</ul>
+<h3>
+<a id="installing-and-starting-alluxio" class="anchor" href="#installing-and-starting-alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and starting Alluxio</h3>
+<ul>
+<li>Please refer to <a href="https://www.alluxio.org/docs/1.8/en/Getting-Started.html#starting-alluxio" rel="nofollow">https://www.alluxio.org/docs/1.8/en/Getting-Started.html#starting-alluxio</a>
+</li>
+<li>Access the Alluxio web: <a href="http://localhost:19999/home" rel="nofollow">http://localhost:19999/home</a>
+</li>
+</ul>
+<h3>
+<a id="running-example" class="anchor" href="#running-example" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running Example</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/AlluxioExample.scala" target=_blank>AlluxioExample</a>
+</li>
+</ul>
+<h2>
+<a id="carbondata-supports-alluxio-by-spark-shell" class="anchor" href="#carbondata-supports-alluxio-by-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonData supports alluxio by spark-shell</h2>
+<h3>
+<a id="building-carbondata-1" class="anchor" href="#building-carbondata-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a><a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>
+</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>.</li>
+</ul>
+<h3>
+<a id="preparing-spark" class="anchor" href="#preparing-spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Preparing Spark</h3>
+<ul>
+<li>Please refer to <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">http://spark.apache.org/docs/latest/</a>
+</li>
+</ul>
+<h3>
+<a id="downloading-alluxio-and-uncompressing-it" class="anchor" href="#downloading-alluxio-and-uncompressing-it" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Downloading alluxio and uncompressing it</h3>
+<ul>
+<li>Please refer to <a href="https://www.alluxio.org/download" target=_blank rel="nofollow">https://www.alluxio.org/download</a>
+</li>
+</ul>
+<h3>
+<a id="running-spark-shell" class="anchor" href="#running-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running spark-shell</h3>
+<ul>
+<li>Run the following command from the Spark path</li>
+</ul>
+<pre lang="$command"><code>./bin/spark-shell --jars ${CARBONDATA_PATH}/assembly/target/scala-2.11/apache-carbondata-1.6.0-SNAPSHOT-bin-spark2.2.1-hadoop2.7.2.jar,${ALLUXIO_PATH}/client/alluxio-1.8.1-client.jar
 </code></pre>
-<p>Showing all DataMaps on this table</p>
-<pre><code>SHOW DATAMAP
-ON TABLE main_table
+<ul>
+<li>Test using alluxio with CarbonSession</li>
+</ul>
+<pre lang="$scala"><code>import org.apache.spark.sql.CarbonSession._
+import org.apache.spark.sql.SparkSession
+   
+val carbon = SparkSession.builder().master("local").appName("test").getOrCreateCarbonSession("alluxio://localhost:19998/carbondata");
+carbon.sql("CREATE TABLE carbon_alluxio(id String,name String, city String,age Int) STORED as carbondata");
+carbon.sql(s"LOAD DATA LOCAL INPATH '${CARBONDATA_PATH}/integration/spark-common-test/src/test/resources/sample.csv' into table carbon_alluxio");
+carbon.sql("select * from carbon_alluxio").show
 </code></pre>
-<p>Disable Datamap</p>
-<blockquote>
-<p>The datamap by default is enabled. To support tuning on query, we can disable a specific datamap during query to observe whether we can gain performance enhancement from it. This is effective only for current session.</p>
-</blockquote>
-<pre><code>// disable the datamap
-SET carbon.datamap.visible.dbName.tableName.dataMapName = false
-// enable the datamap
-SET carbon.datamap.visible.dbName.tableName.dataMapName = true
+<ul>
+<li>Result</li>
+</ul>
+<pre lang="$scala"><code>scala&gt; carbon.sql("select * from carbon_alluxio").show
++---+------+---------+---+
+| id|  name|     city|age|
++---+------+---------+---+
+|  1| david| shenzhen| 31|
+|  2| eason| shenzhen| 27|
+|  3| jarry|    wuhan| 35|
+|  3| jarry|Bangalore| 35|
+|  4| kunal|    Delhi| 26|
+|  4|vishal|Bangalore| 29|
++---+------+---------+---+
 </code></pre>
 <h2>
-<a id="bloomfilter-datamap-introduction" class="anchor" href="#bloomfilter-datamap-introduction" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>BloomFilter DataMap Introduction</h2>
-<p>A Bloom filter is a space-efficient probabilistic data structure that is used to test whether an element is a member of a set.
-Carbondata introduced BloomFilter as an index datamap to enhance the performance of querying with precise value.
-It is well suitable for queries that do precise match on high cardinality columns(such as Name/ID).
-Internally, CarbonData maintains a BloomFilter per blocklet for each index column to indicate that whether a value of the column is in this blocklet.
-Just like the other datamaps, BloomFilter datamap is managed along with main tables by CarbonData.
-User can create BloomFilter datamap on specified columns with specified BloomFilter configurations such as size and probability.</p>
-<p>For instance, main table called <strong>datamap_test</strong> which is defined as:</p>
-<pre><code>CREATE TABLE datamap_test (
-  id string,
-  name string,
-  age int,
-  city string,
-  country string)
-STORED AS carbondata
-TBLPROPERTIES('SORT_COLUMNS'='id')
+<a id="carbondata-supports-alluxio-by-spark-submit" class="anchor" href="#carbondata-supports-alluxio-by-spark-submit" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonData supports alluxio by spark-submit</h2>
+<h3>
+<a id="building-carbondata-2" class="anchor" href="#building-carbondata-2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a><a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>
+</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>.</li>
+</ul>
+<h3>
+<a id="preparing-spark-1" class="anchor" href="#preparing-spark-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Preparing Spark</h3>
+<ul>
+<li>Please refer to <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">http://spark.apache.org/docs/latest/</a>
+</li>
+</ul>
+<h3>
+<a id="downloading-alluxio-and-uncompressing-it-1" class="anchor" href="#downloading-alluxio-and-uncompressing-it-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Downloading alluxio and uncompressing it</h3>
+<ul>
+<li>Please refer to <a href="https://www.alluxio.org/download" target=_blank rel="nofollow">https://www.alluxio.org/download</a>
+</li>
+</ul>
+<h3>
+<a id="running-spark-submit" class="anchor" href="#running-spark-submit" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running spark-submit</h3>
+<h4>
+<a id="upload-data-to-alluxio" class="anchor" href="#upload-data-to-alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Upload data to alluxio</h4>
+<pre lang="$command"><code>./bin/alluxio fs  copyFromLocal ${CARBONDATA_PATH}/hadoop/src/test/resources/data.csv /
 </code></pre>
-<p>In the above example, <code>id</code> and <code>name</code> are high cardinality columns
-and we always query on <code>id</code> and <code>name</code> with precise value.
-since <code>id</code> is in the sort_columns and it is orderd,
-query on it will be fast because CarbonData can skip all the irrelative blocklets.
-But queries on <code>name</code> may be bad since the blocklet minmax may not help,
-because in each blocklet the range of the value of <code>name</code> may be the same -- all from A* to z*.
-In this case, user can create a BloomFilter datamap on column <code>name</code>.
-Moreover, user can also create a BloomFilter datamap on the sort_columns.
-This is useful if user has too many segments and the range of the value of sort_columns are almost the same.</p>
-<p>User can create BloomFilter datamap using the Create DataMap DDL:</p>
-<pre><code>CREATE DATAMAP dm
-ON TABLE datamap_test
-USING 'bloomfilter'
-DMPROPERTIES ('INDEX_COLUMNS' = 'name,id', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001', 'BLOOM_COMPRESS'='true')
+<h4>
+<a id="command" class="anchor" href="#command" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Command</h4>
+<pre lang="$command"><code>./bin/spark-submit \
+--master local \
+--jars ${ALLUXIO_PATH}/client/alluxio-1.8.1-client.jar,${CARBONDATA_PATH}/examples/spark2/target/carbondata-examples-1.6.0-SNAPSHOT.jar \
+--class org.apache.carbondata.examples.AlluxioExample \
+${CARBONDATA_PATH}/assembly/target/scala-2.11/apache-carbondata-1.6.0-SNAPSHOT-bin-spark2.2.1-hadoop2.7.2.jar \
+false
+</code></pre>
+<p><strong>NOTE</strong>: Please set runShell to false to avoid a dependency on the alluxio shell module.</p>
+<h4>
+<a id="result" class="anchor" href="#result" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Result</h4>
+<pre lang="$command"><code>+-----------------+-------+--------------------+--------------------+---------+-----------+---------+----------+
+|SegmentSequenceId| Status|     Load Start Time|       Load End Time|Merged To|File Format|Data Size|Index Size|
++-----------------+-------+--------------------+--------------------+---------+-----------+---------+----------+
+|                1|Success|2019-01-09 15:10:...|2019-01-09 15:10:...|       NA|COLUMNAR_V3|  23.92KB|    1.07KB|
+|                0|Success|2019-01-09 15:10:...|2019-01-09 15:10:...|       NA|COLUMNAR_V3|  23.92KB|    1.07KB|
++-----------------+-------+--------------------+--------------------+---------+-----------+---------+----------+
+
++-------+------+
+|country|amount|
++-------+------+
+| france|   202|
+|  china|  1698|
++-------+------+
+
++-----------------+---------+--------------------+--------------------+---------+-----------+---------+----------+
+|SegmentSequenceId|   Status|     Load Start Time|       Load End Time|Merged To|File Format|Data Size|Index Size|
++-----------------+---------+--------------------+--------------------+---------+-----------+---------+----------+
+|                3|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.03KB|
+|                2|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.07KB|
+|                1|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.07KB|
+|              0.1|  Success|2019-01-09 15:10:...|2019-01-09 15:10:...|       NA|COLUMNAR_V3|  37.65KB|    1.08KB|
+|                0|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.07KB|
++-----------------+---------+--------------------+--------------------+---------+-----------+---------+----------+
+
 </code></pre>
-<p><strong>Properties for BloomFilter DataMap</strong></p>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Is Required</th>
-<th>Default Value</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>INDEX_COLUMNS</td>
-<td>YES</td>
-<td></td>
-<td>Carbondata will generate BloomFilter index on these columns. Queries on these columns are usually like 'COL = VAL'.</td>
-</tr>
-<tr>
-<td>BLOOM_SIZE</td>
-<td>NO</td>
-<td>640000</td>
-<td>This value is internally used by BloomFilter as the number of expected insertions, it will affect the size of BloomFilter index. Since each blocklet has a BloomFilter here, so the default value is the approximate distinct index values in a blocklet assuming that each blocklet contains 20 pages and each page contains 32000 records. The value should be an integer.</td>
-</tr>
-<tr>
-<td>BLOOM_FPP</td>
-<td>NO</td>
-<td>0.00001</td>
-<td>This value is internally used by BloomFilter as the False-Positive Probability, it will affect the size of bloomfilter index as well as the number of hash functions for the BloomFilter. The value should be in the range (0, 1). In one test scenario, a 96GB TPCH customer table with bloom_size=320000 and bloom_fpp=0.00001 will result in 18 false positive samples.</td>
-</tr>
-<tr>
-<td>BLOOM_COMPRESS</td>
-<td>NO</td>
-<td>true</td>
-<td>Whether to compress the BloomFilter index files.</td>
-</tr>
-</tbody>
-</table>
-<h2>
-<a id="loading-data" class="anchor" href="#loading-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data</h2>
-<p>When loading data to main table, BloomFilter files will be generated for all the
-index_columns given in DMProperties which contains the blockletId and a BloomFilter for each index column.
-These index files will be written inside a folder named with datamap name
-inside each segment folders.</p>
-<h2>
-<a id="querying-data" class="anchor" href="#querying-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Querying Data</h2>
-<p>User can verify whether a query can leverage BloomFilter datamap by executing <code>EXPLAIN</code> command,
-which will show the transformed logical plan, and thus user can check whether the BloomFilter datamap can skip blocklets during the scan.
-If the datamap does not prune blocklets well, you can try to increase the value of property <code>BLOOM_SIZE</code> and decrease the value of property <code>BLOOM_FPP</code>.</p>
-<h2>
-<a id="data-management-with-bloomfilter-datamap" class="anchor" href="#data-management-with-bloomfilter-datamap" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Data Management With BloomFilter DataMap</h2>
-<p>Data management with BloomFilter datamap has no difference with that on Lucene datamap.
-You can refer to the corresponding section in <code>CarbonData Lucene DataMap</code>.</p>
 <h2>
-<a id="useful-tips" class="anchor" href="#useful-tips" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Useful Tips</h2>
-<ul>
-<li>BloomFilter DataMap is suggested to be created on the high cardinality columns.
-Query conditions on these columns are always simple <code>equal</code> or <code>in</code>,
-such as 'col1=XX', 'col1 in (XX, YY)'.</li>
-<li>We can create multiple BloomFilter datamaps on one table,
-but we do recommend you to create one BloomFilter datamap that contains multiple index columns,
-because the data loading and query performance will be better.</li>
-<li>
-<code>BLOOM_FPP</code> is only the expected number from user, the actually FPP may be worse.
-If the BloomFilter datamap does not work well,
-you can try to increase <code>BLOOM_SIZE</code> and decrease <code>BLOOM_FPP</code> at the same time.
-Notice that bigger <code>BLOOM_SIZE</code> will increase the size of index file
-and smaller <code>BLOOM_FPP</code> will increase runtime calculation while performing query.</li>
-<li>'0' skipped blocklets of BloomFilter datamap in explain output indicates that
-BloomFilter datamap does not prune better than Main datamap.
-(For example since the data is not ordered, a specific value may be contained in many blocklets. In this case, bloom may not work better than Main DataMap.)
-If this occurs very often, it means that current BloomFilter is useless. You can disable or drop it.
-Sometimes we cannot see any pruning result about BloomFilter datamap in the explain output,
-this indicates that the previous datamap has pruned all the blocklets and there is no need to continue pruning.</li>
-<li>In some scenarios, the BloomFilter datamap may not enhance the query performance significantly
-but if it can reduce the number of spark task,
-there is still a chance that BloomFilter datamap can enhance the performance for concurrent query.</li>
-<li>Note that BloomFilter datamap will decrease the data loading performance and may cause slightly storage expansion (for datamap index file).</li>
-</ul>
+<a id="reference" class="anchor" href="#reference" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Reference</h2>
+<p>[1] <a href="https://www.alluxio.org/docs/1.8/en/Getting-Started.html" target=_blank rel="nofollow">https://www.alluxio.org/docs/1.8/en/Getting-Started.html</a>
+[2] <a href="https://www.alluxio.org/docs/1.8/en/compute/Spark.html" target=_blank rel="nofollow">https://www.alluxio.org/docs/1.8/en/compute/Spark.html</a></p>
 <script>
-$(function() {
-  // Show selected style on nav item
-  $('.b-nav__datamap').addClass('selected');
-  
-  if (!$('.b-nav__datamap').parent().hasClass('nav__item__with__subs--expanded')) {
-    // Display datamap subnav items
-    $('.b-nav__datamap').parent().toggleClass('nav__item__with__subs--expanded');
-  }
-});
+// Show selected style on nav item
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
 </script></div>
 </div>
 </div>
diff --git a/content/bloomfilter-datamap-guide.html b/content/bloomfilter-datamap-guide.html
index aab8dc0..b9a073f 100644
--- a/content/bloomfilter-datamap-guide.html
+++ b/content/bloomfilter-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -236,7 +236,7 @@ ON TABLE main_table
 USING 'bloomfilter'
 DMPROPERTIES ('index_columns'='city, name', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')
 </code></pre>
-<p>Dropping specified datamap</p>
+<p>Dropping Specified DataMap</p>
 <pre><code>DROP DATAMAP [IF EXISTS] datamap_name
 ON TABLE main_table
 </code></pre>
@@ -244,7 +244,7 @@ ON TABLE main_table
 <pre><code>SHOW DATAMAP
 ON TABLE main_table
 </code></pre>
-<p>Disable Datamap</p>
+<p>Disable DataMap</p>
 <blockquote>
 <p>The datamap by default is enabled. To support tuning on query, we can disable a specific datamap during query to observe whether we can gain performance enhancement from it. This is effective only for current session.</p>
 </blockquote>
@@ -277,10 +277,10 @@ since <code>id</code> is in the sort_columns and it is orderd,
 query on it will be fast because CarbonData can skip all the irrelative blocklets.
 But queries on <code>name</code> may be bad since the blocklet minmax may not help,
 because in each blocklet the range of the value of <code>name</code> may be the same -- all from A* to z*.
-In this case, user can create a BloomFilter datamap on column <code>name</code>.
-Moreover, user can also create a BloomFilter datamap on the sort_columns.
+In this case, user can create a BloomFilter DataMap on column <code>name</code>.
+Moreover, user can also create a BloomFilter DataMap on the sort_columns.
 This is useful if user has too many segments and the range of the value of sort_columns are almost the same.</p>
-<p>User can create BloomFilter datamap using the Create DataMap DDL:</p>
+<p>User can create BloomFilter DataMap using the Create DataMap DDL:</p>
 <pre><code>CREATE DATAMAP dm
 ON TABLE datamap_test
 USING 'bloomfilter'
@@ -327,16 +327,16 @@ DMPROPERTIES ('INDEX_COLUMNS' = 'name,id', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0
 <a id="loading-data" class="anchor" href="#loading-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data</h2>
 <p>When loading data to main table, BloomFilter files will be generated for all the
 index_columns given in DMProperties which contains the blockletId and a BloomFilter for each index column.
-These index files will be written inside a folder named with datamap name
+These index files will be written inside a folder named with DataMap name
 inside each segment folders.</p>
 <h2>
 <a id="querying-data" class="anchor" href="#querying-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Querying Data</h2>
-<p>User can verify whether a query can leverage BloomFilter datamap by executing <code>EXPLAIN</code> command,
-which will show the transformed logical plan, and thus user can check whether the BloomFilter datamap can skip blocklets during the scan.
-If the datamap does not prune blocklets well, you can try to increase the value of property <code>BLOOM_SIZE</code> and decrease the value of property <code>BLOOM_FPP</code>.</p>
+<p>User can verify whether a query can leverage BloomFilter DataMap by executing <code>EXPLAIN</code> command,
+which will show the transformed logical plan, and thus user can check whether the BloomFilter DataMap can skip blocklets during the scan.
+If the DataMap does not prune blocklets well, you can try to increase the value of property <code>BLOOM_SIZE</code> and decrease the value of property <code>BLOOM_FPP</code>.</p>
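+<p>For example (an illustrative query, not part of this commit), a check against the
+<code>datamap_test</code> table defined above could be:</p>
+<pre><code>EXPLAIN SELECT * FROM datamap_test WHERE name = 'n10'
+</code></pre>
+<p>The pruning information in the plan output shows how many blocklets each DataMap skipped.</p>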
 <h2>
 <a id="data-management-with-bloomfilter-datamap" class="anchor" href="#data-management-with-bloomfilter-datamap" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Data Management With BloomFilter DataMap</h2>
-<p>Data management with BloomFilter datamap has no difference with that on Lucene datamap.
+<p>Data management with BloomFilter DataMap has no difference with that on Lucene DataMap.
 You can refer to the corresponding section in <code>CarbonData Lucene DataMap</code>.</p>
 <h2>
 <a id="useful-tips" class="anchor" href="#useful-tips" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Useful Tips</h2>
@@ -344,25 +344,25 @@ You can refer to the corresponding section in <code>CarbonData Lucene DataMap</c
 <li>BloomFilter DataMap is suggested to be created on the high cardinality columns.
 Query conditions on these columns are always simple <code>equal</code> or <code>in</code>,
 such as 'col1=XX', 'col1 in (XX, YY)'.</li>
-<li>We can create multiple BloomFilter datamaps on one table,
-but we do recommend you to create one BloomFilter datamap that contains multiple index columns,
+<li>We can create multiple BloomFilter DataMaps on one table,
+but we do recommend you to create one BloomFilter DataMap that contains multiple index columns,
 because the data loading and query performance will be better.</li>
 <li>
 <code>BLOOM_FPP</code> is only the expected number from user, the actually FPP may be worse.
-If the BloomFilter datamap does not work well,
+If the BloomFilter DataMap does not work well,
 you can try to increase <code>BLOOM_SIZE</code> and decrease <code>BLOOM_FPP</code> at the same time.
 Notice that a bigger <code>BLOOM_SIZE</code> will increase the size of the index file
 and a smaller <code>BLOOM_FPP</code> will increase the runtime calculation while performing queries.</li>
-<li>'0' skipped blocklets of BloomFilter datamap in explain output indicates that
-BloomFilter datamap does not prune better than Main datamap.
+<li>'0' skipped blocklets of BloomFilter DataMap in the explain output indicates that
+the BloomFilter DataMap does not prune better than the Main DataMap.
 (For example, since the data is not ordered, a specific value may be contained in many blocklets. In this case, bloom may not work better than the Main DataMap.)
 If this occurs very often, it means that the current BloomFilter is useless. You can disable or drop it.
-Sometimes we cannot see any pruning result about BloomFilter datamap in the explain output,
-this indicates that the previous datamap has pruned all the blocklets and there is no need to continue pruning.</li>
-<li>In some scenarios, the BloomFilter datamap may not enhance the query performance significantly
+Sometimes we cannot see any pruning result for the BloomFilter DataMap in the explain output;
+this indicates that the previous DataMap has pruned all the blocklets and there is no need to continue pruning.</li>
+<li>In some scenarios, the BloomFilter DataMap may not enhance the query performance significantly,
 but if it can reduce the number of Spark tasks,
-there is still a chance that BloomFilter datamap can enhance the performance for concurrent query.</li>
-<li>Note that BloomFilter datamap will decrease the data loading performance and may cause slightly storage expansion (for datamap index file).</li>
+there is still a chance that the BloomFilter DataMap can enhance the performance for concurrent queries.</li>
+<li>Note that the BloomFilter DataMap will decrease the data loading performance and may cause slight storage expansion (for the DataMap index file).</li>
 </ul>
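+<p>As a sketch of the multi-column tip above (the DataMap name <code>dm_multi</code> is illustrative), one DataMap covering several index columns can be created on the earlier example table as follows:</p>
+<pre><code>CREATE DATAMAP dm_multi
+ON TABLE datamap_test
+USING 'bloomfilter'
+DMPROPERTIES ('INDEX_COLUMNS' = 'name,id', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')
+</code></pre>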
 <script>
 $(function() {
diff --git a/content/carbon-as-spark-datasource-guide.html b/content/carbon-as-spark-datasource-guide.html
index 9ffca8f..43698e5 100644
--- a/content/carbon-as-spark-datasource-guide.html
+++ b/content/carbon-as-spark-datasource-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/configuration-parameters.html b/content/configuration-parameters.html
index 5cc7a45..6c48b5e 100644
--- a/content/configuration-parameters.html
+++ b/content/configuration-parameters.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -382,8 +382,7 @@
 <tr>
 <td>carbon.load.sort.scope</td>
 <td>LOCAL_SORT</td>
-<td>CarbonData can support various sorting options to match the balance between load and query performance. LOCAL_SORT:All the data given to an executor in the single load is fully sorted and written to carbondata files. Data loading performance is reduced a little as the entire data needs to be sorted in the executor. BATCH_SORT:Sorts the data in batches of configured size and writes to carbondata files. Data loading performance increases as the entire data need not be sorted. But query [...]
-</td>
+<td>CarbonData can support various sorting options to match the balance between load and query performance. LOCAL_SORT: All the data given to an executor in a single load is fully sorted and written to carbondata files. Data loading performance is reduced a little as the entire data needs to be sorted in the executor. BATCH_SORT: Sorts the data in batches of the configured size and writes to carbondata files. Data loading performance increases as the entire data need not be sorted. But query [...]
 </tr>
 <tr>
 <td>carbon.load.batch.sort.size.inmb</td>
@@ -534,7 +533,7 @@
 <tr>
 <td>carbon.column.compressor</td>
 <td>snappy</td>
-<td>CarbonData will compress the column values using the compressor specified by this configuration. Currently CarbonData supports 'snappy' and 'zstd' compressors.</td>
+<td>CarbonData will compress the column values using the compressor specified by this configuration. Currently CarbonData supports 'snappy', 'zstd' and 'gzip' compressors.</td>
 </tr>
 <tr>
 <td>carbon.minmax.allowed.byte.count</td>
@@ -702,7 +701,7 @@
 <tr>
 <td>carbon.detail.batch.size</td>
 <td>100</td>
-<td>The buffer size to store records, returned from the block scan. In limit scenario this parameter is very important. For example your query limit is 1000. But if we set this value to 3000 that means we get 3000 records from scan but spark will only take 1000 rows. So the 2000 remaining are useless. In one Finance test case after we set it to 100, in the limit 1000 scenario the performance increase about 2 times in comparison to if we set this value to 12000.</td>
+<td>The buffer size to store records returned from the block scan. In limit scenarios this parameter is very important. For example, suppose your query limit is 1000: if we set this value to 3000, we get 3000 records from the scan but Spark will only take 1000 rows, so the remaining 2000 are useless. In one finance test case, after we set it to 100, the limit-1000 scenario performed about 2 times better than with this value set to 12000.<br><br> <strong>NOTE</stro [...]
 </tr>
 <tr>
 <td>carbon.enable.vector.reader</td>
@@ -730,11 +729,6 @@
 <td>CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations. This configuration enables to use unsafe functions in CarbonData while scanning the  data during query.</td>
 </tr>
 <tr>
-<td>carbon.query.validate.direct.query.on.datamap</td>
-<td>true</td>
-<td>CarbonData supports creating pre-aggregate table datamaps as an independent tables. For some debugging purposes, it might be required to directly query from such datamap tables. This configuration allows to query on such datamaps.</td>
-</tr>
-<tr>
 <td>carbon.max.driver.threads.for.block.pruning</td>
 <td>4</td>
 <td>Number of threads used for driver pruning when there are more than 100k carbon files. This configuration can be used to set the number of threads between 1 and 4.</td>
@@ -888,11 +882,15 @@
 </tr>
 <tr>
 <td>carbon.options.sort.scope</td>
-<td>Specifies how the current data load should be sorted with. <strong>NOTE:</strong> Refer to <a href="#data-loading-configuration">Data Loading Configuration</a>#carbon.sort.scope for detailed information.</td>
+<td>Specifies how the current data load should be sorted. This sort parameter is at the table level. <strong>NOTE:</strong> Refer to <a href="#data-loading-configuration">Data Loading Configuration</a>#carbon.sort.scope for detailed information.</td>
+</tr>
+<tr>
+<td>carbon.table.load.sort.scope.db_name.table_name</td>
+<td>Overrides the SORT_SCOPE provided in CREATE TABLE.</td>
 </tr>
 <tr>
 <td>carbon.options.global.sort.partitions</td>
-<td></td>
+<td>Specifies the number of partitions to be used during global sort.</td>
 </tr>
 <tr>
 <td>carbon.options.serialization.null.format</td>
@@ -900,7 +898,7 @@
 </tr>
 <tr>
 <td>carbon.query.directQueryOnDataMap.enabled</td>
-<td>Specifies whether datamap can be queried directly. This is useful for debugging purposes.**NOTE: **Refer to <a href="#query-configuration">Query Configuration</a>#carbon.query.validate.direct.query.on.datamap for detailed information.</td>
+<td>Specifies whether a datamap can be queried directly. This is useful for debugging purposes. <strong>NOTE:</strong> Refer to <a href="#query-configuration">Query Configuration</a> for detailed information.</td>
 </tr>
 </tbody>
 </table>
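+<p>As an illustrative sketch (the database name <code>db1</code> and table name <code>tbl1</code> are placeholders), such dynamically configurable properties can be set for the current session with the SET command:</p>
+<pre><code>SET carbon.options.sort.scope=LOCAL_SORT;
+SET carbon.table.load.sort.scope.db1.tbl1=GLOBAL_SORT;
+</code></pre>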
diff --git a/content/datamap-developer-guide.html b/content/datamap-developer-guide.html
index 286c21d..b0cb182 100644
--- a/content/datamap-developer-guide.html
+++ b/content/datamap-developer-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -217,15 +217,16 @@
                         <div id="viewpage" name="viewpage">
                             <div class="row">
                                 <div class="col-sm-12  col-md-12">
-                                    <div><h1>
+                                    <div>
+<h1>
 <a id="datamap-developer-guide" class="anchor" href="#datamap-developer-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DataMap Developer Guide</h1>
 <h3>
 <a id="introduction" class="anchor" href="#introduction" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Introduction</h3>
 <p>DataMap is a data structure that can be used to accelerate certain queries on a table. Different DataMaps can be implemented by developers.
-Currently, there are two 2 types of DataMap supported:</p>
+Currently, there are two types of DataMap supported:</p>
 <ol>
-<li>IndexDataMap: DataMap that leverages index to accelerate filter query</li>
-<li>MVDataMap: DataMap that leverages Materialized View to accelerate olap style query, like SPJG query (select, predicate, join, groupby)</li>
+<li>IndexDataMap: DataMap that leverages an index to accelerate filter queries. Lucene DataMap and BloomFilter DataMap belong to this type of DataMap.</li>
+<li>MVDataMap: DataMap that leverages a Materialized View to accelerate OLAP-style queries, like SPJG queries (select, predicate, join, group by). Preaggregate, timeseries and mv DataMaps belong to this type.</li>
 </ol>
 <h3>
 <a id="datamap-provider" class="anchor" href="#datamap-provider" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DataMap Provider</h3>
@@ -234,7 +235,7 @@ Currently, the provider string can be:</p>
 <ol>
 <li>preaggregate: A type of MVDataMap that does pre-aggregation of a single table</li>
 <li>timeseries: A type of MVDataMap that does pre-aggregation based on the time dimension of the table</li>
-<li>class name IndexDataMapFactory  implementation: Developer can implement new type of IndexDataMap by extending IndexDataMapFactory</li>
+<li>class name of an IndexDataMapFactory implementation: Developers can implement a new type of IndexDataMap by extending IndexDataMapFactory (see the sketch below)</li>
 </ol>
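+<p>For instance, a developer-supplied IndexDataMap could be registered by passing the factory's fully qualified class name as the provider (the class name below is a hypothetical placeholder):</p>
+<pre><code>CREATE DATAMAP dm
+ON TABLE main
+USING 'org.example.datamap.MyIndexDataMapFactory'
+</code></pre>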
 <p>When user issues <code>DROP DATAMAP dm ON TABLE main</code>, the corresponding DataMapProvider interface will be called.</p>
 <p>Click for more details about <a href="./datamap-management.html#datamap-management">DataMap Management</a> and supported <a href="./datamap-management.html#overview">DSL</a>.</p>
diff --git a/content/datamap-management.html b/content/datamap-management.html
index 5dc2b33..ac847f3 100644
--- a/content/datamap-management.html
+++ b/content/datamap-management.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -238,13 +238,13 @@
 <h2>
 <a id="overview" class="anchor" href="#overview" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Overview</h2>
 <p>DataMap can be created using the following DDL:</p>
-<pre><code>  CREATE DATAMAP [IF NOT EXISTS] datamap_name
-  [ON TABLE main_table]
-  USING "datamap_provider"
-  [WITH DEFERRED REBUILD]
-  DMPROPERTIES ('key'='value', ...)
-  AS
-    SELECT statement
+<pre><code>CREATE DATAMAP [IF NOT EXISTS] datamap_name
+[ON TABLE main_table]
+USING "datamap_provider"
+[WITH DEFERRED REBUILD]
+DMPROPERTIES ('key'='value', ...)
+AS
+  SELECT statement
 </code></pre>
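+<p>For example, a minimal sketch using the <code>preaggregate</code> provider (the table and column names are illustrative):</p>
+<pre><code>CREATE DATAMAP IF NOT EXISTS agg_sales
+ON TABLE sales
+USING "preaggregate"
+AS
+  SELECT country, sum(quantity) FROM sales GROUP BY country
+</code></pre>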
 <p>Currently, there are 5 DataMap implementations in CarbonData.</p>
 <table>
diff --git a/content/ddl-of-carbondata.html b/content/ddl-of-carbondata.html
index 7f84786..1ef64f8 100644
--- a/content/ddl-of-carbondata.html
+++ b/content/ddl-of-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -226,13 +226,13 @@
 <p><a href="#create-table">CREATE TABLE</a></p>
 <ul>
 <li><a href="#dictionary-encoding-configuration">Dictionary Encoding</a></li>
+<li><a href="#local-dictionary-configuration">Local Dictionary</a></li>
 <li><a href="#inverted-index-configuration">Inverted Index</a></li>
 <li><a href="#sort-columns-configuration">Sort Columns</a></li>
 <li><a href="#sort-scope-configuration">Sort Scope</a></li>
 <li><a href="#table-block-size-configuration">Table Block Size</a></li>
 <li><a href="#table-compaction-configuration">Table Compaction</a></li>
 <li><a href="#streaming">Streaming</a></li>
-<li><a href="#local-dictionary-configuration">Local Dictionary</a></li>
 <li><a href="#caching-minmax-value-for-required-columns">Caching Column Min/Max</a></li>
 <li><a href="#caching-at-block-or-blocklet-level">Caching Level</a></li>
 <li><a href="#support-flat-folder-same-as-hiveparquet">Hive/Parquet folder Structure</a></li>
@@ -240,6 +240,7 @@
 <li><a href="#compression-for-table">Compression for Table</a></li>
 <li><a href="#bad-records-path">Bad Records Path</a></li>
 <li><a href="#load-minimum-data-size">Load Minimum Input File Size</a></li>
+<li><a href="#range-column">Range Column</a></li>
 </ul>
 </li>
 <li>
@@ -265,9 +266,10 @@
 <li><a href="#rename-table">RENAME TABLE</a></li>
 <li><a href="#add-columns">ADD COLUMNS</a></li>
 <li><a href="#drop-columns">DROP COLUMNS</a></li>
-<li><a href="#change-data-type">CHANGE DATA TYPE</a></li>
+<li><a href="#change-column-nametype">RENAME COLUMN</a></li>
+<li><a href="#change-column-nametype">CHANGE COLUMN NAME/TYPE</a></li>
 <li><a href="#merge-index">MERGE INDEXES</a></li>
-<li><a href="#set-and-unset-for-local-dictionary-properties">SET/UNSET Local Dictionary Properties</a></li>
+<li><a href="#set-and-unset">SET/UNSET</a></li>
 </ul>
 </li>
 <li><a href="#drop-table">DROP TABLE</a></li>
@@ -418,6 +420,10 @@ STORED AS carbondata
 <td><a href="#load-minimum-data-size">LOAD_MIN_SIZE_INMB</a></td>
 <td>Minimum input data size per node for data loading</td>
 </tr>
+<tr>
+<td><a href="#range-column">Range Column</a></td>
+<td>Partition input data by range</td>
+</tr>
 </tbody>
 </table>
 <p>Following are the guidelines for TBLPROPERTIES, CarbonData's additional table options can be set via carbon.properties.</p>
@@ -429,106 +435,11 @@ STORED AS carbondata
 Suggested use cases: do dictionary encoding for low cardinality columns; it might help to improve the data compression ratio and performance.</p>
 <pre><code>TBLPROPERTIES ('DICTIONARY_INCLUDE'='column1, column2')
 </code></pre>
-<p><strong>NOTE</strong>: Dictionary Include/Exclude for complex child columns is not supported.</p>
-</li>
-<li>
-<h5>
-<a id="inverted-index-configuration" class="anchor" href="#inverted-index-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Inverted Index Configuration</h5>
-<p>By default inverted index is disabled as store size will be reduced, it can be enabled by using a table property. It might help to improve compression ratio and query speed, especially for low cardinality columns which are in reward position.
-Suggested use cases : For high cardinality columns, you can disable the inverted index for improving the data loading performance.</p>
-<pre><code>TBLPROPERTIES ('NO_INVERTED_INDEX'='column1', 'INVERTED_INDEX'='column2, column3')
-</code></pre>
-</li>
-<li>
-<h5>
-<a id="sort-columns-configuration" class="anchor" href="#sort-columns-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Columns Configuration</h5>
-<p>This property is for users to specify which columns belong to the MDK(Multi-Dimensions-Key) index.</p>
+<p><strong>NOTE</strong>:</p>
 <ul>
-<li>If users don't specify "SORT_COLUMN" property, by default MDK index be built by using all dimension columns except complex data type column.</li>
-<li>If this property is specified but with empty argument, then the table will be loaded without sort.</li>
-<li>This supports only string, date, timestamp, short, int, long, byte and boolean data types.
-Suggested use cases : Only build MDK index for required columns,it might help to improve the data loading performance.</li>
+<li>Dictionary Include/Exclude for complex child columns is not supported.</li>
+<li>Dictionary is global. Apart from the global dictionary, CarbonData also supports local dictionary and non-dictionary columns.</li>
 </ul>
-<pre><code>TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
-OR
-TBLPROPERTIES ('SORT_COLUMNS'='')
-</code></pre>
-<p><strong>NOTE</strong>: Sort_Columns for Complex datatype columns is not supported.</p>
-</li>
-<li>
-<h5>
-<a id="sort-scope-configuration" class="anchor" href="#sort-scope-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Scope Configuration</h5>
-<p>This property is for users to specify the scope of the sort during data load, following are the types of sort scope.</p>
-<ul>
-<li>LOCAL_SORT: It is the default sort scope.</li>
-<li>NO_SORT: It will load the data in unsorted manner, it will significantly increase load performance.</li>
-<li>BATCH_SORT: It increases the load performance but decreases the query performance if identified blocks &gt; parallelism.</li>
-<li>GLOBAL_SORT: It increases the query performance, especially high concurrent point query.
-And if you care about loading resources isolation strictly, because the system uses the spark GroupBy to sort data, the resource can be controlled by spark.</li>
-</ul>
-</li>
-</ul>
-<pre><code>### Example:
-
-```
-CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
-  productNumber INT,
-  productName STRING,
-  storeCity STRING,
-  storeProvince STRING,
-  productCategory STRING,
-  productBatch STRING,
-  saleQuantity INT,
-  revenue INT)
-STORED AS carbondata
-TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
-               'SORT_SCOPE'='NO_SORT')
-```
-</code></pre>
-<p><strong>NOTE:</strong> CarbonData also supports "using carbondata". Find example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala" target=_blank>SparkSessionExample</a> in the CarbonData repo.</p>
-<ul>
-<li>
-<h5>
-<a id="table-block-size-configuration" class="anchor" href="#table-block-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Block Size Configuration</h5>
-<p>This property is for setting block size of this table, the default value is 1024 MB and supports a range of 1 MB to 2048 MB.</p>
-<pre><code>TBLPROPERTIES ('TABLE_BLOCKSIZE'='512')
-</code></pre>
-<p><strong>NOTE:</strong> 512 or 512M both are accepted.</p>
-</li>
-<li>
-<h5>
-<a id="table-blocklet-size-configuration" class="anchor" href="#table-blocklet-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Blocklet Size Configuration</h5>
-<p>This property is for setting blocklet size in the carbondata file, the default value is 64 MB.
-Blocklet is the minimum IO read unit, in case of point queries reduce blocklet size might improve the query performance.</p>
-<p>Example usage:</p>
-<pre><code>TBLPROPERTIES ('TABLE_BLOCKLET_SIZE'='8')
-</code></pre>
-</li>
-<li>
-<h5>
-<a id="table-compaction-configuration" class="anchor" href="#table-compaction-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Compaction Configuration</h5>
-<p>These properties are table level compaction configurations, if not specified, system level configurations in carbon.properties will be used.
-Following are 5 configurations:</p>
-<ul>
-<li>MAJOR_COMPACTION_SIZE: same meaning as carbon.major.compaction.size, size in MB.</li>
-<li>AUTO_LOAD_MERGE: same meaning as carbon.enable.auto.load.merge.</li>
-<li>COMPACTION_LEVEL_THRESHOLD: same meaning as carbon.compaction.level.threshold.</li>
-<li>COMPACTION_PRESERVE_SEGMENTS: same meaning as carbon.numberof.preserve.segments.</li>
-<li>ALLOWED_COMPACTION_DAYS: same meaning as carbon.allowed.compaction.days.</li>
-</ul>
-<pre><code>TBLPROPERTIES ('MAJOR_COMPACTION_SIZE'='2048',
-               'AUTO_LOAD_MERGE'='true',
-               'COMPACTION_LEVEL_THRESHOLD'='5,6',
-               'COMPACTION_PRESERVE_SEGMENTS'='10',
-               'ALLOWED_COMPACTION_DAYS'='5')
-</code></pre>
-</li>
-<li>
-<h5>
-<a id="streaming" class="anchor" href="#streaming" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Streaming</h5>
-<p>CarbonData supports streaming ingestion for real-time data. You can create the 'streaming' table using the following table properties.</p>
-<pre><code>TBLPROPERTIES ('streaming'='true')
-</code></pre>
 </li>
 <li>
 <h5>
@@ -645,27 +556,120 @@ Following are 5 configurations:</p>
 </ul>
 <h3>
 <a id="example" class="anchor" href="#example" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example:</h3>
-<pre><code>CREATE TABLE carbontable(
-          
-            column1 string,
-          
-            column2 string,
-          
-            column3 LONG )
-          
-  STORED AS carbondata
-  TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true','LOCAL_DICTIONARY_THRESHOLD'='1000',
-  'LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
+<pre><code>CREATE TABLE carbontable(             
+  column1 string,             
+  column2 string,             
+  column3 LONG)
+STORED AS carbondata
+TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true','LOCAL_DICTIONARY_THRESHOLD'='1000',
+'LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
 </code></pre>
 <p><strong>NOTE:</strong></p>
 <ul>
 <li>We recommend using Local Dictionary when the cardinality is high but distributed across multiple loads</li>
 <li>On a large cluster, decoding data can become a bottleneck for the global dictionary as there will be many remote reads. In this scenario, it is better to use Local Dictionary.</li>
 <li>When the cardinality is low but loads are repetitive, it is better to use the global dictionary, as the local dictionary generates multiple dictionary files at blocklet level, increasing redundancy.</li>
+<li>If users want non-dictionary columns, they can set LOCAL_DICTIONARY_ENABLE to false and not set DICTIONARY_INCLUDE.</li>
 </ul>
 <ul>
 <li>
 <h5>
+<a id="inverted-index-configuration" class="anchor" href="#inverted-index-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Inverted Index Configuration</h5>
+<p>By default the inverted index is disabled, as this reduces the store size; it can be enabled by using a table property. It might help to improve the compression ratio and query speed, especially for low cardinality columns that sit in a rearward position among the sort columns.
+Suggested use cases: for high cardinality columns, you can disable the inverted index to improve the data loading performance.</p>
+<p><strong>NOTE</strong>: Columns specified in INVERTED_INDEX should also be present in SORT_COLUMNS.</p>
+<pre><code>TBLPROPERTIES ('SORT_COLUMNS'='column2,column3','NO_INVERTED_INDEX'='column1', 'INVERTED_INDEX'='column2, column3')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="sort-columns-configuration" class="anchor" href="#sort-columns-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Columns Configuration</h5>
+<p>This property is for users to specify which columns belong to the MDK (Multi-Dimensions-Key) index.</p>
+<ul>
+<li>If users don't specify the "SORT_COLUMNS" property, by default no columns are sorted</li>
+<li>If this property is specified but with empty argument, then the table will be loaded without sort.</li>
+<li>This supports only string, date, timestamp, short, int, long, byte and boolean data types.
+Suggested use cases: build the MDK index only for required columns; it might help to improve the data loading performance.</li>
+</ul>
+<pre><code>TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
+</code></pre>
+<p><strong>NOTE</strong>: Sort_Columns for Complex datatype columns is not supported.</p>
+</li>
+<li>
+<h5>
+<a id="sort-scope-configuration" class="anchor" href="#sort-scope-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Scope Configuration</h5>
+<p>This property is for users to specify the scope of the sort during data load; the following are the types of sort scope.</p>
+<ul>
+<li>LOCAL_SORT: data will be locally sorted (task level sorting)</li>
+<li>NO_SORT: default scope. It loads the data in an unsorted manner, which significantly increases load performance.</li>
+<li>BATCH_SORT: It increases the load performance but decreases the query performance if identified blocks &gt; parallelism.</li>
+<li>GLOBAL_SORT: It increases the query performance, especially for highly concurrent point queries.
+It is also useful if you care about strict isolation of loading resources: because the system uses Spark's GroupBy to sort the data, the resources can be controlled by Spark.</li>
+</ul>
+</li>
+</ul>
+<h3>
+<a id="example-1" class="anchor" href="#example-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example:</h3>
+<pre><code>CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
+  productNumber INT,
+  productName STRING,
+  storeCity STRING,
+  storeProvince STRING,
+  productCategory STRING,
+  productBatch STRING,
+  saleQuantity INT,
+  revenue INT)
+STORED AS carbondata
+TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
+               'SORT_SCOPE'='NO_SORT')
+</code></pre>
+<p><strong>NOTE:</strong> CarbonData also supports "using carbondata". Find example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala" target=_blank>SparkSessionExample</a> in the CarbonData repo.</p>
+<ul>
+<li>
+<h5>
+<a id="table-block-size-configuration" class="anchor" href="#table-block-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Block Size Configuration</h5>
+<p>This property is for setting the block size of this table; the default value is 1024 MB and the supported range is 1 MB to 2048 MB.</p>
+<pre><code>TBLPROPERTIES ('TABLE_BLOCKSIZE'='512')
+</code></pre>
+<p><strong>NOTE:</strong> 512 or 512M both are accepted.</p>
+</li>
+<li>
+<h5>
+<a id="table-blocklet-size-configuration" class="anchor" href="#table-blocklet-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Blocklet Size Configuration</h5>
+<p>This property is for setting the blocklet size in the carbondata file; the default value is 64 MB.
+Blocklet is the minimum IO read unit; in case of point queries, reducing the blocklet size might improve query performance.</p>
+<p>Example usage:</p>
+<pre><code>TBLPROPERTIES ('TABLE_BLOCKLET_SIZE'='8')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="table-compaction-configuration" class="anchor" href="#table-compaction-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Compaction Configuration</h5>
+<p>These properties are table level compaction configurations; if not specified, the system level configurations in carbon.properties will be used.
+The following are the 5 configurations:</p>
+<ul>
+<li>MAJOR_COMPACTION_SIZE: same meaning as carbon.major.compaction.size, size in MB.</li>
+<li>AUTO_LOAD_MERGE: same meaning as carbon.enable.auto.load.merge.</li>
+<li>COMPACTION_LEVEL_THRESHOLD: same meaning as carbon.compaction.level.threshold.</li>
+<li>COMPACTION_PRESERVE_SEGMENTS: same meaning as carbon.numberof.preserve.segments.</li>
+<li>ALLOWED_COMPACTION_DAYS: same meaning as carbon.allowed.compaction.days.</li>
+</ul>
+<pre><code>TBLPROPERTIES ('MAJOR_COMPACTION_SIZE'='2048',
+               'AUTO_LOAD_MERGE'='true',
+               'COMPACTION_LEVEL_THRESHOLD'='5,6',
+               'COMPACTION_PRESERVE_SEGMENTS'='10',
+               'ALLOWED_COMPACTION_DAYS'='5')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="streaming" class="anchor" href="#streaming" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Streaming</h5>
+<p>CarbonData supports streaming ingestion for real-time data. You can create a 'streaming' table using the following table property.</p>
+<pre><code>TBLPROPERTIES ('streaming'='true')
+</code></pre>
+</li>
+<li>
+<h5>
 <a id="caching-minmax-value-for-required-columns" class="anchor" href="#caching-minmax-value-for-required-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Caching Min/Max Value for Required Columns</h5>
 <p>By default, CarbonData caches the min and max values of all the columns in the schema. As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage.</p>
 <p>Following are the valid values for COLUMN_META_CACHE:</p>
@@ -732,7 +736,7 @@ During create table operation specify the cache level in table properties.</p>
 <a id="support-flat-folder-same-as-hiveparquet" class="anchor" href="#support-flat-folder-same-as-hiveparquet" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Support Flat folder same as Hive/Parquet</h5>
 <p>This feature allows all carbondata and index files to be kept directly under the tablepath. Currently all carbondata/carbonindex files are written under the tablepath/Fact/Part0/Segment_NUM folder, which is not the same as the hive/parquet folder structure. With this feature, all files are written directly under the tablepath without maintaining any segment folder structure. This is useful for interoperability between the execution engines and plugin with other execution engines like hive or presto becomes [...]
 <p>Following table property enables this feature and default value is false.</p>
-<pre><code> 'flat_folder'='true'
+<pre><code>'flat_folder'='true'
 </code></pre>
 <p>Example:</p>
 <pre><code>CREATE TABLE employee (name String, city String, id int) STORED BY 'carbondata' TBLPROPERTIES ('flat_folder'='true')
@@ -787,7 +791,7 @@ The corresponding system property is configured in carbon.properties file as bel
 As the table path remains the same after a rename, the user can use this property to
 specify the bad records path for the table at the time of creation, so that the same path can
 be later viewed in the table description for reference.</p>
-<pre><code>  TBLPROPERTIES('BAD_RECORD_PATH'='/opt/badrecords')
+<pre><code>TBLPROPERTIES('BAD_RECORD_PATH'='/opt/badrecords')
 </code></pre>
 </li>
 <li>
@@ -799,7 +803,15 @@ This property is useful if you have a large cluster and only want a small portio
 For example, suppose you have a cluster with 10 nodes and the input data is about 1GB. Without this property, each node will process about 100MB of input data and produce at least 10 data files. With this property configured to 512, only 2 nodes will be chosen to process the input data, each with about 512MB of input, producing about 2 or 4 files based on the compression ratio.
 Moreover, this property can also be specified in the load option.
 Notice that once you enable this feature, carbondata will ignore data locality while assigning input data to nodes for load balance, which will cause more network traffic.</p>
-<pre><code>  TBLPROPERTIES('LOAD_MIN_SIZE_INMB'='256')
+<pre><code>TBLPROPERTIES('LOAD_MIN_SIZE_INMB'='256')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="range-column" class="anchor" href="#range-column" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Range Column</h5>
+<p>This property is used to specify a column by which to range-partition the input data.
+Only one column can be configured. During data loading, you can use "global_sort_partitions" or "scale_factor" to avoid generating small files, as sketched below.</p>
+<pre><code>TBLPROPERTIES('RANGE_COLUMN'='col1')
 </code></pre>
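+<p>For instance, a hedged sketch of a load into such a table, using "global_sort_partitions" to limit the number of output files (the path and table name are placeholders):</p>
+<pre><code>LOAD DATA INPATH '/tmp/data.csv' INTO TABLE carbontable
+OPTIONS('GLOBAL_SORT_PARTITIONS'='10')
+</code></pre>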
 </li>
 </ul>
@@ -813,26 +825,37 @@ AS select_statement;
 </code></pre>
 <h3>
 <a id="examples" class="anchor" href="#examples" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Examples</h3>
-<pre><code>carbon.sql("CREATE TABLE source_table(
-                           id INT,
-                           name STRING,
-                           city STRING,
-                           age INT)
-            STORED AS parquet")
+<pre><code>carbon.sql(
+           s"""
+              | CREATE TABLE source_table(
+              |   id INT,
+              |   name STRING,
+              |   city STRING,
+              |   age INT)
+              | STORED AS parquet
+           """.stripMargin)
+              
 carbon.sql("INSERT INTO source_table SELECT 1,'bob','shenzhen',27")
+
 carbon.sql("INSERT INTO source_table SELECT 2,'david','shenzhen',31")
 
-carbon.sql("CREATE TABLE target_table
-            STORED AS carbondata
-            AS SELECT city,avg(age) FROM source_table GROUP BY city")
+carbon.sql(
+           s"""
+              | CREATE TABLE target_table
+              | STORED AS carbondata
+              | AS SELECT city, avg(age) 
+              |    FROM source_table 
+              |    GROUP BY city
+           """.stripMargin)
             
 carbon.sql("SELECT * FROM target_table").show
-  // results:
-  //    +--------+--------+
-  //    |    city|avg(age)|
-  //    +--------+--------+
-  //    |shenzhen|    29.0|
-  //    +--------+--------+
+
+// results:
+//    +--------+--------+
+//    |    city|avg(age)|
+//    +--------+--------+
+//    |shenzhen|    29.0|
+//    +--------+--------+
 
 </code></pre>
 <h2>
@@ -851,11 +874,12 @@ sql("INSERT INTO origin select 100,'spark'")
 sql("INSERT INTO origin select 200,'hive'")
 // creates a table in $storeLocation/origin
 
-sql(s"""
-|CREATE EXTERNAL TABLE source
-|STORED AS carbondata
-|LOCATION '$storeLocation/origin'
-""".stripMargin)
+sql(
+    s"""
+       | CREATE EXTERNAL TABLE source
+       | STORED AS carbondata
+       | LOCATION '$storeLocation/origin'
+    """.stripMargin)
 checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) from origin"))
 </code></pre>
 <h3>
@@ -864,8 +888,10 @@ checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) from origin
 Our SDK module currently supports writing data in this format.</p>
 <p><strong>Example:</strong></p>
 <pre><code>sql(
-s"""CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
-|'$writerPath' """.stripMargin)
+    s"""
+       | CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
+       |'$writerPath'
+    """.stripMargin)
 </code></pre>
 <p>Here the writer path will contain carbondata and index files.
 This can be SDK output or C++ SDK output. Refer <a href="./sdk-guide.html">SDK Guide</a> and <a href="./csdk-guide.html">C++ SDK Guide</a>.</p>
@@ -884,7 +910,7 @@ suggest to drop the external table and create again to register table with new s
 <pre><code>CREATE DATABASE [IF NOT EXISTS] database_name [LOCATION path];
 </code></pre>
 <h3>
-<a id="example-1" class="anchor" href="#example-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example</h3>
+<a id="example-2" class="anchor" href="#example-2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example</h3>
 <pre><code>CREATE DATABASE carbon LOCATION "hdfs://name_cluster/dir1/carbonstore";
 </code></pre>
 <h2>
@@ -904,8 +930,8 @@ SHOW TABLES IN defaultdb
 <p>The following section introduce the commands to modify the physical or logical state of the existing table(s).</p>
 <ul>
 <li>
-<h5>
-<a id="rename-table" class="anchor" href="#rename-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>RENAME TABLE</h5>
+<h4>
+<a id="rename-table" class="anchor" href="#rename-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>RENAME TABLE</h4>
 <p>This command is used to rename the existing table.</p>
 <pre><code>ALTER TABLE [db_name.]table_name RENAME TO new_table_name
 </code></pre>
@@ -916,8 +942,8 @@ ALTER TABLE test_db.carbon RENAME TO test_db.carbonTable
 </code></pre>
 </li>
 <li>
-<h5>
-<a id="add-columns" class="anchor" href="#add-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>ADD COLUMNS</h5>
+<h4>
+<a id="add-columns" class="anchor" href="#add-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>ADD COLUMNS</h4>
 <p>This command is used to add a new column to the existing table.</p>
 <pre><code>ALTER TABLE [db_name.]table_name ADD COLUMNS (col_name data_type,...)
 TBLPROPERTIES('DICTIONARY_INCLUDE'='col_name,...',
@@ -937,8 +963,8 @@ TBLPROPERTIES('DICTIONARY_INCLUDE'='col_name,...',
 <code>ALTER TABLE carbon ADD COLUMNS (a1 STRING, b1 STRING) TBLPROPERTIES('LOCAL_DICTIONARY_INCLUDE'='a1','LOCAL_DICTIONARY_EXCLUDE'='b1')</code></p>
 <ul>
 <li>
-<h5>
-<a id="drop-columns" class="anchor" href="#drop-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DROP COLUMNS</h5>
+<h4>
+<a id="drop-columns" class="anchor" href="#drop-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DROP COLUMNS</h4>
 <p>This command is used to delete the existing column(s) in a table.</p>
 <pre><code>ALTER TABLE [db_name.]table_name DROP COLUMNS (col_name, ...)
 </code></pre>
@@ -952,11 +978,11 @@ ALTER TABLE carbon DROP COLUMNS (c1,d1)
 <p><strong>NOTE:</strong> Dropping a complex child column is not supported.</p>
 </li>
 <li>
-<h5>
-<a id="change-data-type" class="anchor" href="#change-data-type" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CHANGE DATA TYPE</h5>
-<p>This command is used to change the data type from INT to BIGINT or decimal precision from lower to higher.
+<h4>
+<a id="change-column-nametype" class="anchor" href="#change-column-nametype" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CHANGE COLUMN NAME/TYPE</h4>
+<p>This command is used to change the column name, and to change the data type from INT to BIGINT or the decimal precision from lower to higher.
 Change of decimal data type from lower precision to higher precision will only be supported for cases where there is no data loss.</p>
-<pre><code>ALTER TABLE [db_name.]table_name CHANGE col_name col_name changed_column_type
+<pre><code>ALTER TABLE [db_name.]table_name CHANGE col_old_name col_new_name column_type
 </code></pre>
 <p>Valid Scenarios</p>
 <ul>
@@ -965,34 +991,39 @@ Change of decimal data type from lower precision to higher precision will only b
 <li>
 <strong>NOTE:</strong> The allowed maximum is 38,38 (precision, scale); this upper-bound case is valid and does not result in data loss.</li>
 </ul>
-<p>Example1:Changing data type of column a1 from INT to BIGINT.</p>
-<pre><code>ALTER TABLE test_db.carbon CHANGE a1 a1 BIGINT
+<p>Example 1: Change column a1's name to a2 and its data type from INT to BIGINT.</p>
+<pre><code>ALTER TABLE test_db.carbon CHANGE a1 a2 BIGINT
 </code></pre>
 <p>Example 2: Changing the decimal precision of column a1 from 10 to 18.</p>
 <pre><code>ALTER TABLE test_db.carbon CHANGE a1 a1 DECIMAL(18,2)
 </code></pre>
+<p>Example 3: Change column a3's name to a4.</p>
+<pre><code>ALTER TABLE test_db.carbon CHANGE a3 a4 STRING
+</code></pre>
+<p><strong>NOTE:</strong> Once the column is renamed, the user has to take care to replace the file header with the new name or to change the column header in the CSV file.</p>
 </li>
 <li>
-<h5>
-<a id="merge-index" class="anchor" href="#merge-index" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>MERGE INDEX</h5>
+<h4>
+<a id="merge-index" class="anchor" href="#merge-index" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>MERGE INDEX</h4>
 <p>This command is used to merge all the CarbonData index files (.carbonindex) inside a segment to a single CarbonData index merge file (.carbonindexmerge). This enhances the first query performance.</p>
-<pre><code> ALTER TABLE [db_name.]table_name COMPACT 'SEGMENT_INDEX'
-</code></pre>
-<pre><code>Examples:
+<pre><code>ALTER TABLE [db_name.]table_name COMPACT 'SEGMENT_INDEX'
 </code></pre>
-<pre><code> ALTER TABLE test_db.carbon COMPACT 'SEGMENT_INDEX'
- ```
-
- **NOTE:**
-
- * Merge index is not supported on streaming table.
-
+<p>Examples:</p>
+<pre><code>ALTER TABLE test_db.carbon COMPACT 'SEGMENT_INDEX'
 </code></pre>
+<p><strong>NOTE:</strong></p>
+<ul>
+<li>Merge index is not supported on streaming table.</li>
+</ul>
 </li>
 <li>
-<h5>
-<a id="set-and-unset-for-local-dictionary-properties" class="anchor" href="#set-and-unset-for-local-dictionary-properties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SET and UNSET for Local Dictionary Properties</h5>
+<h4>
+<a id="set-and-unset" class="anchor" href="#set-and-unset" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SET and UNSET</h4>
 <p>When the set command is used, all the newly set properties will override the corresponding old properties if they exist.</p>
+<ul>
+<li>
+<h5>
+<a id="local-dictionary-properties" class="anchor" href="#local-dictionary-properties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Local Dictionary Properties</h5>
 <p>Example to SET Local Dictionary Properties:</p>
 <pre><code>ALTER TABLE tablename SET TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='false','LOCAL_DICTIONARY_THRESHOLD'='1000','LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
 </code></pre>
@@ -1003,6 +1034,19 @@ Change of decimal data type from lower precision to higher precision will only b
 <p><strong>NOTE:</strong> For old tables, by default, local dictionary is disabled. If the user wants a local dictionary for these tables, it can be enabled/disabled for new data at their discretion.
 This can be achieved by using the alter table set command.</p>
 </li>
+<li>
+<h5>
+<a id="sort-scope" class="anchor" href="#sort-scope" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SORT SCOPE</h5>
+<p>Example to SET SORT SCOPE:</p>
+<pre><code>ALTER TABLE tablename SET TBLPROPERTIES('SORT_SCOPE'='NO_SORT')
+</code></pre>
+<p>When Sort Scope is unset, the default value (NO_SORT) will be used.</p>
+<p>Example to UNSET SORT SCOPE:</p>
+<pre><code>ALTER TABLE tablename UNSET TBLPROPERTIES('SORT_SCOPE')
+</code></pre>
+</li>
+</ul>
+</li>
 </ul>
 <h3>
 <a id="drop-table" class="anchor" href="#drop-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DROP TABLE</h3>
@@ -1041,8 +1085,8 @@ STORED AS carbondata
 <pre><code>CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
                               productNumber Int COMMENT 'unique serial number for product')
 COMMENT "This is table comment"
- STORED AS carbondata
- TBLPROPERTIES ('DICTIONARY_INCLUDE'='productNumber')
+STORED AS carbondata
+TBLPROPERTIES ('DICTIONARY_INCLUDE'='productNumber')
 </code></pre>
 <p>You can also SET and UNSET table comment using ALTER command.</p>
 <p>Example to SET table comment:</p>
@@ -1067,7 +1111,7 @@ COMMENT "This is table comment"
   [TBLPROPERTIES (property_name=property_value, ...)]
 </code></pre>
 <p>Example:</p>
-<pre><code> CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
+<pre><code>CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
                               productNumber INT,
                               productName STRING,
                               storeCity STRING,
@@ -1094,9 +1138,9 @@ STORED AS carbondata
 <h4>
 <a id="insert-overwrite" class="anchor" href="#insert-overwrite" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Insert OVERWRITE</h4>
 <p>This command allows you to insert or load overwrite on a specific partition.</p>
-<pre><code> INSERT OVERWRITE TABLE table_name
- PARTITION (column = 'partition_name')
- select_statement
+<pre><code>INSERT OVERWRITE TABLE table_name
+PARTITION (column = 'partition_name')
+select_statement
 </code></pre>
 <p>Example:</p>
 <pre><code>INSERT OVERWRITE TABLE partitioned_user
@@ -1150,10 +1194,10 @@ STORED AS carbondata
     col_C LONG,
     col_D DECIMAL(10,2),
     col_E LONG
- ) partitioned by (col_F Timestamp)
- PARTITIONED BY 'carbondata'
- TBLPROPERTIES('PARTITION_TYPE'='RANGE',
- 'RANGE_INFO'='2015-01-01, 2016-01-01, 2017-01-01, 2017-02-01')
+) partitioned by (col_F Timestamp)
+STORED BY 'carbondata'
+TBLPROPERTIES('PARTITION_TYPE'='RANGE',
+'RANGE_INFO'='2015-01-01, 2016-01-01, 2017-01-01, 2017-02-01')
 </code></pre>
 <h3>
 <a id="create-list-partition-table" class="anchor" href="#create-list-partition-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Create List Partition Table</h3>
@@ -1174,9 +1218,9 @@ STORED AS carbondata
     col_E LONG,
     col_F TIMESTAMP
  ) PARTITIONED BY (col_A STRING)
- STORED AS carbondata
- TBLPROPERTIES('PARTITION_TYPE'='LIST',
- 'LIST_INFO'='aaaa, bbbb, (cccc, dddd), eeee')
+STORED AS carbondata
+TBLPROPERTIES('PARTITION_TYPE'='LIST',
+'LIST_INFO'='aaaa, bbbb, (cccc, dddd), eeee')
 </code></pre>
 <h3>
 <a id="show-partitions-1" class="anchor" href="#show-partitions-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Show Partitions</h3>
@@ -1194,7 +1238,7 @@ STORED AS carbondata
 <h3>
 <a id="drop-a-partition" class="anchor" href="#drop-a-partition" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Drop a partition</h3>
 <p>Only drops the partition definition, but keeps the data</p>
-<pre><code>  ALTER TABLE [db_name].table_name DROP PARTITION(partition_id)
+<pre><code>ALTER TABLE [db_name].table_name DROP PARTITION(partition_id)
 </code></pre>
 <p>Drops both the partition definition and the data</p>
 <pre><code>ALTER TABLE [db_name].table_name DROP PARTITION(partition_id) WITH DATA
diff --git a/content/dml-of-carbondata.html b/content/dml-of-carbondata.html
index 15ff807..e765ecb 100644
--- a/content/dml-of-carbondata.html
+++ b/content/dml-of-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -269,6 +269,10 @@ OPTIONS(property_name=property_value, ...)
 <td>If the header is not present in the input CSV, the column names to be used for the data read from the input CSV</td>
 </tr>
 <tr>
+<td><a href="#sort_scope">SORT_SCOPE</a></td>
+<td>Sort Scope to be used for the current load.</td>
+</tr>
+<tr>
 <td><a href="#multiline">MULTILINE</a></td>
 <td>Whether a row data can span across multiple lines.</td>
 </tr>
@@ -332,8 +336,14 @@ OPTIONS(property_name=property_value, ...)
 <td><a href="#global_sort_partitions">GLOBAL_SORT_PARTITIONS</a></td>
 <td>Number of partitions to use for shuffling of data during sorting</td>
 </tr>
+<tr>
+<td><a href="#scale_factor">SCALE_FACTOR</a></td>
+<td>Controls the partition size for the RANGE_COLUMN feature</td>
+</tr>
 </tbody>
 </table>
+<ul>
+<li>
 <p>You can use the following options to load data:</p>
 <ul>
 <li>
@@ -376,6 +386,24 @@ true: CSV file is with file header.</p>
 </li>
 <li>
 <h5>
+<a id="sort_scope" class="anchor" href="#sort_scope" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SORT_SCOPE:</h5>
+<p>Sort Scope to be used for the current load. This overrides the table's Sort Scope.
+Requirement: Sort Columns must be set while creating the table. If Sort Columns is null, Sort Scope is always NO_SORT.</p>
+<pre><code>OPTIONS('SORT_SCOPE'='BATCH_SORT')
+</code></pre>
+<p>Priority order for choosing Sort Scope is:</p>
+<ol>
+<li>Load Data Command</li>
+<li>CARBON.TABLE.LOAD.SORT.SCOPE.&lt;db&gt;.&lt;table&gt; session property</li>
+<li>Table level Sort Scope</li>
+<li>CARBON.OPTIONS.SORT.SCOPE session property</li>
+<li>Default Value: NO_SORT</li>
+</ol>
+</li>
+<li>
+<h5>
 <a id="multiline" class="anchor" href="#multiline" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>MULTILINE:</h5>
 <p>CSV with new line character in quotes.</p>
 <pre><code>OPTIONS('MULTILINE'='true') 
@@ -398,15 +426,23 @@ true: CSV file is with file header.</p>
 <li>
 <h5>
 <a id="complex_delimiter_level_1" class="anchor" href="#complex_delimiter_level_1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>COMPLEX_DELIMITER_LEVEL_1:</h5>
-<p>Split the complex type data column in a row (eg., a$b$c --&gt; Array = {a,b,c}).</p>
-<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_1'='$') 
+<p>Split the complex type data column in a row (eg., a\001b\001c --&gt; Array = {a,b,c}).</p>
+<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_1'='\001')
 </code></pre>
 </li>
 <li>
 <h5>
 <a id="complex_delimiter_level_2" class="anchor" href="#complex_delimiter_level_2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>COMPLEX_DELIMITER_LEVEL_2:</h5>
-<p>Split the complex type nested data column in a row. Applies level_1 delimiter &amp; applies level_2 based on complex data type (eg., a:b$c:d --&gt; Array&gt; = {{a,b},{c,d}}).</p>
-<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_2'=':')
+<p>Split the complex type nested data column in a row. Applies the level_1 delimiter &amp; applies level_2 based on the complex data type (eg., a\002b\001c\002d --&gt; Array of Array = {{a,b},{c,d}}).</p>
+<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_2'='\002')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="complex_delimiter_level_3" class="anchor" href="#complex_delimiter_level_3" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>COMPLEX_DELIMITER_LEVEL_3:</h5>
+<p>Split the complex type nested data column in a row. Applies the level_1 delimiter, then the level_2 and level_3 delimiters based on the complex data type.
+Used in case of nested Complex Map types. (eg., 'a\003b\002b\003c\001aa\003bb\002cc\003dd' --&gt; Array Of Map&lt;String, String&gt; = {{a -&gt; b, b -&gt; c},{aa -&gt; bb, cc -&gt; dd}}).</p>
+<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_3'='\003')
 </code></pre>
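+<p>As a combined sketch (table and path are illustrative), all three delimiters can be set in a single load; with these values the example row above parses into the Array Of Map shown:</p>
+<pre><code>LOAD DATA INPATH '/path/to/data.csv' INTO TABLE carbontable
+OPTIONS('COMPLEX_DELIMITER_LEVEL_1'='\001',
+'COMPLEX_DELIMITER_LEVEL_2'='\002',
+'COMPLEX_DELIMITER_LEVEL_3'='\003')
+</code></pre>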
 </li>
 <li>
@@ -454,11 +490,12 @@ true: CSV file is with file header.</p>
 <a id="single_pass" class="anchor" href="#single_pass" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SINGLE_PASS:</h5>
 <p>Single Pass Loading enables a single job to finish data loading with dictionary generation on the fly. It enhances performance in scenarios where subsequent data loads after the initial load involve fewer incremental updates to the dictionary.</p>
 </li>
-</ul>
+
 <p>This option specifies whether to use single pass for loading data or not. By default this option is set to FALSE.</p>
 <pre><code> OPTIONS('SINGLE_PASS'='TRUE')
 </code></pre>
 <p><strong>NOTE:</strong></p>
 <ul>
 <li>If this option is set to TRUE then data loading will take less time.</li>
 <li>If this option is set to some invalid value other than TRUE or FALSE then it uses the default value.</li>
@@ -470,8 +507,8 @@ options('DELIMITER'=',', 'QUOTECHAR'='"','COMMENTCHAR'='#',
 'FILEHEADER'='empno,empname,designation,doj,workgroupcategory,
 workgroupcategoryname,deptno,deptname,projectcode,
 projectjoindate,projectenddate,attendance,utilization,salary',
-'MULTILINE'='true','ESCAPECHAR'='\','COMPLEX_DELIMITER_LEVEL_1'='$',
-'COMPLEX_DELIMITER_LEVEL_2'=':',
+'MULTILINE'='true','ESCAPECHAR'='\','COMPLEX_DELIMITER_LEVEL_1'='\\\001',
+'COMPLEX_DELIMITER_LEVEL_2'='\\\002',
 'ALL_DICTIONARY_PATH'='/opt/alldictionary/data.dictionary',
 'SINGLE_PASS'='TRUE')
 </code></pre>
@@ -509,16 +546,37 @@ OPTIONS('BAD_RECORDS_LOGGER_ENABLE'='true','BAD_RECORD_PATH'='hdfs://hacluster/t
 <li>
 <h5>
 <a id="global_sort_partitions" class="anchor" href="#global_sort_partitions" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>GLOBAL_SORT_PARTITIONS:</h5>
-<p>If the SORT_SCOPE is defined as GLOBAL_SORT, then user can specify the number of partitions to use while shuffling data for sort using GLOBAL_SORT_PARTITIONS. If it is not configured, or configured less than 1, then it uses the number of map task as reduce task. It is recommended that each reduce task deal with 512MB-1GB data.</p>
+<p>If the SORT_SCOPE is defined as GLOBAL_SORT, the user can specify the number of partitions to use while shuffling data for sort using GLOBAL_SORT_PARTITIONS. If it is not configured, or is configured to less than 1, then the number of map tasks is used as the number of reduce tasks. It is recommended that each reduce task deals with 512MB-1GB of data.
+For RANGE_COLUMN, GLOBAL_SORT_PARTITIONS is also used to specify the number of range partitions.</p>
 </li>
 </ul>
 <pre><code>OPTIONS('GLOBAL_SORT_PARTITIONS'='2')
 </code></pre>
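+<p>As a rough sizing sketch (numbers are illustrative): with the recommended 512MB-1GB per reduce task, about 100GB of input data suggests a value between 100 and 200:</p>
+<pre><code>OPTIONS('GLOBAL_SORT_PARTITIONS'='128')
+</code></pre>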
-<p>NOTE:</p>
+<p><strong>NOTE:</strong></p>
 <ul>
 <li>GLOBAL_SORT_PARTITIONS should be Integer type, the range is [1,Integer.MaxValue].</li>
 <li>It is only used when the SORT_SCOPE is GLOBAL_SORT.</li>
 </ul>
+<ul>
+<li>
+<h5>
+<a id="scale_factor" class="anchor" href="#scale_factor" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SCALE_FACTOR</h5>
+</li>
+</ul>
+<p>For RANGE_COLUMN, SCALE_FACTOR is used to control the number of range partitions as follows.</p>
+<pre><code>  splitSize = max(blocklet_size, (block_size - blocklet_size)) * scale_factor
+  numPartitions = total size of input data / splitSize
+</code></pre>
+<p>The default value is 3, and the range is [1, 300].</p>
+<pre><code>  OPTIONS('SCALE_FACTOR'='10')
+</code></pre>
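+<p>A worked example of the formula, assuming the default block size of 1024MB, the default blocklet size of 64MB and the default SCALE_FACTOR of 3:</p>
+<pre><code>  splitSize     = max(64MB, (1024MB - 64MB)) * 3 = 2880MB
+  numPartitions = 28800MB (total input size) / 2880MB = 10
+</code></pre>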
+<p><strong>NOTE:</strong></p>
+<ul>
+<li>If both GLOBAL_SORT_PARTITIONS and SCALE_FACTOR are specified at the same time, only GLOBAL_SORT_PARTITIONS takes effect.</li>
+<li>The compaction on RANGE_COLUMN will use LOCAL_SORT by default.</li>
+</ul>
+
+
 <h3>
 <a id="insert-data-into-carbondata-table" class="anchor" href="#insert-data-into-carbondata-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>INSERT DATA INTO CARBONDATA TABLE</h3>
 <p>This command inserts data into a CarbonData table, it is defined as a combination of two queries Insert and Select query respectively.
@@ -650,14 +708,17 @@ All specified segment ids should exist and be valid, otherwise compaction will f
 Custom compaction is usually done during the off-peak time.</p>
 <pre><code>ALTER TABLE table_name COMPACT 'CUSTOM' WHERE SEGMENT.ID IN (2,3,4)
 </code></pre>
-<p>NOTE: Compaction is unsupported for table containing Complex columns.</p>
 <ul>
 <li><strong>CLEAN SEGMENTS AFTER Compaction</strong></li>
 </ul>
 <p>Clean the segments which are compacted:</p>
 <pre><code>CLEAN FILES FOR TABLE carbon_table
 </code></pre>
-<script>
+
+</li>
+</ul>
+</li>
+</ul><script>
 $(function() {
   // Show selected style on nav item
   $('.b-nav__docs').addClass('selected');
diff --git a/content/documentation.html b/content/documentation.html
index e49cdae..09db88f 100644
--- a/content/documentation.html
+++ b/content/documentation.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -225,11 +225,15 @@
 <a id="getting-started" class="anchor" href="#getting-started" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Getting Started</h2>
 <p><strong>File Format Concepts:</strong> Start with the basics of understanding the <a href="./file-structure-of-carbondata.html#carbondata-file-format">CarbonData file format</a> and its <a href="./file-structure-of-carbondata.html">storage structure</a>. This will help to understand other parts of the documentation, including deployment, programming and usage guides.</p>
 <p><strong>Quick Start:</strong> <a href="./quick-start-guide.html#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Run an example program</a> on your local machine or <a href="https://github.com/apache/carbondata/tree/master/examples/spark2/src/main/scala/org/apache/carbondata/examples" target=_blank>study some examples</a>.</p>
-<p><strong>CarbonData SQL Language Reference:</strong> CarbonData extends the Spark SQL language and adds several <a href="./ddl-of-carbondata.html">DDL</a> and <a href="./dml-of-carbondata.html">DML</a> statements to support operations on it.Refer to the <a href="./language-manual.html">Reference Manual</a> to understand the supported features and functions.</p>
+<p><strong>CarbonData SQL Language Reference:</strong> CarbonData extends the Spark SQL language and adds several <a href="./ddl-of-carbondata.html">DDL</a> and <a href="./dml-of-carbondata.html">DML</a> statements to support operations on it. Refer to the <a href="./language-manual.html">Reference Manual</a> to understand the supported features and functions.</p>
 <p><strong>Programming Guides:</strong> You can read our guides about <a href="./sdk-guide.html">Java APIs supported</a> or <a href="./csdk-guide.html">C++ APIs supported</a> to learn how to integrate CarbonData with your applications.</p>
 <h2>
 <a id="integration" class="anchor" href="#integration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration</h2>
-<p>CarbonData can be integrated with popular Execution engines like <a href="./quick-start-guide.html#spark">Spark</a> and <a href="./quick-start-guide.html#presto">Presto</a>.Refer to the <a href="./quick-start-guide.html#integration">Installation and Configuration</a> section to understand all modes of Integrating CarbonData.</p>
+<ul>
+<li>CarbonData can be integrated with popular execution engines like <a href="./quick-start-guide.html#spark">Spark</a>, <a href="./quick-start-guide.html#presto">Presto</a> and <a href="./quick-start-guide.html#hive">Hive</a>.</li>
+<li>CarbonData can be integrated with popular storage engines like HDFS, Huawei Cloud (OBS) and <a href="./quick-start-guide.html#alluxio">Alluxio</a>.<br>
+Refer to the <a href="./quick-start-guide.html#integration">Installation and Configuration</a> section to understand all modes of Integrating CarbonData.</li>
+</ul>
 <h2>
 <a id="contributing-to-carbondata" class="anchor" href="#contributing-to-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Contributing to CarbonData</h2>
 <p>The Apache CarbonData community welcomes all kinds of contributions from anyone with a passion for
diff --git a/content/faq.html b/content/faq.html
index a42bbb8..7536a0b 100644
--- a/content/faq.html
+++ b/content/faq.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -279,12 +279,10 @@ By default <strong>carbon.badRecords.location</strong> specifies the following l
 <a id="how-to-specify-store-location-while-creating-carbon-session" class="anchor" href="#how-to-specify-store-location-while-creating-carbon-session" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>How to specify store location while creating carbon session?</h2>
 <p>The store location specified while creating carbon session is used by the CarbonData to store the meta data like the schema, dictionary files, dictionary meta data and sort indexes.</p>
 <p>Try creating <code>carbonsession</code> with <code>storepath</code> specified in the following manner :</p>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession(&lt;store_path&gt;)
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession(&lt;carbon_store_path&gt;)
 </code></pre>
 <p>Example:</p>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession("hdfs://localhost:9000/carbon/store")
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://localhost:9000/carbon/store")
 </code></pre>
 <h2>
 <a id="what-is-carbon-lock-type" class="anchor" href="#what-is-carbon-lock-type" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>What is Carbon Lock Type?</h2>
@@ -447,9 +445,9 @@ $OverrideCatalog$$overrides_$e
 <p>Use the following command :</p>
 </li>
 </ol>
-<pre><code>"mvn -Pspark-2.1 -Dspark.version {yourSparkVersion} clean package"
+<pre><code>mvn -Pspark-2.1 -Dspark.version {yourSparkVersion} clean package
 </code></pre>
-<p>Note :  Refrain from using "mvn clean package" without specifying the profile.</p>
+<p>Note: Refrain from using "mvn clean package" without specifying the profile.</p>
 <h2>
 <a id="failed-to-execute-load-query-on-cluster" class="anchor" href="#failed-to-execute-load-query-on-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Failed to execute load query on cluster</h2>
 <p><strong>Symptom</strong></p>
diff --git a/content/file-structure-of-carbondata.html b/content/file-structure-of-carbondata.html
index 5230ba3..3201546 100644
--- a/content/file-structure-of-carbondata.html
+++ b/content/file-structure-of-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/s3-guide.html b/content/hive-guide.html
similarity index 75%
copy from src/main/webapp/s3-guide.html
copy to content/hive-guide.html
index ba25dfb..780c766 100644
--- a/src/main/webapp/s3-guide.html
+++ b/content/hive-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -219,72 +219,87 @@
                                 <div class="col-sm-12  col-md-12">
                                     <div>
 <h1>
-<a id="s3-guide" class="anchor" href="#s3-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>S3 Guide</h1>
-<p>Object storage is the recommended storage format in cloud as it can support storing large data
-files. S3 APIs are widely used for accessing object stores. This can be
-used to store or retrieve data on Amazon cloud, Huawei Cloud(OBS) or on any other object
-stores conforming to S3 API.
-Storing data in cloud is advantageous as there are no restrictions on the size of
-data and the data can be accessed from anywhere at any time.
-Carbondata can support any Object Storage that conforms to Amazon S3 API.
-Carbondata relies on Hadoop provided S3 filesystem APIs to access Object stores.</p>
-<h1>
-<a id="writing-to-object-storage" class="anchor" href="#writing-to-object-storage" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Writing to Object Storage</h1>
-<p>To store carbondata files onto Object Store, <code>carbon.storelocation</code> property will have
-to be configured with Object Store path in CarbonProperties file.</p>
-<p>For example:</p>
-<pre><code>carbon.storelocation=s3a://mybucket/carbonstore.
-</code></pre>
-<p>If the existing store location cannot be changed or only specific tables need to be stored
-onto cloud object store, it can be done so by specifying the <code>location</code> option in the create
-table DDL command.</p>
-<p>For example:</p>
-<pre><code>CREATE TABLE IF NOT EXISTS db1.table1(col1 string, col2 int) STORED AS carbondata LOCATION 's3a://mybucket/carbonstore'
-</code></pre>
-<p>For more details on create table, Refer <a href="ddl-of-carbondata.html#create-table">DDL of CarbonData</a></p>
-<h1>
-<a id="authentication" class="anchor" href="#authentication" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Authentication</h1>
-<p>Authentication properties will have to be configured to store the carbondata files on to S3 location.</p>
-<p>Authentication properties can be set in any of the following ways:</p>
-<ol>
+<a id="quick-start" class="anchor" href="#quick-start" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick Start</h1>
+<p>This tutorial provides a quick introduction to using the current integration/hive module.</p>
+<h2>
+<a id="build-in-120-hive-integration-only-support-spark21-and-hadoop272" class="anchor" href="#build-in-120-hive-integration-only-support-spark21-and-hadoop272" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Build (In 1.2.0, hive integration only support spark2.1 and hadoop2.7.2)</h2>
+<p>mvn -DskipTests -Pspark-2.1 -Phadoop-2.7.2 clean package</p>
+<h2>
+<a id="prepare-carbondata-in-spark" class="anchor" href="#prepare-carbondata-in-spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prepare CarbonData in Spark</h2>
+<ul>
 <li>
-<p>Set authentication properties in core-site.xml, refer
-<a href="https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html#Authentication_properties" rel="nofollow">hadoop authentication document</a></p>
+<p>Create a sample.csv file using the following commands. The CSV file is required for loading data into CarbonData.</p>
+<pre><code>cd carbondata
+cat &gt; sample.csv &lt;&lt; EOF
+id,name,scale,country,salary
+1,yuhai,1.77,china,33000.1
+2,runlin,1.70,china,33000.2
+EOF
+</code></pre>
 </li>
 <li>
-<p>Set authentication properties in spark-defaults.conf.</p>
+<p>Copy the data to HDFS</p>
 </li>
-</ol>
-<p>Example</p>
-<pre><code>spark.hadoop.fs.s3a.secret.key=123
-spark.hadoop.fs.s3a.access.key=456
+</ul>
+<pre><code>$HADOOP_HOME/bin/hadoop fs -put sample.csv &lt;hdfs store path&gt;/sample.csv
 </code></pre>
-<ol start="3">
-<li>Pass authentication properties with spark-submit as configuration.</li>
-</ol>
-<p>Example:</p>
-<pre><code>./bin/spark-submit --master yarn --conf spark.hadoop.fs.s3a.secret.key=123 --conf spark.hadoop.fs
-.s3a.access.key=456 --class=
+<ul>
+<li>Add the following parameters to $SPARK_CONF_DIR/conf/hive-site.xml</li>
+</ul>
+<div class="highlight highlight-text-xml"><pre>&lt;<span class="pl-ent">property</span>&gt;
+  &lt;<span class="pl-ent">name</span>&gt;hive.metastore.pre.event.listeners&lt;/<span class="pl-ent">name</span>&gt;
+  &lt;<span class="pl-ent">value</span>&gt;org.apache.carbondata.hive.CarbonHiveMetastoreListener&lt;/<span class="pl-ent">value</span>&gt;
+&lt;/<span class="pl-ent">property</span>&gt;</pre></div>
+<ul>
+<li>Start Spark shell by running the following command in the Spark directory</li>
+</ul>
+<pre><code>./bin/spark-shell --jars &lt;carbondata assembly jar path, carbon hive jar path&gt;
 </code></pre>
-<ol start="4">
-<li>Set authentication properties to hadoop configuration object in sparkContext.</li>
-</ol>
-<p>Example:</p>
-<pre><code>sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.secret.key", "123")
-sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.access.key","456")
+<pre><code>import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.CarbonSession._
+
+// Paths for the carbon store, warehouse and metastore; adjust to your HDFS layout
+val rootPath = "hdfs:///user/hadoop/carbon"
+val storeLocation = s"$rootPath/store"
+val warehouse = s"$rootPath/warehouse"
+val metaStoreDB = s"$rootPath/metastore_db"
+
+// Create a CarbonSession with Hive support enabled
+val carbon = SparkSession.builder().enableHiveSupport().config("spark.sql.warehouse.dir", warehouse).config(org.apache.carbondata.core.constants.CarbonCommonConstants.STORE_LOCATION, storeLocation).getOrCreateCarbonSession(storeLocation, metaStoreDB)
+
+// Create a table, load the sample data and verify with a query
+carbon.sql("create table hive_carbon(id int, name string, scale decimal, country string, salary double) STORED BY 'carbondata'")
+carbon.sql("LOAD DATA INPATH '&lt;hdfs store path&gt;/sample.csv' INTO TABLE hive_carbon")
+carbon.sql("SELECT * FROM hive_carbon").show()
+</code></pre>
+<h2>
+<a id="query-data-in-hive" class="anchor" href="#query-data-in-hive" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Data in Hive</h2>
+<h3>
+<a id="configure-hive-classpath" class="anchor" href="#configure-hive-classpath" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Configure hive classpath</h3>
+<pre><code>mkdir hive/auxlibs/
+cp carbondata/assembly/target/scala-2.11/carbondata_2.11*.jar hive/auxlibs/
+cp carbondata/integration/hive/target/carbondata-hive-*.jar hive/auxlibs/
+cp $SPARK_HOME/jars/spark-catalyst*.jar hive/auxlibs/
+cp $SPARK_HOME/jars/scala*.jar hive/auxlibs/
+export HIVE_AUX_JARS_PATH=hive/auxlibs/
+</code></pre>
+<h3>
+<a id="fix-snappy-issue" class="anchor" href="#fix-snappy-issue" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Fix snappy issue</h3>
+<pre><code>cp $SPARK_HOME/jars/snappy-java-*.jar /Library/Java/Extensions/
+export HADOOP_OPTS="-Dorg.xerial.snappy.lib.path=/Library/Java/Extensions -Dorg.xerial.snappy.lib.name=libsnappyjava.jnilib -Dorg.xerial.snappy.tempdir=/Users/apple/DEMO/tmp"
+</code></pre>
+<h3>
+<a id="start-hive-client" class="anchor" href="#start-hive-client" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Start hive client</h3>
+<pre><code>$HIVE_HOME/bin/hive
+</code></pre>
+<h3>
+<a id="query-data-from-hive-table" class="anchor" href="#query-data-from-hive-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query data from hive table</h3>
+<pre><code>set hive.mapred.supports.subdirectories=true;
+set mapreduce.input.fileinputformat.input.dir.recursive=true;
+
+select * from hive_carbon;
+select count(*) from hive_carbon;
+select * from hive_carbon order by id;
 </code></pre>
-<h1>
-<a id="recommendations" class="anchor" href="#recommendations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Recommendations</h1>
-<ol>
-<li>Object Storage like S3 does not support file leasing mechanism(supported by HDFS) that is
-required to take locks which ensure consistency between concurrent operations therefore, it is
-recommended to set the configurable lock path property(<a href="./configuration-parameters.html#system-configuration">carbon.lock.path</a>)
-to a HDFS directory.</li>
-<li>Concurrent data manipulation operations are not supported. Object stores follow eventual consistency semantics, i.e., any put request might take some time to reflect when trying to list. This behaviour causes the data read is always not consistent or not the latest.</li>
-</ol>
 <script>
 // Show selected style on nav item
-$(function() { $('.b-nav__s3').addClass('selected'); });
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
 </script></div>
 </div>
 </div>
diff --git a/content/how-to-contribute-to-apache-carbondata.html b/content/how-to-contribute-to-apache-carbondata.html
index a6dc1ee..a8e5059 100644
--- a/content/how-to-contribute-to-apache-carbondata.html
+++ b/content/how-to-contribute-to-apache-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/index.html b/content/index.html
index 54bf380..ba38242 100644
--- a/content/index.html
+++ b/content/index.html
@@ -54,6 +54,9 @@
                                 class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -313,6 +316,13 @@
                                 </h4>
                                 <div class="linkblock">
                                     <div class="block-row">
+                                        <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                           target="_blank">Apache CarbonData 1.5.2</a>
+                                        <span class="release-date">Feb 2019</span>
+                                        <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Apache+CarbonData+1.5.2+Release"
+                                           class="whatsnew" target="_blank">what's new</a>
+                                    </div>
+                                    <div class="block-row">
                                         <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                            target="_blank">Apache CarbonData 1.5.1</a>
                                         <span class="release-date">Dec 2018</span>
@@ -347,13 +357,6 @@
                                         <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Apache+CarbonData+1.3.1+Release"
                                            class="whatsnew" target="_blank">what's new</a>
                                     </div>
-                                    <div class="block-row">
-                                        <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.0/"
-                                           target="_blank">Apache CarbonData 1.3.0</a>
-                                        <span class="release-date">Feb 2018</span>
-                                        <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Apache+CarbonData+1.3.0+Release"
-                                           class="whatsnew" target="_blank">what's new</a>
-                                    </div>
                             </div>
                             <div class="nextR">
                                 <h4 class="title">Release Notes
@@ -485,7 +488,7 @@
                             to do is:</p>
                         <ol class="orderlist">
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
                                    target="_blank">Download</a>the latest release.
 
                             </li>
diff --git a/content/introduction.html b/content/introduction.html
index 0cfa369..53e741c 100644
--- a/content/introduction.html
+++ b/content/introduction.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -217,7 +217,8 @@
                         <div id="viewpage" name="viewpage">
                             <div class="row">
                                 <div class="col-sm-12  col-md-12">
-                                    <div><h2>
+                                    <div>
+<h2>
 <a id="what-is-carbondata" class="anchor" href="#what-is-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>What is CarbonData</h2>
 <p>CarbonData is a fully indexed columnar and Hadoop native data-store for processing heavy analytical workloads and detailed queries on big data with Spark SQL. CarbonData allows faster interactive queries over PetaBytes of data.</p>
 <h2>
@@ -340,7 +341,12 @@
 <li>
 <h5>
 <a id="hdfs" class="anchor" href="#hdfs" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>HDFS</h5>
-<p>CarbonData uses HDFS api to write and read data from HDFS.CarbonData can take advantage of the locality information to efficiently suggest spark to run tasks near to the data.</p>
+<p>CarbonData uses the HDFS API to read and write data on HDFS. CarbonData can take advantage of locality information to suggest that Spark run tasks close to the data.</p>
+</li>
+<li>
+<h5>
+<a id="alluxio" class="anchor" href="#alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Alluxio</h5>
+<p>CarbonData also supports reading from and writing to <a href="./quick-start-guide.html#alluxio">Alluxio</a>.</p>
 </li>
 </ul>
 <h2>
diff --git a/content/language-manual.html b/content/language-manual.html
index a95de91..9ac8add 100644
--- a/content/language-manual.html
+++ b/content/language-manual.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/lucene-datamap-guide.html b/content/lucene-datamap-guide.html
index ef819a5..f9675e5 100644
--- a/content/lucene-datamap-guide.html
+++ b/content/lucene-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/performance-tuning.html b/content/performance-tuning.html
index e539614..0a74864 100644
--- a/content/performance-tuning.html
+++ b/content/performance-tuning.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/preaggregate-datamap-guide.html b/content/preaggregate-datamap-guide.html
index 5e0d4e3..9d7a387 100644
--- a/content/preaggregate-datamap-guide.html
+++ b/content/preaggregate-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/quick-start-guide.html b/content/presto-guide.html
similarity index 51%
copy from content/quick-start-guide.html
copy to content/presto-guide.html
index a2f093d..00c3f5f 100644
--- a/content/quick-start-guide.html
+++ b/content/presto-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -219,360 +219,20 @@
                                 <div class="col-sm-12  col-md-12">
                                     <div>
 <h1>
-<a id="quick-start" class="anchor" href="#quick-start" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick Start</h1>
-<p>This tutorial provides a quick introduction to using CarbonData. To follow along with this guide, first download a packaged release of CarbonData from the <a href="https://dist.apache.org/repos/dist/release/carbondata/" target=_blank rel="nofollow">CarbonData website</a>.Alternatively it can be created following <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a> steps.</p>
-<h2>
-<a id="prerequisites" class="anchor" href="#prerequisites" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prerequisites</h2>
-<ul>
-<li>
-<p>CarbonData supports Spark versions upto 2.2.1.Please download Spark package from <a href="https://spark.apache.org/downloads.html" target=_blank rel="nofollow">Spark website</a></p>
-</li>
-<li>
-<p>Create a sample.csv file using the following commands. The CSV file is required for loading data into CarbonData</p>
-<pre><code>cd carbondata
-cat &gt; sample.csv &lt;&lt; EOF
-id,name,city,age
-1,david,shenzhen,31
-2,eason,shenzhen,27
-3,jarry,wuhan,35
-EOF
-</code></pre>
-</li>
-</ul>
-<h2>
-<a id="integration" class="anchor" href="#integration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration</h2>
-<p>CarbonData can be integrated with Spark and Presto Execution Engines. The below documentation guides on Installing and Configuring with these execution engines.</p>
-<h3>
-<a id="spark" class="anchor" href="#spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark</h3>
-<p><a href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Installing and Configuring CarbonData to run locally with Spark Shell</a></p>
-<p><a href="#installing-and-configuring-carbondata-on-standalone-spark-cluster">Installing and Configuring CarbonData on Standalone Spark Cluster</a></p>
-<p><a href="#installing-and-configuring-carbondata-on-spark-on-yarn-cluster">Installing and Configuring CarbonData on Spark on YARN Cluster</a></p>
-<p><a href="#query-execution-using-carbondata-thrift-server">Installing and Configuring CarbonData Thrift Server for Query Execution</a></p>
-<h3>
-<a id="presto" class="anchor" href="#presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto</h3>
-<p><a href="#installing-and-configuring-carbondata-on-presto">Installing and Configuring CarbonData on Presto</a></p>
-<h2>
-<a id="installing-and-configuring-carbondata-to-run-locally-with-spark-shell" class="anchor" href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData to run locally with Spark Shell</h2>
-<p>Apache Spark Shell provides a simple way to learn the API, as well as a powerful tool to analyze data interactively. Please visit <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">Apache Spark Documentation</a> for more details on Spark shell.</p>
-<h4>
-<a id="basics" class="anchor" href="#basics" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Basics</h4>
-<p>Start Spark shell by running the following command in the Spark directory:</p>
-<pre><code>./bin/spark-shell --jars &lt;carbondata assembly jar path&gt;
-</code></pre>
-<p><strong>NOTE</strong>: Path where packaged release of CarbonData was downloaded or assembly jar will be available after <a href="https://github.com/apache/carbondata/blob/master/build/README.md" target=_blank>building CarbonData</a> and can be copied from <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code></p>
-<p>In this shell, SparkSession is readily available as <code>spark</code> and Spark context is readily available as <code>sc</code>.</p>
-<p>In order to create a CarbonSession we will have to configure it explicitly in the following manner :</p>
-<ul>
-<li>Import the following :</li>
-</ul>
-<pre><code>import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.CarbonSession._
-</code></pre>
-<ul>
-<li>Create a CarbonSession :</li>
-</ul>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession("&lt;hdfs store path&gt;")
-</code></pre>
-<p><strong>NOTE</strong>: By default metastore location points to <code>../carbon.metastore</code>, user can provide own metastore location to CarbonSession like <code>SparkSession.builder().config(sc.getConf) .getOrCreateCarbonSession("&lt;hdfs store path&gt;", "&lt;local metastore path&gt;")</code></p>
-<h4>
-<a id="executing-queries" class="anchor" href="#executing-queries" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Executing Queries</h4>
-<h6>
-<a id="creating-a-table" class="anchor" href="#creating-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Creating a Table</h6>
-<pre><code>scala&gt;carbon.sql("CREATE TABLE
-                    IF NOT EXISTS test_table(
-                    id string,
-                    name string,
-                    city string,
-                    age Int)
-                  STORED AS carbondata")
-</code></pre>
-<h6>
-<a id="loading-data-to-a-table" class="anchor" href="#loading-data-to-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data to a Table</h6>
-<pre><code>scala&gt;carbon.sql("LOAD DATA INPATH '/path/to/sample.csv'
-                  INTO TABLE test_table")
-</code></pre>
-<p><strong>NOTE</strong>: Please provide the real file path of <code>sample.csv</code> for the above script.
-If you get "tablestatus.lock" issue, please refer to <a href="faq.html">FAQ</a></p>
-<h6>
-<a id="query-data-from-a-table" class="anchor" href="#query-data-from-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Data from a Table</h6>
-<pre><code>scala&gt;carbon.sql("SELECT * FROM test_table").show()
-
-scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
-                  FROM test_table
-                  GROUP BY city").show()
-</code></pre>
+<a id="presto-guide" class="anchor" href="#presto-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto guide</h1>
+<p>This tutorial provides a quick introduction to using the current integration/presto module.</p>
+<p><a href="#presto-multinode-cluster-setup-for-carbondata">Presto Multinode Cluster Setup for Carbondata</a></p>
+<p><a href="#presto-single-node-setup-for-carbondata">Presto Single Node Setup for Carbondata</a></p>
 <h2>
-<a id="installing-and-configuring-carbondata-on-standalone-spark-cluster" class="anchor" href="#installing-and-configuring-carbondata-on-standalone-spark-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Standalone Spark Cluster</h2>
+<a id="presto-multinode-cluster-setup-for-carbondata" class="anchor" href="#presto-multinode-cluster-setup-for-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto Multinode Cluster Setup for Carbondata</h2>
 <h3>
-<a id="prerequisites-1" class="anchor" href="#prerequisites-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prerequisites</h3>
-<ul>
-<li>Hadoop HDFS and Yarn should be installed and running.</li>
-<li>Spark should be installed and running on all the cluster nodes.</li>
-<li>CarbonData user should have permission to access HDFS.</li>
-</ul>
-<h3>
-<a id="procedure" class="anchor" href="#procedure" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Procedure</h3>
-<ol>
-<li>
-<p><a href="https://github.com/apache/carbondata/blob/master/build/README.md" target=_blank>Build the CarbonData</a> project and get the assembly jar from <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code>.</p>
-</li>
-<li>
-<p>Copy <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code> to <code>$SPARK_HOME/carbonlib</code> folder.</p>
-<p><strong>NOTE</strong>: Create the carbonlib folder if it does not exist inside <code>$SPARK_HOME</code> path.</p>
-</li>
-<li>
-<p>Add the carbonlib folder path in the Spark classpath. (Edit <code>$SPARK_HOME/conf/spark-env.sh</code> file and modify the value of <code>SPARK_CLASSPATH</code> by appending <code>$SPARK_HOME/carbonlib/*</code> to the existing value)</p>
-</li>
-<li>
-<p>Copy the <code>./conf/carbon.properties.template</code> file from CarbonData repository to <code>$SPARK_HOME/conf/</code> folder and rename the file to <code>carbon.properties</code>.</p>
-</li>
-<li>
-<p>Repeat Step 2 to Step 5 in all the nodes of the cluster.</p>
-</li>
-<li>
-<p>In Spark node[master], configure the properties mentioned in the following table in <code>$SPARK_HOME/conf/spark-defaults.conf</code> file.</p>
-</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Value</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>spark.driver.extraJavaOptions</td>
-<td><code>-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties</code></td>
-<td>A string of extra JVM options to pass to the driver. For instance, GC settings or other logging.</td>
-</tr>
-<tr>
-<td>spark.executor.extraJavaOptions</td>
-<td><code>-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties</code></td>
-<td>A string of extra JVM options to pass to executors. For instance, GC settings or other logging. <strong>NOTE</strong>: You can enter multiple values separated by space.</td>
-</tr>
-</tbody>
-</table>
-<ol>
-<li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code> file:</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Required</th>
-<th>Description</th>
-<th>Example</th>
-<th>Remark</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>carbon.storelocation</td>
-<td>NO</td>
-<td>Location where data CarbonData will create the store and write the data in its own format. If not specified then it takes spark.sql.warehouse.dir path.</td>
-<td>hdfs://HOSTNAME:PORT/Opt/CarbonStore</td>
-<td>Propose to set HDFS directory</td>
-</tr>
-</tbody>
-</table>
+<a id="installing-presto" class="anchor" href="#installing-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing Presto</h3>
 <ol>
-<li>Verify the installation. For example:</li>
+<li>Download the 0.210 version of Presto using:</li>
 </ol>
-<pre><code>./spark-shell --master spark://HOSTNAME:PORT --total-executor-cores 2
---executor-memory 2G
+<pre><code>wget https://repo1.maven.org/maven2/com/facebook/presto/presto-server/0.210/presto-server-0.210.tar.gz
 </code></pre>
-<p><strong>NOTE</strong>: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.</p>
-<h2>
-<a id="installing-and-configuring-carbondata-on-spark-on-yarn-cluster" class="anchor" href="#installing-and-configuring-carbondata-on-spark-on-yarn-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Spark on YARN Cluster</h2>
-<p>This section provides the procedure to install CarbonData on "Spark on YARN" cluster.</p>
-<h3>
-<a id="prerequisites-2" class="anchor" href="#prerequisites-2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prerequisites</h3>
-<ul>
-<li>Hadoop HDFS and Yarn should be installed and running.</li>
-<li>Spark should be installed and running in all the clients.</li>
-<li>CarbonData user should have permission to access HDFS.</li>
-</ul>
-<h3>
-<a id="procedure-1" class="anchor" href="#procedure-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Procedure</h3>
-<p>The following steps are only for Driver Nodes. (Driver nodes are the one which starts the spark context.)</p>
-<ol>
-<li>
-<p><a href="https://github.com/apache/carbondata/blob/master/build/README.md" target=_blank>Build the CarbonData</a> project and get the assembly jar from <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code> and copy to <code>$SPARK_HOME/carbonlib</code> folder.</p>
-<p><strong>NOTE</strong>: Create the carbonlib folder if it does not exists inside <code>$SPARK_HOME</code> path.</p>
-</li>
-<li>
-<p>Copy the <code>./conf/carbon.properties.template</code> file from CarbonData repository to <code>$SPARK_HOME/conf/</code> folder and rename the file to <code>carbon.properties</code>.</p>
-</li>
-<li>
-<p>Create <code>tar.gz</code> file of carbonlib folder and move it inside the carbonlib folder.</p>
-</li>
-</ol>
-<pre><code>cd $SPARK_HOME
-tar -zcvf carbondata.tar.gz carbonlib/
-mv carbondata.tar.gz carbonlib/
-</code></pre>
-<ol>
-<li>Configure the properties mentioned in the following table in <code>$SPARK_HOME/conf/spark-defaults.conf</code> file.</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Description</th>
-<th>Value</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>spark.master</td>
-<td>Set this value to run the Spark in yarn cluster mode.</td>
-<td>Set yarn-client to run the Spark in yarn cluster mode.</td>
-</tr>
-<tr>
-<td>spark.yarn.dist.files</td>
-<td>Comma-separated list of files to be placed in the working directory of each executor.</td>
-<td><code>$SPARK_HOME/conf/carbon.properties</code></td>
-</tr>
-<tr>
-<td>spark.yarn.dist.archives</td>
-<td>Comma-separated list of archives to be extracted into the working directory of each executor.</td>
-<td><code>$SPARK_HOME/carbonlib/carbondata.tar.gz</code></td>
-</tr>
-<tr>
-<td>spark.executor.extraJavaOptions</td>
-<td>A string of extra JVM options to pass to executors. For instance  <strong>NOTE</strong>: You can enter multiple values separated by space.</td>
-<td><code>-Dcarbon.properties.filepath = carbon.properties</code></td>
-</tr>
-<tr>
-<td>spark.executor.extraClassPath</td>
-<td>Extra classpath entries to prepend to the classpath of executors. <strong>NOTE</strong>: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the values in below parameter spark.driver.extraClassPath</td>
-<td><code>carbondata.tar.gz/carbonlib/*</code></td>
-</tr>
-<tr>
-<td>spark.driver.extraClassPath</td>
-<td>Extra classpath entries to prepend to the classpath of the driver. <strong>NOTE</strong>: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the value in below parameter spark.driver.extraClassPath.</td>
-<td><code>$SPARK_HOME/carbonlib/*</code></td>
-</tr>
-<tr>
-<td>spark.driver.extraJavaOptions</td>
-<td>A string of extra JVM options to pass to the driver. For instance, GC settings or other logging.</td>
-<td><code>-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties</code></td>
-</tr>
-</tbody>
-</table>
-<ol>
-<li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code>:</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Required</th>
-<th>Description</th>
-<th>Example</th>
-<th>Default Value</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>carbon.storelocation</td>
-<td>NO</td>
-<td>Location where CarbonData will create the store and write the data in its own format. If not specified then it takes spark.sql.warehouse.dir path.</td>
-<td>hdfs://HOSTNAME:PORT/Opt/CarbonStore</td>
-<td>Recommended to set an HDFS directory</td>
-</tr>
-</tbody>
-</table>
-<ol>
-<li>Verify the installation.</li>
-</ol>
-<pre><code> ./bin/spark-shell --master yarn-client --driver-memory 1g
- --executor-cores 2 --executor-memory 2G
-</code></pre>
-<p><strong>NOTE</strong>: Make sure you have read permissions on the CarbonData JARs and files so that the driver and executors can start.</p>
-<h2>
-<a id="query-execution-using-carbondata-thrift-server" class="anchor" href="#query-execution-using-carbondata-thrift-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Execution Using CarbonData Thrift Server</h2>
-<h3>
-<a id="starting-carbondata-thrift-server" class="anchor" href="#starting-carbondata-thrift-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Starting CarbonData Thrift Server.</h3>
-<p>a. cd <code>$SPARK_HOME</code></p>
-<p>b. Run the following command to start the CarbonData thrift server.</p>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
-$SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
-</code></pre>
-<table>
-<thead>
-<tr>
-<th>Parameter</th>
-<th>Description</th>
-<th>Example</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>CARBON_ASSEMBLY_JAR</td>
-<td>CarbonData assembly jar name present in the <code>$SPARK_HOME/carbonlib/</code> folder.</td>
-<td>carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar</td>
-</tr>
-<tr>
-<td>carbon_store_path</td>
-<td>This is a parameter to the CarbonThriftServer class. This is an HDFS path where CarbonData files will be kept. It is strongly recommended to set this to the same value as the carbon.storelocation parameter of carbon.properties. If not specified, it takes the spark.sql.warehouse.dir path.</td>
-<td><code>hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store</code></td>
-</tr>
-</tbody>
-</table>
-<p><strong>NOTE</strong>: From Spark 1.6, by default the Thrift server runs in multi-session mode, which means each JDBC/ODBC connection owns its own copy of the SQL configuration and temporary function registry. Cached tables are still shared though. If you prefer to run the Thrift server in single-session mode and share all SQL configuration and the temporary function registry, please set the option <code>spark.sql.hive.thriftServer.singleSession</code> to <code>true</code>. You may either add [...]
-<pre><code>./bin/spark-submit
---conf spark.sql.hive.thriftServer.singleSession=true
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
-$SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
-</code></pre>
-<p><strong>But</strong> in single-session mode, if one user changes the database from one connection, the database of the other connections will be changed too.</p>
-<p><strong>Examples</strong></p>
-<ul>
-<li>Start with default memory and executors.</li>
-</ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
-$SPARK_HOME/carbonlib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
-hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
-</code></pre>
-<ul>
-<li>Start with Fixed executors and resources.</li>
-</ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
---num-executors 3 --driver-memory 20g --executor-memory 250g 
---executor-cores 32 
-/srv/OSCON/BigData/HACluster/install/spark/sparkJdbc/lib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
-hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
-</code></pre>
-<h3>
-<a id="connecting-to-carbondata-thrift-server-using-beeline" class="anchor" href="#connecting-to-carbondata-thrift-server-using-beeline" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Connecting to CarbonData Thrift Server Using Beeline.</h3>
-<pre><code>     cd $SPARK_HOME
-     ./sbin/start-thriftserver.sh
-     ./bin/beeline -u jdbc:hive2://&lt;thriftserver_host&gt;:port
-
-     Example
-     ./bin/beeline -u jdbc:hive2://10.10.10.10:10000
-</code></pre>
-<h2>
-<a id="installing-and-configuring-carbondata-on-presto" class="anchor" href="#installing-and-configuring-carbondata-on-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Presto</h2>
-<p><strong>NOTE:</strong> <strong>CarbonData tables cannot be created or loaded from Presto. Users need to create a CarbonData table and load data into it
-either with <a href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Spark</a>, the <a href="./sdk-guide.html">SDK</a> or the <a href="./csdk-guide.html">C++ SDK</a>.
-Once the table is created, it can be queried from Presto.</strong></p>
-<h3>
-<a id="installing-presto" class="anchor" href="#installing-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing Presto</h3>
-<ol>
-<li>
-<p>Download the 0.210 version of Presto using:
-<code>wget https://repo1.maven.org/maven2/com/facebook/presto/presto-server/0.210/presto-server-0.210.tar.gz</code></p>
-</li>
+<ol start="2">
 <li>
 <p>Extract Presto tar file: <code>tar zxvf presto-server-0.210.tar.gz</code>.</p>
 </li>
@@ -625,8 +285,8 @@ node.data-dir=/home/ubuntu/data
 <pre><code>com.facebook.presto=INFO
 </code></pre>
 <p>The default minimum level is <code>INFO</code>. There are four levels: <code>DEBUG</code>, <code>INFO</code>, <code>WARN</code> and <code>ERROR</code>.</p>
-<h3>
-<a id="coordinator-configurations" class="anchor" href="#coordinator-configurations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Coordinator Configurations</h3>
+<h2>
+<a id="coordinator-configurations" class="anchor" href="#coordinator-configurations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Coordinator Configurations</h2>
 <h5>
 <a id="contents-of-your-configproperties" class="anchor" href="#contents-of-your-configproperties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Contents of your config.properties</h5>
 <pre><code>coordinator=true
@@ -637,13 +297,10 @@ query.max-total-memory-per-node=5GB
 query.max-memory-per-node=3GB
 memory.heap-headroom-per-node=1GB
 discovery-server.enabled=true
-discovery.uri=http://localhost:8086
-task.max-worker-threads=4
-optimizer.dictionary-aggregation=true
-optimizer.optimize-hash-generation = false
+discovery.uri=&lt;coordinator_ip&gt;:8086
 </code></pre>
 <p>The options <code>node-scheduler.include-coordinator=false</code> and <code>coordinator=true</code> indicate that the node is the coordinator and tell the coordinator not to do any of the computation work itself and to use the workers.</p>
-<p><strong>Note</strong>: It is recommended to set <code>query.max-memory-per-node</code> to half of the JVM config max memory, though the workload is highly concurrent, lower value for <code>query.max-memory-per-node</code> is to be used.</p>
+<p><strong>Note</strong>: We recommend setting <code>query.max-memory-per-node</code> to half of the JVM config max memory, though if your workload is highly concurrent, you may want to use a lower value for <code>query.max-memory-per-node</code>.</p>
 <p>Also, the relation between the two configuration properties below should be:
if <code>query.max-memory-per-node=30GB</code>,
then <code>query.max-memory=&lt;30GB * number of nodes&gt;</code>.</p>
@@ -657,7 +314,7 @@ query.max-memory=5GB
 query.max-memory-per-node=2GB
 discovery.uri=&lt;coordinator_ip&gt;:8086
 </code></pre>
-<p><strong>Note</strong>: <code>jvm.config</code> and <code>node.properties</code> files are same for all the nodes (worker + coordinator). All the nodes should have different <code>node.id</code>.(generated by uuid command).</p>
+<p><strong>Note</strong>: The <code>jvm.config</code> and <code>node.properties</code> files are the same for all the nodes (worker + coordinator). Each node should have a different <code>node.id</code>.</p>
 <h3>
 <a id="catalog-configurations" class="anchor" href="#catalog-configurations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Catalog Configurations</h3>
 <ol>
@@ -684,8 +341,6 @@ discovery.uri=&lt;coordinator_ip&gt;:8086
 <p>To run it in the foreground.</p>
 <h3>
 <a id="start-presto-cli" class="anchor" href="#start-presto-cli" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Start Presto CLI</h3>
-<pre><code>./presto
-</code></pre>
 <p>To connect to the carbondata catalog, use the following command:</p>
 <pre><code>./presto --server &lt;coordinator_ip&gt;:8086 --catalog carbondata --schema &lt;schema_name&gt;
 </code></pre>
@@ -693,19 +348,121 @@ discovery.uri=&lt;coordinator_ip&gt;:8086
 <pre><code>select * from system.runtime.nodes;
 </code></pre>
 <p>Now you can use the Presto CLI on the coordinator to query data sources in the catalog using the Presto workers.</p>
-<p>List the schemas(databases) available</p>
-<pre><code>show schemas;
+<h2>
+<a id="presto-single-node-setup-for-carbondata" class="anchor" href="#presto-single-node-setup-for-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto Single Node Setup for Carbondata</h2>
+<h3>
+<a id="config-presto-server" class="anchor" href="#config-presto-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Config presto server</h3>
+<ul>
+<li>Download the Presto server (0.210 is suggested and supported): <a href="https://repo1.maven.org/maven2/com/facebook/presto/presto-server/" target=_blank rel="nofollow">https://repo1.maven.org/maven2/com/facebook/presto/presto-server/</a>
+</li>
+<li>Complete the Presto configuration following <a href="https://prestodb.io/docs/current/installation/deployment.html" target=_blank rel="nofollow">https://prestodb.io/docs/current/installation/deployment.html</a>.
+An example configuration:</li>
+</ul>
+<p><strong>config.properties</strong></p>
+<pre><code>coordinator=true
+node-scheduler.include-coordinator=true
+http-server.http.port=8086
+query.max-memory=5GB
+query.max-total-memory-per-node=5GB
+query.max-memory-per-node=3GB
+memory.heap-headroom-per-node=1GB
+discovery-server.enabled=true
+discovery.uri=http://localhost:8086
+task.max-worker-threads=4
+optimizer.dictionary-aggregation=true
+optimizer.optimize-hash-generation = false  
+</code></pre>
+<p><strong>jvm.config</strong></p>
+<pre><code>-server
+-Xmx4G
+-XX:+UseG1GC
+-XX:G1HeapRegionSize=32M
+-XX:+UseGCOverheadLimit
+-XX:+ExplicitGCInvokesConcurrent
+-XX:+HeapDumpOnOutOfMemoryError
+-XX:OnOutOfMemoryError=kill -9 %p
+-XX:+TraceClassLoading
+-Dcarbon.properties.filepath=&lt;path&gt;/carbon.properties
+
 </code></pre>
-<p>Selected the schema where CarbonData table resides</p>
-<pre><code>use carbonschema;
+<p>The <code>carbon.properties.filepath</code> property sets the carbon.properties file path. It is recommended to set it, otherwise some features may not work; see the example above.</p>
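+<p>As a minimal illustration, the referenced carbon.properties file could contain just a store location (the path below is only an example):</p>
+<pre><code>carbon.storelocation=hdfs://localhost:9000/carbon/data/store
+</code></pre>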
+<p><strong>log.properties</strong></p>
+<pre><code>com.facebook.presto=DEBUG
+com.facebook.presto.server.PluginManager=DEBUG
 </code></pre>
-<p>List the available tables</p>
-<pre><code>show tables;
+<p><strong>node.properties</strong></p>
+<pre><code>node.environment=carbondata
+node.id=ffffffff-ffff-ffff-ffff-ffffffffffff
+node.data-dir=/Users/apple/DEMO/presto_test/data
 </code></pre>
-<p>Query from the available tables</p>
-<pre><code>select * from carbon_table;
+<ul>
+<li>
+<p>Configure the carbondata-connector for Presto</p>
+<p>First, compile CarbonData, including the carbondata-presto integration module:</p>
+<pre><code>$ git clone https://github.com/apache/carbondata
+$ cd carbondata
+$ mvn -DskipTests -P{spark-version} -Dspark.version={spark-version-number} -Dhadoop.version={hadoop-version-number} clean package
 </code></pre>
-<p><strong>Note :</strong> Create Tables and data loads should be done before executing queries as we can not create carbon table from this interface.</p>
+<p>Replace the Spark and Hadoop versions with the versions used in your cluster.
+For example, if you are using Spark 2.2.1 and Hadoop 2.7.2, compile using:</p>
+<pre><code>mvn -DskipTests -Pspark-2.2 -Dspark.version=2.2.1 -Dhadoop.version=2.7.2 clean package
+</code></pre>
+<p>Second, create a folder named 'carbondata' under $PRESTO_HOME$/plugin and
+copy all jars from carbondata/integration/presto/target/carbondata-presto-x.x.x-SNAPSHOT
+to $PRESTO_HOME$/plugin/carbondata.</p>
+<p><strong>NOTE:</strong> Copying the assembly jar alone will not work; you need to copy all jars from integration/presto/target/carbondata-presto-x.x.x-SNAPSHOT.</p>
+<p>Third, create a carbondata.properties file under $PRESTO_HOME$/etc/catalog/ with the following contents:</p>
+<pre><code>connector.name=carbondata
+hive.metastore.uri=thrift://&lt;host&gt;:&lt;port&gt;
+</code></pre>
+<p>CarbonData is one of the supported formats of the Presto hive plugin, so the configuration and setup are similar to those of the Presto hive connector.
+Please refer to <a href="https://prestodb.io/docs/current/connector/hive.html" target=_blank rel="nofollow">https://prestodb.io/docs/current/connector/hive.html</a> for more details.</p>
+<p><strong>Note</strong>: Since carbon works only with the hive metastore, Spark must connect to the same metastore DB for creating and updating tables.
+All operations done in Spark are reflected in Presto immediately.
+It is mandatory to create carbon tables from Spark using CarbonData 1.5.2 or a later version, since the input/output formats are set correctly in the carbon table only from this version onwards.</p>
+</li>
+</ul>
+<h4>
+<a id="connecting-to-carbondata-store-on-s3" class="anchor" href="#connecting-to-carbondata-store-on-s3" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Connecting to carbondata store on s3</h4>
+<ul>
+<li>
+<p>To query a carbon store on S3 using the S3A API, put the following additional properties inside $PRESTO_HOME$/etc/catalog/carbondata.properties:</p>
+<pre><code> Required properties
+
+ hive.s3.aws-access-key={value}
+ hive.s3.aws-secret-key={value}
+ 
+ Optional properties
+ 
+ hive.s3.endpoint={value}
+</code></pre>
+<p>Please refer to <a href="https://prestodb.io/docs/current/connector/hive.html" target=_blank rel="nofollow">https://prestodb.io/docs/current/connector/hive.html</a> for more details on S3 integration.</p>
+</li>
+</ul>
+<h3>
+<a id="generate-carbondata-file" class="anchor" href="#generate-carbondata-file" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Generate CarbonData file</h3>
+<p>Please refer to the quick start guide: <a href="https://github.com/apache/carbondata/blob/master/docs/quick-start-guide.html" target=_blank>https://github.com/apache/carbondata/blob/master/docs/quick-start-guide.html</a>.
+The load data statement in Spark can be used to populate carbondata tables, after which you can easily find
+the created carbondata files.</p>
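+<p>For example, a minimal sketch in spark-shell (the table name and csv path are illustrative; see the quick start guide for the full flow):</p>
+<pre><code>carbon.sql("CREATE TABLE IF NOT EXISTS test_table(id string, name string, city string, age Int) STORED AS carbondata")
+carbon.sql("LOAD DATA INPATH '/path/to/sample.csv' INTO TABLE test_table")
+</code></pre>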
+<h3>
+<a id="query-carbondata-in-cli-of-presto" class="anchor" href="#query-carbondata-in-cli-of-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query carbondata in CLI of presto</h3>
+<ul>
+<li>
+<p>Download the Presto CLI client of version 0.210: <a href="https://repo1.maven.org/maven2/com/facebook/presto/presto-cli" target=_blank rel="nofollow">https://repo1.maven.org/maven2/com/facebook/presto/presto-cli</a></p>
+</li>
+<li>
+<p>Start CLI:</p>
+<pre><code>$ ./presto --server localhost:8086 --catalog carbondata --schema default
+</code></pre>
+<p>Replace the hostname, port and schema name with your own. You can then run queries as shown below.</p>
+</li>
+</ul>
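+<p>Once connected, you can browse schemas and query tables that were created from Spark, for example (the table name is illustrative):</p>
+<pre><code>show schemas;
+show tables;
+select * from test_table;
+</code></pre>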
+<h3>
+<a id="supported-features-of-presto-carbon" class="anchor" href="#supported-features-of-presto-carbon" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Supported features of presto carbon</h3>
+<p>Presto carbon only supports reading carbon tables written by spark carbon or the carbon SDK.
+During reads, it supports non-distributed datamaps such as the block datamap and the bloom datamap.
+It does not support the MV datamap or the pre-aggregate datamap, as these require the query plan to be changed, which Presto does not allow.
+Presto carbon also supports reading streaming segments from streaming tables created by Spark.</p>
 <script>
 // Show selected style on nav item
 $(function() { $('.b-nav__quickstart').addClass('selected'); });
diff --git a/content/quick-start-guide.html b/content/quick-start-guide.html
index a2f093d..b321353 100644
--- a/content/quick-start-guide.html
+++ b/content/quick-start-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -241,16 +241,32 @@ EOF
 </ul>
 <h2>
 <a id="integration" class="anchor" href="#integration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration</h2>
-<p>CarbonData can be integrated with Spark and Presto Execution Engines. The below documentation guides on Installing and Configuring with these execution engines.</p>
 <h3>
-<a id="spark" class="anchor" href="#spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark</h3>
+<a id="integration-with-execution-engines" class="anchor" href="#integration-with-execution-engines" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration with Execution Engines</h3>
+<p>CarbonData can be integrated with the Spark, Presto and Hive execution engines. The documentation below guides you through installing and configuring CarbonData with these execution engines.</p>
+<h4>
+<a id="spark" class="anchor" href="#spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark</h4>
 <p><a href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Installing and Configuring CarbonData to run locally with Spark Shell</a></p>
 <p><a href="#installing-and-configuring-carbondata-on-standalone-spark-cluster">Installing and Configuring CarbonData on Standalone Spark Cluster</a></p>
 <p><a href="#installing-and-configuring-carbondata-on-spark-on-yarn-cluster">Installing and Configuring CarbonData on Spark on YARN Cluster</a></p>
 <p><a href="#query-execution-using-carbondata-thrift-server">Installing and Configuring CarbonData Thrift Server for Query Execution</a></p>
-<h3>
-<a id="presto" class="anchor" href="#presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto</h3>
+<h4>
+<a id="presto" class="anchor" href="#presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto</h4>
 <p><a href="#installing-and-configuring-carbondata-on-presto">Installing and Configuring CarbonData on Presto</a></p>
+<h4>
+<a id="hive" class="anchor" href="#hive" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Hive</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/hive-guide.html" target=_blank>Installing and Configuring CarbonData on Hive</a></p>
+<h3>
+<a id="integration-with-storage-engines" class="anchor" href="#integration-with-storage-engines" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration with Storage Engines</h3>
+<h4>
+<a id="hdfs" class="anchor" href="#hdfs" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>HDFS</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/quick-start-guide.html#installing-and-configuring-carbondata-on-standalone-spark-cluster">CarbonData supports read and write with HDFS</a></p>
+<h4>
+<a id="s3" class="anchor" href="#s3" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>S3</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/s3-guide.html" target=_blank>CarbonData supports read and write with S3</a></p>
+<h4>
+<a id="alluxio" class="anchor" href="#alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Alluxio</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/alluxio-guide.html" target=_blank>CarbonData supports read and write with Alluxio</a></p>
 <h2>
 <a id="installing-and-configuring-carbondata-to-run-locally-with-spark-shell" class="anchor" href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData to run locally with Spark Shell</h2>
 <p>Apache Spark Shell provides a simple way to learn the API, as well as a powerful tool to analyze data interactively. Please visit <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">Apache Spark Documentation</a> for more details on Spark shell.</p>
@@ -271,36 +287,44 @@ import org.apache.spark.sql.CarbonSession._
 <ul>
 <li>Create a CarbonSession :</li>
 </ul>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession("&lt;hdfs store path&gt;")
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("&lt;carbon_store_path&gt;")
 </code></pre>
-<p><strong>NOTE</strong>: By default metastore location points to <code>../carbon.metastore</code>, user can provide own metastore location to CarbonSession like <code>SparkSession.builder().config(sc.getConf) .getOrCreateCarbonSession("&lt;hdfs store path&gt;", "&lt;local metastore path&gt;")</code></p>
+<p><strong>NOTE</strong></p>
+<ul>
+<li>By default the metastore location points to <code>../carbon.metastore</code>; users can provide their own metastore location to CarbonSession like
+<code>SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("&lt;carbon_store_path&gt;", "&lt;local metastore path&gt;")</code> (see the example after this list).</li>
+<li>Data storage location can be specified by <code>&lt;carbon_store_path&gt;</code>, like <code>/carbon/data/store</code>, <code>hdfs://localhost:9000/carbon/data/store</code> or <code>s3a://carbon/data/store</code>.</li>
+</ul>
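+<p>For instance, a minimal sketch of a session with an explicit HDFS store path and a local metastore path (both paths are illustrative):</p>
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
+  .getOrCreateCarbonSession("hdfs://localhost:9000/carbon/data/store", "/tmp/carbon.metastore")
+</code></pre>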
 <h4>
 <a id="executing-queries" class="anchor" href="#executing-queries" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Executing Queries</h4>
 <h6>
 <a id="creating-a-table" class="anchor" href="#creating-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Creating a Table</h6>
-<pre><code>scala&gt;carbon.sql("CREATE TABLE
-                    IF NOT EXISTS test_table(
-                    id string,
-                    name string,
-                    city string,
-                    age Int)
-                  STORED AS carbondata")
+<pre><code>carbon.sql(
+           s"""
+              | CREATE TABLE IF NOT EXISTS test_table(
+              |   id string,
+              |   name string,
+              |   city string,
+              |   age Int)
+              | STORED AS carbondata
+           """.stripMargin)
 </code></pre>
 <h6>
 <a id="loading-data-to-a-table" class="anchor" href="#loading-data-to-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data to a Table</h6>
-<pre><code>scala&gt;carbon.sql("LOAD DATA INPATH '/path/to/sample.csv'
-                  INTO TABLE test_table")
+<pre><code>carbon.sql("LOAD DATA INPATH '/path/to/sample.csv' INTO TABLE test_table")
 </code></pre>
 <p><strong>NOTE</strong>: Please provide the real file path of <code>sample.csv</code> for the above script.
If you get a "tablestatus.lock" issue, please refer to the <a href="faq.html">FAQ</a>.</p>
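 <p>For reference, a minimal <code>sample.csv</code> matching the schema above could look like this (contents are illustrative):</p>
 <pre><code>id,name,city,age
 1,david,shenzhen,31
 2,eason,shenzhen,27
 3,jarry,wuhan,35
 </code></pre>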
 <h6>
 <a id="query-data-from-a-table" class="anchor" href="#query-data-from-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Data from a Table</h6>
-<pre><code>scala&gt;carbon.sql("SELECT * FROM test_table").show()
+<pre><code>carbon.sql("SELECT * FROM test_table").show()
 
-scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
-                  FROM test_table
-                  GROUP BY city").show()
+carbon.sql(
+           s"""
+              | SELECT city, avg(age), sum(age)
+              | FROM test_table
+              | GROUP BY city
+           """.stripMargin).show()
 </code></pre>
 <h2>
 <a id="installing-and-configuring-carbondata-on-standalone-spark-cluster" class="anchor" href="#installing-and-configuring-carbondata-on-standalone-spark-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Standalone Spark Cluster</h2>
@@ -355,7 +379,7 @@ scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="7">
 <li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code> file:</li>
 </ol>
 <table>
@@ -378,10 +402,12 @@ scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="8">
 <li>Verify the installation. For example:</li>
 </ol>
-<pre><code>./spark-shell --master spark://HOSTNAME:PORT --total-executor-cores 2
+<pre><code>./bin/spark-shell \
+--master spark://HOSTNAME:PORT \
+--total-executor-cores 2 \
 --executor-memory 2G
 </code></pre>
 <p><strong>NOTE</strong>: Make sure you have read permissions on the CarbonData JARs and files so that the driver and executors can start.</p>
@@ -414,7 +440,7 @@ scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
 tar -zcvf carbondata.tar.gz carbonlib/
 mv carbondata.tar.gz carbonlib/
 </code></pre>
-<ol>
+<ol start="4">
 <li>Configure the properties mentioned in the following table in <code>$SPARK_HOME/conf/spark-defaults.conf</code> file.</li>
 </ol>
 <table>
@@ -463,7 +489,7 @@ mv carbondata.tar.gz carbonlib/
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="5">
 <li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code>:</li>
 </ol>
 <table>
@@ -486,11 +512,14 @@ mv carbondata.tar.gz carbonlib/
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="6">
 <li>Verify the installation.</li>
 </ol>
-<pre><code> ./bin/spark-shell --master yarn-client --driver-memory 1g
- --executor-cores 2 --executor-memory 2G
+<pre><code>./bin/spark-shell \
+--master yarn-client \
+--driver-memory 1G \
+--executor-memory 2G \
+--executor-cores 2
 </code></pre>
 <p><strong>NOTE</strong>: Make sure you have read permissions on the CarbonData JARs and files so that the driver and executors can start.</p>
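 <p>Once the shell is up, a quick smoke test (the store path is illustrative) can confirm that the CarbonData classes are usable:</p>
 <pre><code>import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.CarbonSession._
 val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://localhost:9000/carbon/data/store")
 carbon.sql("SHOW DATABASES").show()
 </code></pre>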
 <h2>
@@ -499,8 +528,8 @@ mv carbondata.tar.gz carbonlib/
 <a id="starting-carbondata-thrift-server" class="anchor" href="#starting-carbondata-thrift-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Starting CarbonData Thrift Server.</h3>
 <p>a. cd <code>$SPARK_HOME</code></p>
 <p>b. Run the following command to start the CarbonData thrift server.</p>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
+<pre><code>./bin/spark-submit \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
 $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 </code></pre>
 <table>
@@ -525,9 +554,9 @@ $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 </tbody>
 </table>
 <p><strong>NOTE</strong>: From Spark 1.6, by default the Thrift server runs in multi-session mode, which means each JDBC/ODBC connection owns its own copy of the SQL configuration and temporary function registry. Cached tables are still shared though. If you prefer to run the Thrift server in single-session mode and share all SQL configuration and the temporary function registry, please set the option <code>spark.sql.hive.thriftServer.singleSession</code> to <code>true</code>. You may either add [...]
-<pre><code>./bin/spark-submit
---conf spark.sql.hive.thriftServer.singleSession=true
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
+<pre><code>./bin/spark-submit \
+--conf spark.sql.hive.thriftServer.singleSession=true \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
 $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 </code></pre>
 <p><strong>But</strong> in single-session mode, if one user changes the database from one connection, the database of the other connections will be changed too.</p>
@@ -535,31 +564,31 @@ $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 <ul>
 <li>Start with default memory and executors.</li>
 </ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
-$SPARK_HOME/carbonlib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
+<pre><code>./bin/spark-submit \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
+$SPARK_HOME/carbonlib/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar \
 hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
 </code></pre>
 <ul>
 <li>Start with Fixed executors and resources.</li>
 </ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
---num-executors 3 --driver-memory 20g --executor-memory 250g 
---executor-cores 32 
-/srv/OSCON/BigData/HACluster/install/spark/sparkJdbc/lib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
+<pre><code>./bin/spark-submit \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
+--num-executors 3 \
+--driver-memory 20G \
+--executor-memory 250G \
+--executor-cores 32 \
+$SPARK_HOME/carbonlib/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar \
 hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
 </code></pre>
 <h3>
 <a id="connecting-to-carbondata-thrift-server-using-beeline" class="anchor" href="#connecting-to-carbondata-thrift-server-using-beeline" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Connecting to CarbonData Thrift Server Using Beeline.</h3>
-<pre><code>     cd $SPARK_HOME
-     ./sbin/start-thriftserver.sh
-     ./bin/beeline -u jdbc:hive2://&lt;thriftserver_host&gt;:port
+<pre><code>cd $SPARK_HOME
+./sbin/start-thriftserver.sh
+./bin/beeline -u jdbc:hive2://&lt;thriftserver_host&gt;:port
 
-     Example
-     ./bin/beeline -u jdbc:hive2://10.10.10.10:10000
+Example
+./bin/beeline -u jdbc:hive2://10.10.10.10:10000
 </code></pre>
 <h2>
 <a id="installing-and-configuring-carbondata-on-presto" class="anchor" href="#installing-and-configuring-carbondata-on-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Presto</h2>
@@ -580,29 +609,23 @@ Once the table is created,it can be queried from Presto.</strong></p>
 <p>Download the Presto CLI for the coordinator and name it presto.</p>
 </li>
 </ol>
-<pre><code>  wget https://repo1.maven.org/maven2/com/facebook/presto/presto-cli/0.210/presto-cli-0.210-executable.jar
+<pre><code>wget https://repo1.maven.org/maven2/com/facebook/presto/presto-cli/0.210/presto-cli-0.210-executable.jar
 
-  mv presto-cli-0.210-executable.jar presto
+mv presto-cli-0.210-executable.jar presto
 
-  chmod +x presto
+chmod +x presto
 </code></pre>
 <h3>
 <a id="create-configuration-files" class="anchor" href="#create-configuration-files" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Create Configuration Files</h3>
 <ol>
-<li>
-<p>Create <code>etc</code> folder in presto-server-0.210 directory.</p>
-</li>
-<li>
-<p>Create <code>config.properties</code>, <code>jvm.config</code>, <code>log.properties</code>, and <code>node.properties</code> files.</p>
-</li>
-<li>
-<p>Install uuid to generate a node.id.</p>
+<li>Create an <code>etc</code> folder in the presto-server-0.210 directory.</li>
+<li>Create <code>config.properties</code>, <code>jvm.config</code>, <code>log.properties</code>, and <code>node.properties</code> files.</li>
+<li>Install uuid to generate a node.id.</li>
+</ol>
 <pre><code>sudo apt-get install uuid
 
 uuid
 </code></pre>
-</li>
-</ol>
 <h5>
 <a id="contents-of-your-nodeproperties-file" class="anchor" href="#contents-of-your-nodeproperties-file" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Contents of your node.properties file</h5>
 <pre><code>node.environment=production
@@ -706,6 +729,7 @@ discovery.uri=&lt;coordinator_ip&gt;:8086
 <pre><code>select * from carbon_table;
 </code></pre>
 <p><strong>Note:</strong> Tables should be created and data loads performed before executing queries, as we cannot create carbon tables from this interface.</p>
 <script>
 // Show selected style on nav item
 $(function() { $('.b-nav__quickstart').addClass('selected'); });
diff --git a/content/release-guide.html b/content/release-guide.html
index dcdaba3..ad94ba0 100644
--- a/content/release-guide.html
+++ b/content/release-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/s3-guide.html b/content/s3-guide.html
index ba25dfb..bf6f06d 100644
--- a/content/s3-guide.html
+++ b/content/s3-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -233,7 +233,7 @@ Carbondata relies on Hadoop provided S3 filesystem APIs to access Object stores.
 <p>To store carbondata files on an Object Store, the <code>carbon.storelocation</code> property will have
to be configured with the Object Store path in the CarbonProperties file.</p>
 <p>For example:</p>
-<pre><code>carbon.storelocation=s3a://mybucket/carbonstore.
+<pre><code>carbon.storelocation=s3a://mybucket/carbonstore
 </code></pre>
 <p>If the existing store location cannot be changed or only specific tables need to be stored
on a cloud object store, this can be done by specifying the <code>location</code> option in the create
@@ -263,8 +263,11 @@ spark.hadoop.fs.s3a.access.key=456
 <li>Pass authentication properties with spark-submit as configuration.</li>
 </ol>
 <p>Example:</p>
-<pre><code>./bin/spark-submit --master yarn --conf spark.hadoop.fs.s3a.secret.key=123 --conf spark.hadoop.fs
-.s3a.access.key=456 --class=
+<pre><code>./bin/spark-submit \
+--master yarn \
+--conf spark.hadoop.fs.s3a.secret.key=123 \
+--conf spark.hadoop.fs.s3a.access.key=456 \
+--class=xxx
 </code></pre>
 <ol start="4">
 <li>Set authentication properties to the hadoop configuration object in sparkContext, as sketched below.</li>
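 <p>For example, in spark-shell (a sketch; the key values follow the placeholder values used above):</p>
 <pre><code>sc.hadoopConfiguration.set("fs.s3a.access.key", "456")
 sc.hadoopConfiguration.set("fs.s3a.secret.key", "123")
 </code></pre>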
diff --git a/content/sdk-guide.html b/content/sdk-guide.html
index 37d6b26..32bd876 100644
--- a/content/sdk-guide.html
+++ b/content/sdk-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -239,49 +239,49 @@ These SDK writer output contains just carbondata and carbonindex files. No metad
 <a id="quick-example" class="anchor" href="#quick-example" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick example</h2>
 <h3>
 <a id="example-with-csv-format" class="anchor" href="#example-with-csv-format" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example with csv format</h3>
-<div class="highlight highlight-source-java"><pre> <span class="pl-k">import</span> <span class="pl-smi">java.io.IOException</span>;
- 
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.metadata.datatype.DataTypes</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.util.CarbonProperties</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriter</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriterBuilder</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Field</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Schema</span>;
- 
- <span class="pl-k">public</span> <span class="pl-k">class</span> <span class="pl-en">TestSdk</span> {
+<div class="highlight highlight-source-java"><pre><span class="pl-k">import</span> <span class="pl-smi">java.io.IOException</span>;
 
-   <span class="pl-c"><span class="pl-c">//</span> pass true or false while executing the main to use offheap memory or not</span>
-   <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">main</span>(<span class="pl-k">String</span>[] <span class="pl-v">args</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
-     <span class="pl-k">if</span> (args<span class="pl-k">.</span>length <span class="pl-k">&gt;</span> <span class="pl-c1">0</span> <span class="pl-k">&amp;&amp;</span> args[<span class="pl-c1">0</span>] <span class="pl-k">!=</span> <span class="pl-c1">null</span>) {
-       testSdkWriter(args[<span class="pl-c1">0</span>]);
-     } <span class="pl-k">else</span> {
-       testSdkWriter(<span class="pl-s"><span class="pl-pds">"</span>true<span class="pl-pds">"</span></span>);
-     }
-   }
- 
-   <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">testSdkWriter</span>(<span class="pl-smi">String</span> <span class="pl-v">enableOffheap</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
-     <span class="pl-smi">String</span> path <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">"</span>./target/testCSVSdkWriter<span class="pl-pds">"</span></span>;
- 
-     <span class="pl-k">Field</span>[] fields <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>[<span class="pl-c1">2</span>];
-     fields[<span class="pl-c1">0</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>name<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>STRING</span>);
-     fields[<span class="pl-c1">1</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>age<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>INT</span>);
- 
-     <span class="pl-smi">Schema</span> schema <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Schema</span>(fields);
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.metadata.datatype.DataTypes</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.util.CarbonProperties</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriter</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriterBuilder</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Field</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Schema</span>;
 
-     <span class="pl-smi">CarbonProperties</span><span class="pl-k">.</span>getInstance()<span class="pl-k">.</span>addProperty(<span class="pl-s"><span class="pl-pds">"</span>enable.offheap.sort<span class="pl-pds">"</span></span>, enableOffheap);
- 
-     <span class="pl-smi">CarbonWriterBuilder</span> builder <span class="pl-k">=</span> <span class="pl-smi">CarbonWriter</span><span class="pl-k">.</span>builder()<span class="pl-k">.</span>outputPath(path)<span class="pl-k">.</span>withCsvInput(schema)<span class="pl-k">.</span>writtenBy(<span class="pl-s"><span class="pl-pds">"</span>SDK<span class="pl-pds">"</span></span>);
- 
-     <span class="pl-smi">CarbonWriter</span> writer <span class="pl-k">=</span> builder<span class="pl-k">.</span>build();
- 
-     <span class="pl-k">int</span> rows <span class="pl-k">=</span> <span class="pl-c1">5</span>;
-     <span class="pl-k">for</span> (<span class="pl-k">int</span> i <span class="pl-k">=</span> <span class="pl-c1">0</span>; i <span class="pl-k">&lt;</span> rows; i<span class="pl-k">++</span>) {
-       writer<span class="pl-k">.</span>write(<span class="pl-k">new</span> <span class="pl-smi">String</span>[] { <span class="pl-s"><span class="pl-pds">"</span>robot<span class="pl-pds">"</span></span> <span class="pl-k">+</span> (i <span class="pl-k">%</span> <span class="pl-c1">10</span>), <span class="pl-smi">String</span><span class="pl-k">.</span>valueOf(i) });
-     }
-     writer<span class="pl-k">.</span>close();
-   }
- }</pre></div>
+<span class="pl-k">public</span> <span class="pl-k">class</span> <span class="pl-en">TestSdk</span> {
+
+  <span class="pl-c"><span class="pl-c">//</span> pass true or false while executing the main to use offheap memory or not</span>
+  <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">main</span>(<span class="pl-k">String</span>[] <span class="pl-v">args</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
+    <span class="pl-k">if</span> (args<span class="pl-k">.</span>length <span class="pl-k">&gt;</span> <span class="pl-c1">0</span> <span class="pl-k">&amp;&amp;</span> args[<span class="pl-c1">0</span>] <span class="pl-k">!=</span> <span class="pl-c1">null</span>) {
+      testSdkWriter(args[<span class="pl-c1">0</span>]);
+    } <span class="pl-k">else</span> {
+      testSdkWriter(<span class="pl-s"><span class="pl-pds">"</span>true<span class="pl-pds">"</span></span>);
+    }
+  }
+
+  <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">testSdkWriter</span>(<span class="pl-smi">String</span> <span class="pl-v">enableOffheap</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
+    <span class="pl-smi">String</span> path <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">"</span>./target/testCSVSdkWriter<span class="pl-pds">"</span></span>;
+
+    <span class="pl-k">Field</span>[] fields <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>[<span class="pl-c1">2</span>];
+    fields[<span class="pl-c1">0</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>name<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>STRING</span>);
+    fields[<span class="pl-c1">1</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>age<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>INT</span>);
+
+    <span class="pl-smi">Schema</span> schema <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Schema</span>(fields);
+
+    <span class="pl-smi">CarbonProperties</span><span class="pl-k">.</span>getInstance()<span class="pl-k">.</span>addProperty(<span class="pl-s"><span class="pl-pds">"</span>enable.offheap.sort<span class="pl-pds">"</span></span>, enableOffheap);
+
+    <span class="pl-smi">CarbonWriterBuilder</span> builder <span class="pl-k">=</span> <span class="pl-smi">CarbonWriter</span><span class="pl-k">.</span>builder()<span class="pl-k">.</span>outputPath(path)<span class="pl-k">.</span>withCsvInput(schema)<span class="pl-k">.</span>writtenBy(<span class="pl-s"><span class="pl-pds">"</span>SDK<span class="pl-pds">"</span></span>);
+
+    <span class="pl-smi">CarbonWriter</span> writer <span class="pl-k">=</span> builder<span class="pl-k">.</span>build();
+
+    <span class="pl-k">int</span> rows <span class="pl-k">=</span> <span class="pl-c1">5</span>;
+    <span class="pl-k">for</span> (<span class="pl-k">int</span> i <span class="pl-k">=</span> <span class="pl-c1">0</span>; i <span class="pl-k">&lt;</span> rows; i<span class="pl-k">++</span>) {
+      writer<span class="pl-k">.</span>write(<span class="pl-k">new</span> <span class="pl-smi">String</span>[] { <span class="pl-s"><span class="pl-pds">"</span>robot<span class="pl-pds">"</span></span> <span class="pl-k">+</span> (i <span class="pl-k">%</span> <span class="pl-c1">10</span>), <span class="pl-smi">String</span><span class="pl-k">.</span>valueOf(i) });
+    }
+    writer<span class="pl-k">.</span>close();
+  }
+}</pre></div>
 <h3>
 <a id="example-with-avro-format" class="anchor" href="#example-with-avro-format" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example with Avro format</h3>
 <div class="highlight highlight-source-java"><pre><span class="pl-k">import</span> <span class="pl-smi">java.io.IOException</span>;
@@ -543,271 +543,281 @@ or directly use DataTypes.VARCHAR if it is carbon schema.</p>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonwriterbuilder" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonwriterbuilder" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonWriterBuilder</h3>
 <pre><code>/**
-* Sets the output path of the writer builder
-* @param path is the absolute path where output files are written
-*             This method must be called when building CarbonWriterBuilder
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the output path of the writer builder
+ *
+ * @param path is the absolute path where output files are written
+ *             This method must be called when building CarbonWriterBuilder
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder outputPath(String path);
 </code></pre>
 <pre><code>/**
-* to set the timestamp in the carbondata and carbonindex index files
-* @param UUID is a timestamp to be used in the carbondata and carbonindex index files.
-*             By default set to zero.
-* @return updated CarbonWriterBuilder
-*/
+ * To set the timestamp in the carbondata and carbonindex files
+ *
+ * @param UUID is a timestamp to be used in the carbondata and carbonindex files.
+ *             By default set to zero.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder uniqueIdentifier(long UUID);
 </code></pre>
 <pre><code>/**
-* To set the carbondata file size in MB between 1MB-2048MB
-* @param blockSize is size in MB between 1MB to 2048 MB
-*                  default value is 1024 MB
-* @return updated CarbonWriterBuilder
-*/
+ * To set the carbondata file size in MB, between 1MB and 2048MB
+ *
+ * @param blockSize is the size in MB, between 1MB and 2048MB
+ *                  default value is 1024 MB
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withBlockSize(int blockSize);
 </code></pre>
 <pre><code>/**
-* To set the blocklet size of carbondata file
-* @param blockletSize is blocklet size in MB
-*                     default value is 64 MB
-* @return updated CarbonWriterBuilder
-*/
+ * To set the blocklet size of carbondata file
+ *
+ * @param blockletSize is blocklet size in MB
+ *                     default value is 64 MB
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withBlockletSize(int blockletSize);
 </code></pre>
 <pre><code>/**
-   * @param enableLocalDictionary enable local dictionary  , default is false
-   * @return updated CarbonWriterBuilder
-   */
+ * @param enableLocalDictionary enable local dictionary, default is false
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder enableLocalDictionary(boolean enableLocalDictionary);
 </code></pre>
 <pre><code>/**
-   * @param localDictionaryThreshold is localDictionaryThreshold,default is 10000
-   * @return updated CarbonWriterBuilder
-   */
+ * @param localDictionaryThreshold is the local dictionary threshold, default is 10000
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder localDictionaryThreshold(int localDictionaryThreshold) ;
 </code></pre>
 <pre><code>/**
-* sets the list of columns that needs to be in sorted order
-* @param sortColumns is a string array of columns that needs to be sorted.
-*                    If it is null or by default all dimensions are selected for sorting
-*                    If it is empty array, no columns are sorted
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the list of columns that need to be in sorted order
+ *
+ * @param sortColumns is a string array of columns that need to be sorted.
+ *                    If it is null (the default), all dimensions are selected for sorting.
+ *                    If it is an empty array, no columns are sorted.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder sortBy(String[] sortColumns);
 </code></pre>
 <pre><code>/**
-* sets the taskNo for the writer. SDKs concurrently running
-* will set taskNo in order to avoid conflicts in file's name during write.
-* @param taskNo is the TaskNo user wants to specify.
-*               by default it is system time in nano seconds.
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the taskNo for the writer. Concurrently running SDK writers
+ * should set taskNo in order to avoid conflicts in file names during write.
+ *
+ * @param taskNo is the taskNo the user wants to specify.
+ *               By default it is the system time in nanoseconds.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder taskNo(long taskNo);
 </code></pre>
 <pre><code>/**
-* To support the load options for sdk writer
-* @param options key,value pair of load options.
-*                supported keys values are
-*                a. bad_records_logger_enable -- true (write into separate logs), false
-*                b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
-*                c. bad_record_path -- path
-*                d. dateformat -- same as JAVA SimpleDateFormat
-*                e. timestampformat -- same as JAVA SimpleDateFormat
-*                f. complex_delimiter_level_1 -- value to Split the complexTypeData
-*                g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
-*                h. quotechar
-*                i. escapechar
-*                
-*                Default values are as follows.
-*
-*                a. bad_records_logger_enable -- "false"
-*                b. bad_records_action -- "FAIL"
-*                c. bad_record_path -- ""
-*                d. dateformat -- "" , uses from carbon.properties file
-*                e. timestampformat -- "", uses from carbon.properties file
-*                f. complex_delimiter_level_1 -- "$"
-*                g. complex_delimiter_level_2 -- ":"
-*                h. quotechar -- "\""
-*                i. escapechar -- "\\"
-*
-* @return updated CarbonWriterBuilder
-*/
+ * To support the load options for sdk writer
+ * @param options key,value pair of load options.
+ *                supported keys values are
+ *                a. bad_records_logger_enable -- true (write into separate logs), false
+ *                b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
+ *                c. bad_record_path -- path
+ *                d. dateformat -- same as JAVA SimpleDateFormat
+ *                e. timestampformat -- same as JAVA SimpleDateFormat
+ *                f. complex_delimiter_level_1 -- value to Split the complexTypeData
+ *                g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
+ *                h. quotechar
+ *                i. escapechar
+ *                
+ *                Default values are as follows.
+ *
+ *                a. bad_records_logger_enable -- "false"
+ *                b. bad_records_action -- "FAIL"
+ *                c. bad_record_path -- ""
+ *                d. dateformat -- "", value is taken from carbon.properties file
+ *                e. timestampformat -- "", value is taken from carbon.properties file
+ *                f. complex_delimiter_level_1 -- "$"
+ *                g. complex_delimiter_level_2 -- ":"
+ *                h. quotechar -- "\""
+ *                i. escapechar -- "\\"
+ *
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withLoadOptions(Map&lt;String, String&gt; options);
 </code></pre>
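+<p>As an illustrative sketch (not taken from the Javadoc above), the load options could be passed like this; the key names are the documented ones, while the values are hypothetical:</p>
+<pre><code>// assumes java.util.HashMap / java.util.Map and the SDK classes are imported
+Map&lt;String, String&gt; loadOptions = new HashMap&lt;&gt;();
+loadOptions.put("bad_records_logger_enable", "true");
+loadOptions.put("bad_records_action", "FORCE");
+loadOptions.put("dateformat", "yyyy-MM-dd");
+CarbonWriterBuilder builder = CarbonWriter.builder().withLoadOptions(loadOptions);
+</code></pre>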
 <pre><code>/**
-* To support the table properties for sdk writer
-*
-* @param options key,value pair of create table properties.
-* supported keys values are
-* a. table_blocksize -- [1-2048] values in MB. Default value is 1024
-* b. table_blocklet_size -- values in MB. Default value is 64 MB
-* c. local_dictionary_threshold -- positive value, default is 10000
-* d. local_dictionary_enable -- true / false. Default is false
-* e. sort_columns -- comma separated column. "c1,c2". Default all dimensions are sorted.
-                     If empty string "" is passed. No columns are sorted
-* j. sort_scope -- "local_sort", "no_sort", "batch_sort". default value is "local_sort"
-* k. long_string_columns -- comma separated string columns which are more than 32k length. 
-*                           default value is null.
-* l. inverted_index -- comma separated string columns for which inverted index needs to be
-*                      generated
-*
-* @return updated CarbonWriterBuilder
-*/
+ * To support the table properties for sdk writer
+ *
+ * @param options key-value pairs of create table properties.
+ * supported keys and values are
+ * a. table_blocksize -- [1-2048] values in MB. Default value is 1024
+ * b. table_blocklet_size -- values in MB. Default value is 64 MB
+ * c. local_dictionary_threshold -- positive value, default is 10000
+ * d. local_dictionary_enable -- true / false. Default is false
+ * e. sort_columns -- comma separated column. "c1,c2". Default no columns are sorted.
+ * j. sort_scope -- "local_sort", "no_sort", "batch_sort". default value is "no_sort"
+ * k. long_string_columns -- comma separated string columns which are more than 32k length. 
+ *                           default value is null.
+ * l. inverted_index -- comma separated string columns for which inverted index needs to be
+ *                      generated
+ *
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withTableProperties(Map&lt;String, String&gt; options);
 </code></pre>
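+<p>Similarly, a minimal sketch of passing table properties; the chosen values are examples only:</p>
+<pre><code>Map&lt;String, String&gt; tableProperties = new HashMap&lt;&gt;();
+tableProperties.put("table_blocksize", "256");   // MB, within the documented [1-2048] range
+tableProperties.put("sort_columns", "name");     // hypothetical column name
+CarbonWriterBuilder builder = CarbonWriter.builder().withTableProperties(tableProperties);
+</code></pre>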
 <pre><code>/**
-* To make sdk writer thread safe.
-*
-* @param numOfThreads should number of threads in which writer is called in multi-thread scenario
-*                     default sdk writer is not thread safe.
-*                     can use one writer instance in one thread only.
-* @return updated CarbonWriterBuilder
-*/
+ * To make sdk writer thread safe.
+ *
+ * @param numOfThreads number of threads from which the writer is called in a multi-thread scenario.
+ *                     By default the SDK writer is not thread safe;
+ *                     one writer instance can be used in one thread only.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withThreadSafe(short numOfThreads);
 </code></pre>
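+<p>For example, to declare that four threads will share one writer instance (a sketch; the rest of the builder chain is omitted):</p>
+<pre><code>CarbonWriterBuilder builder = CarbonWriter.builder().withThreadSafe((short) 4);
+</code></pre>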
 <pre><code>/**
-* To support hadoop configuration
-*
-* @param conf hadoop configuration support, can set s3a AK,SK,end point and other conf with this
-* @return updated CarbonWriterBuilder
-*/
+ * To support hadoop configuration
+ *
+ * @param conf hadoop configuration; can set s3a access key, secret key, endpoint and other conf with this
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withHadoopConf(Configuration conf)
 </code></pre>
-<pre><code>  /**
-   * Updates the hadoop configuration with the given key value
-   *
-   * @param key   key word
-   * @param value value
-   * @return this object
-   */
-  public CarbonWriterBuilder withHadoopConf(String key, String value);
+<pre><code>/**
+ * Updates the hadoop configuration with the given key value
+ *
+ * @param key   key word
+ * @param value value
+ * @return this object
+ */
+public CarbonWriterBuilder withHadoopConf(String key, String value);
 </code></pre>
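+<p>As a hedged example continuing the builder from the sketches above, S3 credentials can be supplied through either overload; the XXXX values are placeholders:</p>
+<pre><code>// whole-configuration variant, assumes org.apache.hadoop.conf.Configuration is imported
+Configuration conf = new Configuration();
+conf.set("fs.s3a.access.key", "XXXX");
+builder.withHadoopConf(conf);
+
+// or the key-value variant
+builder.withHadoopConf("fs.s3a.secret.key", "XXXX");
+</code></pre>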
 <pre><code>/**
-* to build a {@link CarbonWriter}, which accepts row in CSV format
-*
-* @param schema carbon Schema object {org.apache.carbondata.sdk.file.Schema}
-* @return CarbonWriterBuilder
-*/
+ * To build a {@link CarbonWriter}, which accepts rows in CSV format
+ *
+ * @param schema carbon Schema object {org.apache.carbondata.sdk.file.Schema}
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withCsvInput(Schema schema);
 </code></pre>
 <pre><code>/**
-* to build a {@link CarbonWriter}, which accepts Avro object
-*
-* @param avroSchema avro Schema object {org.apache.avro.Schema}
-* @return CarbonWriterBuilder
-*/
+ * To build a {@link CarbonWriter}, which accepts Avro objects
+ *
+ * @param avroSchema avro Schema object {org.apache.avro.Schema}
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withAvroInput(org.apache.avro.Schema avroSchema);
 </code></pre>
 <pre><code>/**
-* to build a {@link CarbonWriter}, which accepts Json object
-*
-* @param carbonSchema carbon Schema object
-* @return CarbonWriterBuilder
-*/
+ * To build a {@link CarbonWriter}, which accepts Json objects
+ *
+ * @param carbonSchema carbon Schema object
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withJsonInput(Schema carbonSchema);
 </code></pre>
 <pre><code>/**
-* To support writing the ApplicationName which is writing the carbondata file
-* This is a mandatory API to call, else the build() call will fail with error.
-* @param application name which is writing the carbondata files
-* @return CarbonWriterBuilder
-*/
+ * To record the name of the application which is writing the carbondata files.
+ * This is a mandatory API to call, otherwise the build() call will fail with an error.
+ * @param appName name of the application which is writing the carbondata files
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder writtenBy(String appName);
 </code></pre>
 <pre><code>/**
-* sets the list of columns for which inverted index needs to generated
-* @param invertedIndexColumns is a string array of columns for which inverted index needs to
-* generated.
-* If it is null or an empty array, inverted index will be generated for none of the columns
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the list of columns for which inverted index needs to be generated
+ *
+ * @param invertedIndexColumns is a string array of columns for which inverted index needs to
+ * be generated.
+ * If it is null or an empty array, inverted index will be generated for none of the columns
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder invertedIndexFor(String[] invertedIndexColumns);
 </code></pre>
 <pre><code>/**
-* Build a {@link CarbonWriter}
-* This writer is not thread safe,
-* use withThreadSafe() configuration in multi thread environment
-* 
-* @return CarbonWriter {AvroCarbonWriter/CSVCarbonWriter/JsonCarbonWriter based on Input Type }
-* @throws IOException
-* @throws InvalidLoadOptionException
-*/
+ * Build a {@link CarbonWriter}
+ * This writer is not thread safe;
+ * use the withThreadSafe() configuration in a multi-thread environment
+ * 
+ * @return CarbonWriter {AvroCarbonWriter/CSVCarbonWriter/JsonCarbonWriter based on Input Type }
+ * @throws IOException
+ * @throws InvalidLoadOptionException
+ */
 public CarbonWriter build() throws IOException, InvalidLoadOptionException;
 </code></pre>
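+<p>Putting the builder APIs together, a minimal end-to-end write could look like the sketch below; it assumes the builder's outputPath() method described earlier in this guide and a local output path:</p>
+<pre><code>Schema schema = Schema.parseJson("[{\"name\":\"string\"}, {\"age\":\"int\"}]");
+CarbonWriter writer = CarbonWriter.builder()
+    .outputPath("./testWriteFiles")   // hypothetical local path
+    .withCsvInput(schema)
+    .writtenBy("SDKExample")          // mandatory, see writtenBy() above
+    .build();
+writer.write(new String[]{"robot", "10"});
+writer.close();
+</code></pre>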
-<pre><code> /**
-   * Configure Row Record Reader for reading.
-   *
-   */
-  public CarbonReaderBuilder withRowRecordReader()
+<pre><code>/**
+ * Configure Row Record Reader for reading.
+ *
+ */
+public CarbonReaderBuilder withRowRecordReader()
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonwriter" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonwriter" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonWriter</h3>
 <pre><code>/**
-* Create a {@link CarbonWriterBuilder} to build a {@link CarbonWriter}
-*/
+ * Create a {@link CarbonWriterBuilder} to build a {@link CarbonWriter}
+ */
 public static CarbonWriterBuilder builder() {
     return new CarbonWriterBuilder();
 }
 </code></pre>
 <pre><code>/**
-* Write an object to the file, the format of the object depends on the implementation
-* If AvroCarbonWriter, object is of type org.apache.avro.generic.GenericData.Record, 
-*                      which is one row of data.
-* If CSVCarbonWriter, object is of type String[], which is one row of data
-* If JsonCarbonWriter, object is of type String, which is one row of json
-* @param object
-* @throws IOException
-*/
+ * Write an object to the file; the format of the object depends on the implementation.
+ * If AvroCarbonWriter, object is of type org.apache.avro.generic.GenericData.Record,
+ *                      which is one row of data.
+ * If CSVCarbonWriter, object is of type String[], which is one row of data.
+ * If JsonCarbonWriter, object is of type String, which is one row of json.
+ *
+ * @param object
+ * @throws IOException
+ */
 public abstract void write(Object object) throws IOException;
 </code></pre>
 <pre><code>/**
-* Flush and close the writer
-*/
+ * Flush and close the writer
+ */
 public abstract void close() throws IOException;
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilefield" class="anchor" href="#class-orgapachecarbondatasdkfilefield" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Field</h3>
 <pre><code>/**
-* Field Constructor
-* @param name name of the field
-* @param type datatype of field, specified in strings.
-*/
+ * Field Constructor
+ *
+ * @param name name of the field
+ * @param type datatype of the field, specified as a string.
+ */
 public Field(String name, String type);
 </code></pre>
 <pre><code>/**
-* Field constructor
-* @param name name of the field
-* @param type datatype of the field of class DataType
-*/
+ * Field constructor
+ *
+ * @param name name of the field
+ * @param type datatype of the field of class DataType
+ */
 public Field(String name, DataType type);  
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfileschema" class="anchor" href="#class-orgapachecarbondatasdkfileschema" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Schema</h3>
 <pre><code>/**
-* construct a schema with fields
-* @param fields
-*/
+ * Construct a schema with fields
+ *
+ * @param fields
+ */
 public Schema(Field[] fields);
 </code></pre>
 <pre><code>/**
-* Create a Schema using JSON string, for example:
-* [
-*   {"name":"string"},
-*   {"age":"int"}
-* ] 
-* @param json specified as string
-* @return Schema
-*/
+ * Create a Schema using JSON string, for example:
+ * [
+ *   {"name":"string"},
+ *   {"age":"int"}
+ * ] 
+ * @param json specified as string
+ * @return Schema
+ */
 public static Schema parseJson(String json);
 </code></pre>
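+<p>For illustration, the two Field constructors combine with Schema as follows (a sketch; the column names are arbitrary and DataTypes is assumed to be org.apache.carbondata.core.metadata.datatype.DataTypes):</p>
+<pre><code>Field name = new Field("name", "string");
+Field age = new Field("age", DataTypes.INT);
+Schema schema = new Schema(new Field[]{name, age});
+</code></pre>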
 <h3>
 <a id="class-orgapachecarbondatasdkfileavrocarbonwriter" class="anchor" href="#class-orgapachecarbondatasdkfileavrocarbonwriter" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.AvroCarbonWriter</h3>
 <pre><code>/**
-* converts avro schema to carbon schema, required by carbonWriter
-*
-* @param avroSchemaString json formatted avro schema as string
-* @return carbon sdk schema
-*/
+ * Converts avro schema to carbon schema, required by carbonWriter
+ *
+ * @param avroSchemaString json formatted avro schema as string
+ * @return carbon sdk schema
+ */
 public static org.apache.carbondata.sdk.file.Schema getCarbonSchemaFromAvroSchema(String avroSchemaString);
 </code></pre>
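+<p>For example (an illustrative sketch; the Avro schema string is arbitrary):</p>
+<pre><code>String avroSchema = "{\"type\":\"record\",\"name\":\"User\",\"fields\":"
+    + "[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"age\",\"type\":\"int\"}]}";
+Schema carbonSchema = AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema);
+</code></pre>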
 <h1>
@@ -816,123 +826,123 @@ public static org.apache.carbondata.sdk.file.Schema getCarbonSchemaFromAvroSchem
 External client can make use of this reader to read CarbonData files without CarbonSession.</p>
 <h2>
 <a id="quick-example-1" class="anchor" href="#quick-example-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick example</h2>
-<pre><code>    // 1. Create carbon reader
-    String path = "./testWriteFiles";
-    CarbonReader reader = CarbonReader
-        .builder(path, "_temp")
-        .projection(new String[]{"stringField", "shortField", "intField", "longField", 
-                "doubleField", "boolField", "dateField", "timeField", "decimalField"})
-        .build();
+<pre><code>// 1. Create carbon reader
+String path = "./testWriteFiles";
+CarbonReader reader = CarbonReader
+    .builder(path, "_temp")
+    .projection(new String[]{"stringField", "shortField", "intField", "longField", 
+            "doubleField", "boolField", "dateField", "timeField", "decimalField"})
+    .build();
 
-    // 2. Read data
-    long day = 24L * 3600 * 1000;
-    int i = 0;
-    while (reader.hasNext()) {
-        Object[] row = (Object[]) reader.readNextRow();
-        System.out.println(String.format("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t",
-            i, row[0], row[1], row[2], row[3], row[4], row[5],
-            new Date((day * ((int) row[6]))), new Timestamp((long) row[7] / 1000), row[8]
-        ));
-        i++;
-    }
+// 2. Read data
+long day = 24L * 3600 * 1000;
+int i = 0;
+while (reader.hasNext()) {
+    Object[] row = (Object[]) reader.readNextRow();
+    System.out.println(String.format("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t",
+        i, row[0], row[1], row[2], row[3], row[4], row[5],
+        new Date((day * ((int) row[6]))), new Timestamp((long) row[7] / 1000), row[8]
+    ));
+    i++;
+}
 
-    // 3. Close this reader
-    reader.close();
+// 3. Close this reader
+reader.close();
 </code></pre>
 <p>Find example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/java/org/apache/carbondata/examples/sdk/CarbonReaderExample.java" target=_blank>CarbonReaderExample</a> in the CarbonData repo.</p>
 <h2>
 <a id="api-list-1" class="anchor" href="#api-list-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>API List</h2>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonreader" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonreader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonReader</h3>
-<pre><code>   /**
-    * Return a new {@link CarbonReaderBuilder} instance
-    *
-    * @param tablePath table store path
-    * @param tableName table name
-    * @return CarbonReaderBuilder object
-    */
-  public static CarbonReaderBuilder builder(String tablePath, String tableName);
+<pre><code>/**
+ * Return a new {@link CarbonReaderBuilder} instance
+ *
+ * @param tablePath table store path
+ * @param tableName table name
+ * @return CarbonReaderBuilder object
+ */
+public static CarbonReaderBuilder builder(String tablePath, String tableName);
 </code></pre>
-<pre><code>  /**
-   * Return a new CarbonReaderBuilder instance
-   * Default value of table name is table + tablePath + time
-   *
-   * @param tablePath table path
-   * @return CarbonReaderBuilder object
-   */
-  public static CarbonReaderBuilder builder(String tablePath);
+<pre><code>/**
+ * Return a new CarbonReaderBuilder instance
+ * Default value of table name is table + tablePath + time
+ *
+ * @param tablePath table path
+ * @return CarbonReaderBuilder object
+ */
+public static CarbonReaderBuilder builder(String tablePath);
 </code></pre>
 <pre><code>/**
-  * Breaks the list of CarbonRecordReader in CarbonReader into multiple
-  * CarbonReader objects, each iterating through some 'carbondata' files
-  * and return that list of CarbonReader objects
-  *
-  * If the no. of files is greater than maxSplits, then break the
-  * CarbonReader into maxSplits splits, with each split iterating
-  * through &gt;= 1 file.
-  *
-  * If the no. of files is less than maxSplits, then return list of
-  * CarbonReader with size as the no. of files, with each CarbonReader
-  * iterating through exactly one file
-  *
-  * @param maxSplits: Int
-  * @return list of CarbonReader objects
-  */
-  public List&lt;CarbonReader&gt; split(int maxSplits);
+ * Breaks the list of CarbonRecordReader in CarbonReader into multiple
+ * CarbonReader objects, each iterating through some 'carbondata' files
+ * and returns that list of CarbonReader objects
+ *
+ * If the no. of files is greater than maxSplits, then break the
+ * CarbonReader into maxSplits splits, with each split iterating
+ * through &gt;= 1 file.
+ *
+ * If the no. of files is less than maxSplits, then return list of
+ * CarbonReader with size as the no. of files, with each CarbonReader
+ * iterating through exactly one file
+ *
+ * @param maxSplits: Int
+ * @return list of CarbonReader objects
+ */
+public List&lt;CarbonReader&gt; split(int maxSplits);
 </code></pre>
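+<p>A sketch of reading in parallel with split(), starting from a CarbonReader built as in the quick example; each returned reader iterates a disjoint subset of the files (thread handling omitted for brevity):</p>
+<pre><code>List&lt;CarbonReader&gt; readers = reader.split(4);
+for (CarbonReader subReader : readers) {
+  while (subReader.hasNext()) {
+    Object[] row = (Object[]) subReader.readNextRow();
+    // process row ...
+  }
+  subReader.close();
+}
+</code></pre>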
-<pre><code>  /**
-   * Return true if has next row
-   */
-  public boolean hasNext();
+<pre><code>/**
+ * Return true if has next row
+ */
+public boolean hasNext();
 </code></pre>
-<pre><code>  /**
-   * Read and return next row object
-   */
-  public T readNextRow();
+<pre><code>/**
+ * Read and return next row object
+ */
+public T readNextRow();
 </code></pre>
-<pre><code>  /**
-   * Read and return next batch row objects
-   */
-  public Object[] readNextBatchRow();
+<pre><code>/**
+ * Read and return next batch row objects
+ */
+public Object[] readNextBatchRow();
 </code></pre>
-<pre><code>  /**
-   * Close reader
-   */
-  public void close();
+<pre><code>/**
+ * Close reader
+ */
+public void close();
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonreaderbuilder" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonreaderbuilder" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonReaderBuilder</h3>
-<pre><code>  /**
-   * Construct a CarbonReaderBuilder with table path and table name
-   *
-   * @param tablePath table path
-   * @param tableName table name
-   */
-  CarbonReaderBuilder(String tablePath, String tableName);
+<pre><code>/**
+ * Construct a CarbonReaderBuilder with table path and table name
+ *
+ * @param tablePath table path
+ * @param tableName table name
+ */
+CarbonReaderBuilder(String tablePath, String tableName);
 </code></pre>
-<pre><code>  /**
-   * Configure the projection column names of carbon reader
-   *
-   * @param projectionColumnNames projection column names
-   * @return CarbonReaderBuilder object
-   */
-  public CarbonReaderBuilder projection(String[] projectionColumnNames);
+<pre><code>/**
+ * Configure the projection column names of carbon reader
+ *
+ * @param projectionColumnNames projection column names
+ * @return CarbonReaderBuilder object
+ */
+public CarbonReaderBuilder projection(String[] projectionColumnNames);
 </code></pre>
-<pre><code> /**
-  * Configure the filter expression for carbon reader
-  *
-  * @param filterExpression filter expression
-  * @return CarbonReaderBuilder object
-  */
-  public CarbonReaderBuilder filter(Expression filterExpression);
+<pre><code>/**
+ * Configure the filter expression for carbon reader
+ *
+ * @param filterExpression filter expression
+ * @return CarbonReaderBuilder object
+ */
+public CarbonReaderBuilder filter(Expression filterExpression);
 </code></pre>
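+<p>As a hedged example of a filter, the expression classes from org.apache.carbondata.core.scan.expression can be combined as below, assuming a String column named "stringField" as in the quick example above:</p>
+<pre><code>Expression filterExpression = new EqualToExpression(
+    new ColumnExpression("stringField", DataTypes.STRING),
+    new LiteralExpression("robot0", DataTypes.STRING));
+CarbonReader reader = CarbonReader.builder(path, "_temp")
+    .filter(filterExpression)
+    .build();
+</code></pre>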
-<pre><code>  /**
-   * Sets the batch size of records to read
-   *
-   * @param batch batch size
-   * @return updated CarbonReaderBuilder
-   */
-  public CarbonReaderBuilder withBatch(int batch);
+<pre><code>/**
+ * Sets the batch size of records to read
+ *
+ * @param batch batch size
+ * @return updated CarbonReaderBuilder
+ */
+public CarbonReaderBuilder withBatch(int batch);
 </code></pre>
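+<p>Batch reading ties withBatch() to readNextBatchRow(); a minimal sketch with a hypothetical batch size:</p>
+<pre><code>CarbonReader reader = CarbonReader.builder(path, "_temp")
+    .withBatch(100)
+    .build();
+while (reader.hasNext()) {
+  Object[] batch = reader.readNextBatchRow();
+  // each element of batch is one row
+}
+reader.close();
+</code></pre>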
 <pre><code>/**
  * To support hadoop configuration
@@ -940,139 +950,168 @@ External client can make use of this reader to read CarbonData files without Car
  * @param conf hadoop configuration support, can set s3a AK,SK,end point and other conf with this
  * @return updated CarbonReaderBuilder
  */
- public CarbonReaderBuilder withHadoopConf(Configuration conf);
+public CarbonReaderBuilder withHadoopConf(Configuration conf);
 </code></pre>
-<pre><code>  /**
-   * Updates the hadoop configuration with the given key value
-   *
-   * @param key   key word
-   * @param value value
-   * @return this object
-   */
-  public CarbonReaderBuilder withHadoopConf(String key, String value);
+<pre><code>/**
+ * Updates the hadoop configuration with the given key value
+ *
+ * @param key   key word
+ * @param value value
+ * @return this object
+ */
+public CarbonReaderBuilder withHadoopConf(String key, String value);
 </code></pre>
-<pre><code> /**
-   * Build CarbonReader
-   *
-   * @param &lt;T&gt;
-   * @return CarbonReader
-   * @throws IOException
-   * @throws InterruptedException
-   */
-  public &lt;T&gt; CarbonReader&lt;T&gt; build();
+<pre><code>/**
+ * Build CarbonReader
+ *
+ * @param &lt;T&gt;
+ * @return CarbonReader
+ * @throws IOException
+ * @throws InterruptedException
+ */
+public &lt;T&gt; CarbonReader&lt;T&gt; build();
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonschemareader" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonschemareader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonSchemaReader</h3>
-<pre><code>  /**
-   * Read schema file and return the schema
-   *
-   * @param schemaFilePath complete path including schema file name
-   * @return schema object
-   * @throws IOException
-   */
-  @Deprecated
-  public static Schema readSchemaInSchemaFile(String schemaFilePath);
+<pre><code>/**
+ * Read schema file and return the schema
+ *
+ * @param schemaFilePath complete path including schema file name
+ * @return schema object
+ * @throws IOException
+ */
+@Deprecated
+public static Schema readSchemaInSchemaFile(String schemaFilePath);
 </code></pre>
-<pre><code>  /**
-   * Read carbondata file and return the schema
-   *
-   * @param dataFilePath complete path including carbondata file name
-   * @return Schema object
-   */
-  @Deprecated
-  public static Schema readSchemaInDataFile(String dataFilePath);
+<pre><code>/**
+ * Read carbondata file and return the schema
+ *
+ * @param dataFilePath complete path including carbondata file name
+ * @return Schema object
+ */
+@Deprecated
+public static Schema readSchemaInDataFile(String dataFilePath);
 </code></pre>
-<pre><code>  /**
-   * Read carbonindex file and return the schema
-   *
-   * @param indexFilePath complete path including index file name
-   * @return schema object
-   * @throws IOException
-   */
-  @Deprecated
-  public static Schema readSchemaInIndexFile(String indexFilePath);
+<pre><code>/**
+ * Read carbonindex file and return the schema
+ *
+ * @param indexFilePath complete path including index file name
+ * @return schema object
+ * @throws IOException
+ */
+@Deprecated
+public static Schema readSchemaInIndexFile(String indexFilePath);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and will not check all files' schema
+ *
+ * @param path file/folder path
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and user can decide whether to check all files' schema
+ *
+ * @param path           file/folder path
+ * @param validateSchema whether to check all files' schema
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path, boolean validateSchema);
 </code></pre>
-<pre><code>  /**
-   * read schema from path,
-   * path can be folder path,carbonindex file path, and carbondata file path
-   * and will not check all files schema
-   *
-   * @param path file/folder path
-   * @return schema
-   * @throws IOException
-   */
-  public static Schema readSchema(String path);
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and will not check all files' schema
+ *
+ * @param path file/folder path
+ * @param conf hadoop configuration; can set s3a access key, secret key, endpoint and other conf with this
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path, Configuration conf);
 </code></pre>
-<pre><code>  /**
-   * read schema from path,
-   * path can be folder path,carbonindex file path, and carbondata file path
-   * and user can decide whether check all files schema
-   *
-   * @param path             file/folder path
-   * @param validateSchema whether check all files schema
-   * @return schema
-   * @throws IOException
-   */
-  public static Schema readSchema(String path, boolean validateSchema);
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and user can decide whether to check all files' schema
+ *
+ * @param path           file/folder path
+ * @param validateSchema whether to check all files' schema
+ * @param conf           hadoop configuration; can set s3a access key, secret key,
+ *                       endpoint and other conf with this
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path, boolean validateSchema, Configuration conf);
 </code></pre>
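+<p>For example, reading the schema back from the write path used earlier (illustrative; the flag requests validation across all files):</p>
+<pre><code>Schema schema = CarbonSchemaReader.readSchema("./testWriteFiles", true);
+</code></pre>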
-<pre><code>  /**
-   * This method return the version details in formatted string by reading from carbondata file
-   * If application name is SDK_1.0.0 and this has written the carbondata file in carbondata 1.6 project version,
-   * then this API returns the String "SDK_1.0.0 in version: 1.6.0-SNAPSHOT"
-   * @param dataFilePath complete path including carbondata file name
-   * @return string with information of who has written this file in which carbondata project version
-   * @throws IOException
-   */
-  public static String getVersionDetails(String dataFilePath);
+<pre><code>/**
+ * This method returns the version details as a formatted string by reading from the carbondata file.
+ * If the application name is SDK_1.0.0 and it has written the carbondata file with carbondata project version 1.6,
+ * then this API returns the String "SDK_1.0.0 in version: 1.6.0-SNAPSHOT"
+ *
+ * @param dataFilePath complete path including carbondata file name
+ * @return string with information of who has written this file in which carbondata project version
+ * @throws IOException
+ */
+public static String getVersionDetails(String dataFilePath);
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfileschema-1" class="anchor" href="#class-orgapachecarbondatasdkfileschema-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Schema</h3>
-<pre><code>  /**
-   * construct a schema with fields
-   * @param fields
-   */
-  public Schema(Field[] fields);
+<pre><code>/**
+ * Construct a schema with fields
+ *
+ * @param fields
+ */
+public Schema(Field[] fields);
 </code></pre>
-<pre><code>  /**
-   * construct a schema with List&lt;ColumnSchema&gt;
-   *
-   * @param columnSchemaList column schema list
-   */
-  public Schema(List&lt;ColumnSchema&gt; columnSchemaList);
+<pre><code>/**
+ * Construct a schema with List&lt;ColumnSchema&gt;
+ *
+ * @param columnSchemaList column schema list
+ */
+public Schema(List&lt;ColumnSchema&gt; columnSchemaList);
 </code></pre>
-<pre><code>  /**
-   * Create a Schema using JSON string, for example:
-   * [
-   *   {"name":"string"},
-   *   {"age":"int"}
-   * ]
-   * @param json specified as string
-   * @return Schema
-   */
-  public static Schema parseJson(String json);
+<pre><code>/**
+ * Create a Schema using JSON string, for example:
+ * [
+ *   {"name":"string"},
+ *   {"age":"int"}
+ * ]
+ * @param json specified as string
+ * @return Schema
+ */
+public static Schema parseJson(String json);
 </code></pre>
-<pre><code>  /**
-   * Sort the schema order as original order
-   *
-   * @return Schema object
-   */
-  public Schema asOriginOrder();
+<pre><code>/**
+ * Sort the schema fields back to the original order
+ *
+ * @return Schema object
+ */
+public Schema asOriginOrder();
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilefield-1" class="anchor" href="#class-orgapachecarbondatasdkfilefield-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Field</h3>
-<pre><code>  /**
-   * Field Constructor
-   * @param name name of the field
-   * @param type datatype of field, specified in strings.
-   */
-  public Field(String name, String type);
+<pre><code>/**
+ * Field Constructor
+ *
+ * @param name name of the field
+ * @param type datatype of the field, specified as a string.
+ */
+public Field(String name, String type);
 </code></pre>
-<pre><code>  /**
-   * Construct Field from ColumnSchema
-   *
-   * @param columnSchema ColumnSchema, Store the information about the column meta data
-   */
-  public Field(ColumnSchema columnSchema);
+<pre><code>/**
+ * Construct Field from ColumnSchema
+ *
+ * @param columnSchema ColumnSchema, Store the information about the column meta data
+ */
+public Field(ColumnSchema columnSchema);
 </code></pre>
 <p>Find S3 example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/java/org/apache/carbondata/examples/sdk/SDKS3Example.java" target=_blank>SDKS3Example</a> in the CarbonData repo.</p>
 <h1>
@@ -1080,38 +1119,38 @@ External client can make use of this reader to read CarbonData files without Car
 <h3>
 <a id="class-orgapachecarbondatacoreutilcarbonproperties" class="anchor" href="#class-orgapachecarbondatacoreutilcarbonproperties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.core.util.CarbonProperties</h3>
 <pre><code>/**
-* This method will be responsible to get the instance of CarbonProperties class
-*
-* @return carbon properties instance
-*/
+ * This method is responsible for getting the instance of the CarbonProperties class
+ *
+ * @return carbon properties instance
+ */
 public static CarbonProperties getInstance();
 </code></pre>
 <pre><code>/**
-* This method will be used to add a new property
-*
-* @param key is a property name to set for carbon.
-* @param value is valid parameter corresponding to property.
-* @return CarbonProperties object
-*/
+ * This method will be used to add a new property
+ *
+ * @param key is a property name to set for carbon.
+ * @param value is a valid value corresponding to the property.
+ * @return CarbonProperties object
+ */
 public CarbonProperties addProperty(String key, String value);
 </code></pre>
 <pre><code>/**
-* This method will be used to get the property value. If property is not
-* present, then it will return the default value.
-*
-* @param key is a property name to get user specified value.
-* @return properties value for corresponding key. If not set, then returns null.
-*/
+ * This method will be used to get the property value. If the property is not
+ * present, then it will return null.
+ *
+ * @param key is a property name to get user specified value.
+ * @return properties value for corresponding key. If not set, then returns null.
+ */
 public String getProperty(String key);
 </code></pre>
 <pre><code>/**
-* This method will be used to get the property value. If property is not
-* present, then it will return the default value.
-*
-* @param key is a property name to get user specified value..
-* @param defaultValue used to be returned by function if corrosponding key not set.
-* @return properties value for corresponding key. If not set, then returns specified defaultValue.
-*/
+ * This method will be used to get the property value. If property is not
+ * present, then it will return the default value.
+ *
+ * @param key is a property name to get user specified value.
+ * @param defaultValue value to be returned by the function if the corresponding key is not set.
+ * @return properties value for corresponding key. If not set, then returns specified defaultValue.
+ */
 public String getProperty(String key, String defaultValue);
 </code></pre>
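+<p>Typical usage chains these calls; the property key below is one documented in the configuration reference linked underneath, and the values are illustrative:</p>
+<pre><code>CarbonProperties props = CarbonProperties.getInstance();
+props.addProperty("carbon.badRecords.location", "/tmp/badrecords");
+String location = props.getProperty("carbon.badRecords.location", "/tmp/badrecords");
+</code></pre>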
 <p>Reference : <a href="./configuration-parameters.html">list of carbon properties</a></p>
diff --git a/content/security.html b/content/security.html
index 75a2f65..dccfddf 100644
--- a/content/security.html
+++ b/content/security.html
@@ -45,6 +45,9 @@
                            aria-expanded="false">Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -60,9 +63,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
                                    target="_blank">Apache CarbonData 1.3.1</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.0/"
-                                   target="_blank">Apache CarbonData 1.3.0</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/segment-management-on-carbondata.html b/content/segment-management-on-carbondata.html
index dae0d0e..30bca2e 100644
--- a/content/segment-management-on-carbondata.html
+++ b/content/segment-management-on-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/streaming-guide.html b/content/streaming-guide.html
index 8d8cb82..4c45380 100644
--- a/content/streaming-guide.html
+++ b/content/streaming-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -258,7 +258,7 @@
 <p>Package carbon jar, and copy assembly/target/scala-2.11/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar to $SPARK_HOME/jars</p>
 <div class="highlight highlight-source-shell"><pre>mvn clean package -DskipTests -Pspark-2.2</pre></div>
 <p>Start a socket data server in a terminal</p>
-<div class="highlight highlight-source-shell"><pre> nc -lk 9099</pre></div>
+<div class="highlight highlight-source-shell"><pre>nc -lk 9099</pre></div>
 <p>type some CSV rows as following</p>
 <pre lang="csv"><code>1,col1
 2,col2
@@ -336,12 +336,12 @@
 <a id="create-table-with-streaming-property" class="anchor" href="#create-table-with-streaming-property" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Create table with streaming property</h2>
 <p>Streaming table is just a normal carbon table with "streaming" table property, user can create
 streaming table using following DDL.</p>
-<div class="highlight highlight-source-sql"><pre> <span class="pl-k">CREATE</span> <span class="pl-k">TABLE</span> <span class="pl-en">streaming_table</span> (
-  col1 <span class="pl-k">INT</span>,
-  col2 STRING
- )
- STORED <span class="pl-k">AS</span> carbondata
- TBLPROPERTIES(<span class="pl-s"><span class="pl-pds">'</span>streaming<span class="pl-pds">'</span></span><span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">'</span>true<span class="pl-pds">'</span></span>)</pre></div>
+<div class="highlight highlight-source-sql"><pre><span class="pl-k">CREATE</span> <span class="pl-k">TABLE</span> <span class="pl-en">streaming_table</span> (
+ col1 <span class="pl-k">INT</span>,
+ col2 STRING
+)
+STORED <span class="pl-k">AS</span> carbondata
+TBLPROPERTIES(<span class="pl-s"><span class="pl-pds">'</span>streaming<span class="pl-pds">'</span></span><span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">'</span>true<span class="pl-pds">'</span></span>)</pre></div>
 <table>
 <thead>
 <tr>
diff --git a/content/supported-data-types-in-carbondata.html b/content/supported-data-types-in-carbondata.html
index d873fab..26c0ff8 100644
--- a/content/supported-data-types-in-carbondata.html
+++ b/content/supported-data-types-in-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/timeseries-datamap-guide.html b/content/timeseries-datamap-guide.html
index 9550fa1..281fd3a 100644
--- a/content/timeseries-datamap-guide.html
+++ b/content/timeseries-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -292,32 +292,31 @@ SELECT order_time, country, sex, sum(quantity), max(quantity), count(user_id), s
 <p>For querying timeseries data, Carbondata has builtin support for following time related UDF</p>
 <pre><code>timeseries(timeseries column name, 'aggregation level')
 </code></pre>
-<pre><code>SELECT timeseries(order_time, 'hour'), sum(quantity) FROM sales GROUP BY timeseries(order_time,
-'hour')
+<pre><code>SELECT timeseries(order_time, 'hour'), sum(quantity) FROM sales GROUP BY timeseries(order_time, 'hour')
 </code></pre>
 <p>It is <strong>not necessary</strong> to create pre-aggregate tables for each granularity unless required for
 query.</p>
 <p>For Example: For main table <strong>sales</strong> , if following timeseries datamaps were created for day
 level and hour level pre-aggregate</p>
-<pre><code>  CREATE DATAMAP agg_day
-  ON TABLE sales
-  USING "timeseries"
-  DMPROPERTIES (
-    'event_time'='order_time',
-    'day_granularity'='1',
-  ) AS
-  SELECT order_time, country, sex, sum(quantity), max(quantity), count(user_id), sum(price),
-   avg(price) FROM sales GROUP BY order_time, country, sex
-        
-  CREATE DATAMAP agg_sales_hour
-  ON TABLE sales
-  USING "timeseries"
-  DMPROPERTIES (
-    'event_time'='order_time',
-    'hour_granularity'='1',
-  ) AS
-  SELECT order_time, country, sex, sum(quantity), max(quantity), count(user_id), sum(price),
-   avg(price) FROM sales GROUP BY order_time, country, sex
+<pre><code>CREATE DATAMAP agg_day
+ON TABLE sales
+USING "timeseries"
+DMPROPERTIES (
+  'event_time'='order_time',
+  'day_granularity'='1'
+) AS
+SELECT order_time, country, sex, sum(quantity), max(quantity), count(user_id), sum(price),
+ avg(price) FROM sales GROUP BY order_time, country, sex
+      
+CREATE DATAMAP agg_sales_hour
+ON TABLE sales
+USING "timeseries"
+DMPROPERTIES (
+  'event_time'='order_time',
+  'hour_granularity'='1'
+) AS
+SELECT order_time, country, sex, sum(quantity), max(quantity), count(user_id), sum(price),
+ avg(price) FROM sales GROUP BY order_time, country, sex
 </code></pre>
 <p>Queries like below will not be rolled-up and hit the main table</p>
 <pre><code>Select timeseries(order_time, 'month'), sum(quantity) from sales group by timeseries(order_time,
@@ -326,7 +325,7 @@ level and hour level pre-aggregate</p>
 Select timeseries(order_time, 'year'), sum(quantity) from sales group by timeseries(order_time,
   'year')
 </code></pre>
-<p>NOTE (<b>RESTRICTION</b>):</p>
+<p>NOTE (<strong>RESTRICTION</strong>):</p>
 <ul>
 <li>Only value of 1 is supported for hierarchy levels. Other hierarchy levels will be supported in
 the future CarbonData release.</li>
diff --git a/content/usecases.html b/content/usecases.html
index b6b4d74..dc88c17 100644
--- a/content/usecases.html
+++ b/content/usecases.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -217,7 +217,8 @@
                         <div id="viewpage" name="viewpage">
                             <div class="row">
                                 <div class="col-sm-12  col-md-12">
-                                    <div><h1>
+                                    <div>
+<h1>
 <a id="use-cases" class="anchor" href="#use-cases" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Use Cases</h1>
 <p>CarbonData is useful in various analytical workloads. Some of the most typical use cases where CarbonData is being used are documented here.</p>
 <p>CarbonData is used for but not limited to</p>
diff --git a/content/videogallery.html b/content/videogallery.html
index 7b7e66c..359be97 100644
--- a/content/videogallery.html
+++ b/content/videogallery.html
@@ -49,6 +49,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf
index 2f1b695..430fa47 100644
--- a/src/main/resources/application.conf
+++ b/src/main/resources/application.conf
@@ -18,7 +18,10 @@ fileList=["configuration-parameters",
   "introduction",
   "usecases",
   "csdk-guide",
-  "carbon-as-spark-datasource-guide"
+  "carbon-as-spark-datasource-guide",
+  "alluxio-guide",
+  "hive-guide",
+  "presto-guide"
   ]
 dataMapFileList=[
   "bloomfilter-datamap-guide",
diff --git a/src/main/scala/html/header.html b/src/main/scala/html/header.html
index 196736f..6c9c9c7 100644
--- a/src/main/scala/html/header.html
+++ b/src/main/scala/html/header.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/scala/scripts/alluxio-guide b/src/main/scala/scripts/alluxio-guide
new file mode 100644
index 0000000..3bfec2b
--- /dev/null
+++ b/src/main/scala/scripts/alluxio-guide
@@ -0,0 +1,4 @@
+<script>
+// Show selected style on nav item
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
+</script>
\ No newline at end of file
diff --git a/src/main/scala/scripts/hive-guide b/src/main/scala/scripts/hive-guide
new file mode 100644
index 0000000..3bfec2b
--- /dev/null
+++ b/src/main/scala/scripts/hive-guide
@@ -0,0 +1,4 @@
+<script>
+// Show selected style on nav item
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
+</script>
\ No newline at end of file
diff --git a/src/main/scala/scripts/presto-guide b/src/main/scala/scripts/presto-guide
new file mode 100644
index 0000000..3bfec2b
--- /dev/null
+++ b/src/main/scala/scripts/presto-guide
@@ -0,0 +1,4 @@
+<script>
+// Show selected style on nav item
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
+</script>
\ No newline at end of file
diff --git a/src/main/webapp/CSDK-guide.html b/src/main/webapp/CSDK-guide.html
index 73e1d67..e7809e8 100644
--- a/src/main/webapp/CSDK-guide.html
+++ b/src/main/webapp/CSDK-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -241,94 +241,96 @@ release the memory and destroy JVM.</p>
 <a id="api-list" class="anchor" href="#api-list" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>API List</h2>
 <h3>
 <a id="carbonreader" class="anchor" href="#carbonreader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonReader</h3>
-<pre><code>    /**
-     * create a CarbonReaderBuilder object for building carbonReader,
-     * CarbonReaderBuilder object  can configure different parameter
-     *
-     * @param env JNIEnv
-     * @param path data store path
-     * @param tableName table name
-     * @return CarbonReaderBuilder object
-     */
-    jobject builder(JNIEnv *env, char *path, char *tableName);
-</code></pre>
-<pre><code>    /**
-     * create a CarbonReaderBuilder object for building carbonReader,
-     * CarbonReaderBuilder object  can configure different parameter
-     *
-     * @param env JNIEnv
-     * @param path data store path
-     * */
-    void builder(JNIEnv *env, char *path);
-</code></pre>
-<pre><code>    /**
-     * Configure the projection column names of carbon reader
-     *
-     * @param argc argument counter
-     * @param argv argument vector
-     * @return CarbonReaderBuilder object
-     */
-    jobject projection(int argc, char *argv[]);
-</code></pre>
-<pre><code>    /**
-     *  build carbon reader with argument vector
-     *  it support multiple parameter
-     *  like: key=value
-     *  for example: fs.s3a.access.key=XXXX, XXXX is user's access key value
-     *
-     * @param argc argument counter
-     * @param argv argument vector
-     * @return CarbonReaderBuilder object
-     **/
-    jobject withHadoopConf(int argc, char *argv[]);
-</code></pre>
-<pre><code>   /**
-     * Sets the batch size of records to read
-     *
-     * @param batch batch size
-     * @return CarbonReaderBuilder object
-     */
-    void withBatch(int batch);
-</code></pre>
-<pre><code>    /**
-     * Configure Row Record Reader for reading.
-     */
-    void withRowRecordReader();
-</code></pre>
-<pre><code>    /**
-     * build carbonReader object for reading data
-     * it support read data from load disk
-     *
-     * @return carbonReader object
-     */
-    jobject build();
-</code></pre>
-<pre><code>    /**
-     * Whether it has next row data
-     *
-     * @return boolean value, if it has next row, return true. if it hasn't next row, return false.
-     */
-    jboolean hasNext();
-</code></pre>
-<pre><code>    /**
-     * read next carbonRow from data
-     * @return carbonRow object of one row
-     */
-     jobject readNextRow();
-</code></pre>
-<pre><code>    /**
-     * read Next Batch Row
-     *
-     * @return rows
-     */
-    jobjectArray readNextBatchRow();
-</code></pre>
-<pre><code>    /**
-     * close the carbon reader
-     *
-     * @return  boolean value
-     */
-    jboolean close();
+<pre><code>/**
+ * Create a CarbonReaderBuilder object for building carbonReader;
+ * the CarbonReaderBuilder object can configure different parameters
+ *
+ * @param env JNIEnv
+ * @param path data store path
+ * @param tableName table name
+ * @return CarbonReaderBuilder object
+ */
+jobject builder(JNIEnv *env, char *path, char *tableName);
+</code></pre>
+<pre><code>/**
+ * Create a CarbonReaderBuilder object for building carbonReader;
+ * the CarbonReaderBuilder object can configure different parameters
+ *
+ * @param env JNIEnv
+ * @param path data store path
+ * 
+ */
+void builder(JNIEnv *env, char *path);
+</code></pre>
+<pre><code>/**
+ * Configure the projection column names of carbon reader
+ *
+ * @param argc argument counter
+ * @param argv argument vector
+ * @return CarbonReaderBuilder object
+ */
+jobject projection(int argc, char *argv[]);
+</code></pre>
+<pre><code>/**
+ * Configure the Hadoop configuration of the carbon reader
+ * with an argument vector; it supports multiple parameters
+ * of the form key=value,
+ * for example: fs.s3a.access.key=XXXX, where XXXX is the user's access key value
+ *
+ * @param argc argument counter
+ * @param argv argument vector
+ * @return CarbonReaderBuilder object
+ */
+jobject withHadoopConf(int argc, char *argv[]);
+</code></pre>
+<pre><code>/**
+ * Sets the batch size of records to read
+ *
+ * @param batch batch size
+ */
+void withBatch(int batch);
+</code></pre>
+<pre><code>/**
+ * Configure Row Record Reader for reading.
+ */
+void withRowRecordReader();
+</code></pre>
+<pre><code>/**
+ * Build carbonReader object for reading data;
+ * it supports reading data from local disk
+ *
+ * @return carbonReader object
+ */
+jobject build();
+</code></pre>
+<pre><code>/**
+ * Whether it has next row data
+ *
+ * @return boolean value: true if there is a next row, false otherwise.
+ */
+jboolean hasNext();
+</code></pre>
+<pre><code>/**
+ * Read next carbonRow from data
+ * @return carbonRow object of one row
+ */
+jobject readNextRow();
+</code></pre>
+<pre><code>/**
+ * Read the next batch of rows
+ *
+ * @return an array of row objects
+ */
+jobjectArray readNextBatchRow();
+</code></pre>
+<pre><code>/**
+ * Close the carbon reader
+ *
+ * @return boolean value
+ */
+jboolean close();
 </code></pre>
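+<p>The snippet below is a minimal usage sketch tying the reader APIs above together. It is an illustration only: it assumes <code>env</code> is a valid <code>JNIEnv *</code> obtained from the JVM initialization described earlier in this guide, that the <code>CarbonReader</code> class above is default-constructible, and the store path, table name and projection columns are hypothetical placeholders.</p>
+<pre><code>// Minimal reader sketch (hypothetical path, table and columns)
+CarbonReader reader;
+char path[] = "/tmp/carbondata";
+char tableName[] = "test_table";
+reader.builder(env, path, tableName);
+char col1[] = "id", col2[] = "name";
+char *projection[] = {col1, col2};
+reader.projection(2, projection);
+reader.build();
+while (reader.hasNext()) {
+    jobject row = reader.readNextRow();
+    // inspect the carbonRow here, then release the JNI local reference
+    env-&gt;DeleteLocalRef(row);
+}
+reader.close();
+</code></pre>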
 <h1>
 <a id="c-sdk-writer" class="anchor" href="#c-sdk-writer" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>C++ SDK Writer</h1>
@@ -348,172 +350,302 @@ release the memory and destroy JVM.</p>
 <a id="api-list-1" class="anchor" href="#api-list-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>API List</h2>
 <h3>
 <a id="carbonwriter" class="anchor" href="#carbonwriter" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonWriter</h3>
-<pre><code>    /**
-     * create a CarbonWriterBuilder object for building carbonWriter,
-     * CarbonWriterBuilder object  can configure different parameter
-     *
-     * @param env JNIEnv
-     * @return CarbonWriterBuilder object
-     */
-    void builder(JNIEnv *env);
-</code></pre>
-<pre><code>    /**
-     * Sets the output path of the writer builder
-     *
-     * @param path is the absolute path where output files are written
-     * This method must be called when building CarbonWriterBuilder
-     * @return updated CarbonWriterBuilder
-     */
-    void outputPath(char *path);
-</code></pre>
-<pre><code>    /**
-     * configure the schema with json style schema
-     *
-     * @param jsonSchema json style schema
-     * @return updated CarbonWriterBuilder
-     */
-    void withCsvInput(char *jsonSchema);
-</code></pre>
-<pre><code>    /**
-    * Updates the hadoop configuration with the given key value
-    *
-    * @param key key word
-    * @param value value
-    * @return CarbonWriterBuilder object
-    */
-    void withHadoopConf(char *key, char *value);
-</code></pre>
-<pre><code>    /**
-     * @param appName appName which is writing the carbondata files
-     */
-    void writtenBy(char *appName);
-</code></pre>
-<pre><code>    /**
-     * build carbonWriter object for writing data
-     * it support write data from load disk
-     *
-     * @return carbonWriter object
-     */
-    void build();
-</code></pre>
-<pre><code>    /**
-     * Write an object to the file, the format of the object depends on the
-     * implementation.
-     * Note: This API is not thread safe
-     */
-    void write(jobject obj);
-</code></pre>
-<pre><code>    /**
-     * close the carbon Writer
-     */
-    void close();
+<pre><code>/**
+ * Create a CarbonWriterBuilder object for building carbonWriter;
+ * the CarbonWriterBuilder object can configure different parameters
+ *
+ * @param env JNIEnv
+ * @return CarbonWriterBuilder object
+ */
+void builder(JNIEnv *env);
+</code></pre>
+<pre><code>/**
+ * Sets the output path of the writer builder
+ *
+ * @param path is the absolute path where output files are written
+ * This method must be called when building CarbonWriterBuilder
+ * @return updated CarbonWriterBuilder
+ */
+void outputPath(char *path);
+</code></pre>
+<pre><code>/**
+ * Sets the list of columns that need to be in sorted order
+ *
+ * @param argc argument counter, the number of sort columns
+ * @param argv a string array of columns that need to be sorted.
+ *             If it is null, all dimensions are selected for sorting by default.
+ *             If it is an empty array, no columns are sorted
+ */
+void sortBy(int argc, char *argv[]);
+</code></pre>
+<pre><code>/**
+ * Configure the schema with json style schema
+ *
+ * @param jsonSchema json style schema
+ * @return updated CarbonWriterBuilder
+ */
+void withCsvInput(char *jsonSchema);
+</code></pre>
+<pre><code>/**
+ * Updates the hadoop configuration with the given key value
+ *
+ * @param key key word
+ * @param value value
+ * @return CarbonWriterBuilder object
+ */
+void withHadoopConf(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * Sets a table property for the writer
+ *
+ * @param key properties key
+ * @param value properties value
+ */
+void withTableProperty(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * Sets a load option for the C++ SDK writer
+ *
+ * @param key load option key
+ * @param value load option value
+ *
+ * Supported keys and values are
+ * a. bad_records_logger_enable -- true (write into separate logs), false
+ * b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
+ * c. bad_record_path -- path
+ * d. dateformat -- same as JAVA SimpleDateFormat
+ * e. timestampformat -- same as JAVA SimpleDateFormat
+ * f. complex_delimiter_level_1 -- value to Split the complexTypeData
+ * g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
+ * h. quotechar
+ * i. escapechar
+ *
+ * Default values are as follows.
+ *
+ * a. bad_records_logger_enable -- "false"
+ * b. bad_records_action -- "FAIL"
+ * c. bad_record_path -- ""
+ * d. dateformat -- "", uses the value from the carbon.properties file
+ * e. timestampformat -- "", uses the value from the carbon.properties file
+ * f. complex_delimiter_level_1 -- "$"
+ * g. complex_delimiter_level_2 -- ":"
+ * h. quotechar -- "\""
+ * i. escapechar -- "\\"
+ *
+ * @return updated CarbonWriterBuilder
+ */
+void withLoadOption(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * Sets the taskNo for the writer. Concurrently running CSDK writers
+ * should set distinct taskNo values to avoid conflicts in file names during write.
+ *
+ * @param taskNo is the taskNo the user wants to specify;
+ *               by default it is the system time in nanoseconds.
+ */
+void taskNo(long taskNo);
+</code></pre>
+<pre><code>/**
+ * Set the timestamp in the carbondata and carbonindex files
+ *
+ * @param timestamp is a timestamp to be used in the carbondata and carbonindex files.
+ * By default it is set to zero.
+ * @return updated CarbonWriterBuilder
+ */
+void uniqueIdentifier(long timestamp);
+</code></pre>
+<pre><code>/**
+ * To make the C++ SDK writer thread safe.
+ *
+ * @param numOfThreads the number of threads from which the writer is called in a multi-thread scenario.
+ *                     By default the C++ SDK writer is not thread safe
+ *                     and one writer instance can be used in one thread only.
+ */
+void withThreadSafe(short numOfThreads);
+</code></pre>
+<pre><code>/**
+ * To set the carbondata file size, between 1 MB and 2048 MB
+ *
+ * @param blockSize is the size in MB, between 1 MB and 2048 MB;
+ * the default value is 1024 MB
+ */
+void withBlockSize(int blockSize);
+</code></pre>
+<pre><code>/**
+ * To set the blocklet size of the CarbonData file
+ *
+ * @param blockletSize is the blocklet size in MB;
+ *        the default value is 64 MB
+ * @return updated CarbonWriterBuilder
+ */
+void withBlockletSize(int blockletSize);
+</code></pre>
+<pre><code>/**
+ * @param localDictionaryThreshold is the local dictionary threshold; the default is 10000
+ * @return updated CarbonWriterBuilder
+ */
+void localDictionaryThreshold(int localDictionaryThreshold);
+</code></pre>
+<pre><code>/**
+ * @param enableLocalDictionary enable local dictionary, default is false
+ * @return updated CarbonWriterBuilder
+ */
+void enableLocalDictionary(bool enableLocalDictionary);
+</code></pre>
+<pre><code>/**
+ * @param appName appName which is writing the carbondata files
+ */
+void writtenBy(char *appName);
+</code></pre>
+<pre><code>/**
+ * Build carbonWriter object for writing data;
+ * it supports writing data to local disk
+ *
+ * @return carbonWriter object
+ */
+void build();
+</code></pre>
+<pre><code>/**
+ * Write an object to the file, the format of the object depends on the
+ * implementation.
+ * Note: This API is not thread safe
+ */
+void write(jobject obj);
+</code></pre>
+<pre><code>/**
+ * Close the carbon writer
+ */
+void close();
 </code></pre>
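+<p>The snippet below is a minimal usage sketch for the writer APIs above, again assuming <code>env</code> comes from the JVM initialization described earlier. The output path, JSON schema and row values are hypothetical, and building a <code>jobjectArray</code> of strings is one plausible way to hand a CSV-style row to <code>write()</code>; it is a sketch, not the definitive calling convention.</p>
+<pre><code>// Minimal writer sketch (hypothetical path, schema and values)
+CarbonWriter writer;
+writer.builder(env);
+char path[] = "/tmp/carbondata_out";
+writer.outputPath(path);
+char schema[] = "[{\"name\":\"string\"},{\"age\":\"int\"}]";
+writer.withCsvInput(schema);
+char appName[] = "CSDK-sketch";
+writer.writtenBy(appName);
+writer.build();
+// one row: ("bob", "24") passed as a Java String[]
+jclass stringClass = env-&gt;FindClass("java/lang/String");
+jobjectArray row = env-&gt;NewObjectArray(2, stringClass, NULL);
+env-&gt;SetObjectArrayElement(row, 0, env-&gt;NewStringUTF("bob"));
+env-&gt;SetObjectArrayElement(row, 1, env-&gt;NewStringUTF("24"));
+writer.write(row);
+env-&gt;DeleteLocalRef(row);
+writer.close();
+</code></pre>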
 <h3>
 <a id="carbonschemareader" class="anchor" href="#carbonschemareader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonSchemaReader</h3>
-<pre><code>    /**
-     * constructor with jni env
-     *
-     * @param env  jni env
-     */
-    CarbonSchemaReader(JNIEnv *env);
-</code></pre>
-<pre><code>    /**
-     * read schema from path,
-     * path can be folder path, carbonindex file path, and carbondata file path
-     * and will not check all files schema
-     *
-     * @param path file/folder path
-     * @return schema
-     */
-    jobject readSchema(char *path);
-</code></pre>
-<pre><code>    /**
-     *  read schema from path,
-     *  path can be folder path, carbonindex file path, and carbondata file path
-     *  and user can decide whether check all files schema
-     *
-     * @param path carbon data path
-     * @param validateSchema whether check all files schema
-     * @return schema
-     */
-    jobject readSchema(char *path, bool validateSchema);
+<pre><code>/**
+ * Constructor with jni env
+ *
+ * @param env  jni env
+ */
+CarbonSchemaReader(JNIEnv *env);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and it will not check the schema of all files
+ *
+ * @param path file/folder path
+ * @return schema
+ */
+jobject readSchema(char *path);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and the user can decide whether to check the schema of all files
+ *
+ * @param path carbon data path
+ * @param validateSchema whether to check the schema of all files
+ * @return schema
+ */
+jobject readSchema(char *path, bool validateSchema);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and it will not check the schema of all files
+ *
+ * @param path file/folder path
+ * @param conf           configuration support; s3a AK, SK,
+ *                       endpoint and other settings can be set with this
+ * @return schema
+ */
+jobject readSchema(char *path, Configuration conf);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, and carbondata file path
+ * and the user can decide whether to check the schema of all files
+ *
+ * @param path carbon data path
+ * @param validateSchema whether to check the schema of all files
+ * @param conf           configuration support; s3a AK, SK,
+ *                       endpoint and other settings can be set with this
+ * @return schema
+ */
+jobject readSchema(char *path, bool validateSchema, Configuration conf);
 </code></pre>
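+<p>A short hedged sketch of the schema reader, assuming <code>env</code> is a valid <code>JNIEnv *</code>; the path is a hypothetical placeholder.</p>
+<pre><code>// Minimal schema reader sketch (hypothetical path)
+CarbonSchemaReader schemaReader(env);
+char path[] = "/tmp/carbondata";
+// read the schema without validating every file ...
+jobject schema = schemaReader.readSchema(path);
+// ... or additionally validate that all files share the same schema
+jobject validated = schemaReader.readSchema(path, true);
+</code></pre>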
 <h3>
 <a id="schema" class="anchor" href="#schema" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Schema</h3>
-<pre><code> /**
-     * constructor with jni env and carbon schema data
-     *
-     * @param env jni env
-     * @param schema  carbon schema data
-     */
-    Schema(JNIEnv *env, jobject schema);
-</code></pre>
-<pre><code>    /**
-     * get fields length of schema
-     *
-     * @return fields length
-     */
-    int getFieldsLength();
-</code></pre>
-<pre><code>    /**
-     * get field name by ordinal
-     *
-     * @param ordinal the data index of carbon schema
-     * @return ordinal field name
-     */
-    char *getFieldName(int ordinal);
-</code></pre>
-<pre><code>    /**
-     * get  field data type name by ordinal
-     *
-     * @param ordinal the data index of carbon schema
-     * @return ordinal field data type name
-     */
-    char *getFieldDataTypeName(int ordinal);
-</code></pre>
-<pre><code>    /**
-     * get  array child element data type name by ordinal
-     *
-     * @param ordinal the data index of carbon schema
-     * @return ordinal array child element data type name
-     */
-    char *getArrayElementTypeName(int ordinal);
+<pre><code>/**
+ * Constructor with jni env and carbon schema data
+ *
+ * @param env jni env
+ * @param schema  carbon schema data
+ */
+Schema(JNIEnv *env, jobject schema);
+</code></pre>
+<pre><code>/**
+ * Get fields length of schema
+ *
+ * @return fields length
+ */
+int getFieldsLength();
+</code></pre>
+<pre><code>/**
+ * Get field name by ordinal
+ *
+ * @param ordinal the data index of carbon schema
+ * @return ordinal field name
+ */
+char *getFieldName(int ordinal);
+</code></pre>
+<pre><code>/**
+ * Get field data type name by ordinal
+ *
+ * @param ordinal the data index of carbon schema
+ * @return ordinal field data type name
+ */
+char *getFieldDataTypeName(int ordinal);
+</code></pre>
+<pre><code>/**
+ * Get array child element data type name by ordinal
+ *
+ * @param ordinal the data index of carbon schema
+ * @return ordinal array child element data type name
+ */
+char *getArrayElementTypeName(int ordinal);
 </code></pre>
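+<p>Continuing the schema reader sketch above, a <code>Schema</code> object can wrap the <code>jobject</code> returned by <code>readSchema</code> and iterate over the fields; the <code>printf</code> output is for illustration only.</p>
+<pre><code>// Minimal schema inspection sketch (uses `schema` from the reader sketch)
+Schema carbonSchema(env, schema);
+for (int i = 0; i &lt; carbonSchema.getFieldsLength(); ++i) {
+    printf("%s: %s\n", carbonSchema.getFieldName(i),
+        carbonSchema.getFieldDataTypeName(i));
+}
+</code></pre>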
 <h3>
 <a id="carbonproperties" class="anchor" href="#carbonproperties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonProperties</h3>
-<pre><code>  /**
-     * Constructor of CarbonProperties
-     *
-     * @param env JNI env
-     */
-    CarbonProperties(JNIEnv *env);
-</code></pre>
-<pre><code>    /**
-     * This method will be used to add a new property
-     * 
-     * @param key property key
-     * @param value property value
-     * @return CarbonProperties object
-     */
-    jobject addProperty(char *key, char *value);
-</code></pre>
-<pre><code>    /**
-     * This method will be used to get the properties value
-     *
-     * @param key  property key
-     * @return  property value
-     */
-    char *getProperty(char *key);
-</code></pre>
-<pre><code>    /**
-     * This method will be used to get the properties value
-     * if property is not present then it will return the default value
-     *
-     * @param key  property key
-     * @param defaultValue  property default Value
-     * @return
-     */
-    char *getProperty(char *key, char *defaultValue);
+<pre><code>/**
+ * Constructor of CarbonProperties
+ *
+ * @param env JNI env
+ */
+CarbonProperties(JNIEnv *env);
+</code></pre>
+<pre><code>/**
+ * This method will be used to add a new property
+ * 
+ * @param key property key
+ * @param value property value
+ * @return CarbonProperties object
+ */
+jobject addProperty(char *key, char *value);
+</code></pre>
+<pre><code>/**
+ * This method will be used to get the properties value
+ *
+ * @param key property key
+ * @return property value
+ */
+char *getProperty(char *key);
+</code></pre>
+<pre><code>/**
+ * This method will be used to get the properties value
+ * if property is not present then it will return the default value
+ *
+ * @param key  property key
+ * @param defaultValue  property default Value
+ * @return property value, or the default value if the property is not present
+ */
+char *getProperty(char *key, char *defaultValue);
 </code></pre>
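+<p>A short hedged sketch of <code>CarbonProperties</code>, again assuming a valid <code>env</code>. <code>carbon.unsafe.working.memory.in.mb</code> is a real CarbonData property, but the value and the fallback key here are only examples.</p>
+<pre><code>// Minimal properties sketch (example key/value)
+CarbonProperties properties(env);
+char key[] = "carbon.unsafe.working.memory.in.mb";
+char value[] = "1024";
+properties.addProperty(key, value);
+char *current = properties.getProperty(key);
+// falls back to the default when the property is not present
+char unsetKey[] = "hypothetical.unset.key";
+char fallback[] = "default-value";
+char *resolved = properties.getProperty(unsetKey, fallback);
+</code></pre>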
 <script>
 $(function() {
diff --git a/src/main/webapp/bloomfilter-datamap-guide.html b/src/main/webapp/alluxio-guide.html
similarity index 58%
copy from src/main/webapp/bloomfilter-datamap-guide.html
copy to src/main/webapp/alluxio-guide.html
index aab8dc0..037f29d 100644
--- a/src/main/webapp/bloomfilter-datamap-guide.html
+++ b/src/main/webapp/alluxio-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -219,161 +219,163 @@
                                 <div class="col-sm-12  col-md-12">
                                     <div>
 <h1>
-<a id="carbondata-bloomfilter-datamap" class="anchor" href="#carbondata-bloomfilter-datamap" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonData BloomFilter DataMap</h1>
+<a id="alluxio-guide" class="anchor" href="#alluxio-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Alluxio guide</h1>
+<p>This tutorial provides a brief introduction to using Alluxio.</p>
+<ul>
+<li>How to use Alluxio in CarbonData?
 <ul>
-<li><a href="#datamap-management">DataMap Management</a></li>
-<li><a href="#bloomfilter-datamap-introduction">BloomFilter Datamap Introduction</a></li>
-<li><a href="#loading-data">Loading Data</a></li>
-<li><a href="#querying-data">Querying Data</a></li>
-<li><a href="#data-management-with-bloomfilter-datamap">Data Management</a></li>
-<li><a href="#useful-tips">Useful Tips</a></li>
+<li><a href="#running-alluxio-example-in-carbondata-project-by-idea">Running Alluxio example in CarbonData project by IDEA</a></li>
+<li><a href="#carbondata-supports-alluxio-by-spark-shell">CarbonData supports Alluxio by spark-shell</a></li>
+<li><a href="#carbondata-supports-alluxio-by-spark-submit">CarbonData supports Alluxio by spark-submit</a></li>
 </ul>
-<h4>
-<a id="datamap-management" class="anchor" href="#datamap-management" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DataMap Management</h4>
-<p>Creating BloomFilter DataMap</p>
-<pre><code>CREATE DATAMAP [IF NOT EXISTS] datamap_name
-ON TABLE main_table
-USING 'bloomfilter'
-DMPROPERTIES ('index_columns'='city, name', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')
-</code></pre>
-<p>Dropping specified datamap</p>
-<pre><code>DROP DATAMAP [IF EXISTS] datamap_name
-ON TABLE main_table
+</li>
+</ul>
+<h2>
+<a id="running-alluxio-example-in-carbondata-project-by-idea" class="anchor" href="#running-alluxio-example-in-carbondata-project-by-idea" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running alluxio example in CarbonData project by IDEA</h2>
+<h3>
+<a id="building-carbondata" class="anchor" href="#building-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a><a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>
+</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>.</li>
+<li>Users need to install IDEA and scala plugin, and import CarbonData project.</li>
+</ul>
+<h3>
+<a id="installing-and-starting-alluxio" class="anchor" href="#installing-and-starting-alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and starting Alluxio</h3>
+<ul>
+<li>Please refer to <a href="https://www.alluxio.org/docs/1.8/en/Getting-Started.html#starting-alluxio" rel="nofollow">https://www.alluxio.org/docs/1.8/en/Getting-Started.html#starting-alluxio</a>
+</li>
+<li>Access the Alluxio web: <a href="http://localhost:19999/home" rel="nofollow">http://localhost:19999/home</a>
+</li>
+</ul>
+<h3>
+<a id="running-example" class="anchor" href="#running-example" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running Example</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/AlluxioExample.scala" target=_blank>AlluxioExample</a>
+</li>
+</ul>
+<h2>
+<a id="carbondata-supports-alluxio-by-spark-shell" class="anchor" href="#carbondata-supports-alluxio-by-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonData supports alluxio by spark-shell</h2>
+<h3>
+<a id="building-carbondata-1" class="anchor" href="#building-carbondata-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a><a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>
+</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>.</li>
+</ul>
+<h3>
+<a id="preparing-spark" class="anchor" href="#preparing-spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Preparing Spark</h3>
+<ul>
+<li>Please refer to <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">http://spark.apache.org/docs/latest/</a>
+</li>
+</ul>
+<h3>
+<a id="downloading-alluxio-and-uncompressing-it" class="anchor" href="#downloading-alluxio-and-uncompressing-it" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Downloading alluxio and uncompressing it</h3>
+<ul>
+<li>Please refer to <a href="https://www.alluxio.org/download" target=_blank rel="nofollow">https://www.alluxio.org/download</a>
+</li>
+</ul>
+<h3>
+<a id="running-spark-shell" class="anchor" href="#running-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running spark-shell</h3>
+<ul>
+<li>Run the following command from the Spark installation path</li>
+</ul>
+<pre lang="$command"><code>./bin/spark-shell --jars ${CARBONDATA_PATH}/assembly/target/scala-2.11/apache-carbondata-1.6.0-SNAPSHOT-bin-spark2.2.1-hadoop2.7.2.jar,${ALLUXIO_PATH}/client/alluxio-1.8.1-client.jar
 </code></pre>
-<p>Showing all DataMaps on this table</p>
-<pre><code>SHOW DATAMAP
-ON TABLE main_table
+<ul>
+<li>Test using Alluxio via CarbonSession</li>
+</ul>
+<pre lang="$scala"><code>import org.apache.spark.sql.CarbonSession._
+import org.apache.spark.sql.SparkSession
+   
+val carbon = SparkSession.builder().master("local").appName("test").getOrCreateCarbonSession("alluxio://localhost:19998/carbondata");
+carbon.sql("CREATE TABLE carbon_alluxio(id String,name String, city String,age Int) STORED as carbondata");
+carbon.sql(s"LOAD DATA LOCAL INPATH '${CARBONDATA_PATH}/integration/spark-common-test/src/test/resources/sample.csv' into table carbon_alluxio");
+carbon.sql("select * from carbon_alluxio").show
 </code></pre>
-<p>Disable Datamap</p>
-<blockquote>
-<p>The datamap by default is enabled. To support tuning on query, we can disable a specific datamap during query to observe whether we can gain performance enhancement from it. This is effective only for current session.</p>
-</blockquote>
-<pre><code>// disable the datamap
-SET carbon.datamap.visible.dbName.tableName.dataMapName = false
-// enable the datamap
-SET carbon.datamap.visible.dbName.tableName.dataMapName = true
+<ul>
+<li>Result</li>
+</ul>
+<pre lang="$scala"><code>scala&gt; carbon.sql("select * from carbon_alluxio").show
++---+------+---------+---+
+| id|  name|     city|age|
++---+------+---------+---+
+|  1| david| shenzhen| 31|
+|  2| eason| shenzhen| 27|
+|  3| jarry|    wuhan| 35|
+|  3| jarry|Bangalore| 35|
+|  4| kunal|    Delhi| 26|
+|  4|vishal|Bangalore| 29|
++---+------+---------+---+
 </code></pre>
 <h2>
-<a id="bloomfilter-datamap-introduction" class="anchor" href="#bloomfilter-datamap-introduction" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>BloomFilter DataMap Introduction</h2>
-<p>A Bloom filter is a space-efficient probabilistic data structure that is used to test whether an element is a member of a set.
-Carbondata introduced BloomFilter as an index datamap to enhance the performance of querying with precise value.
-It is well suitable for queries that do precise match on high cardinality columns(such as Name/ID).
-Internally, CarbonData maintains a BloomFilter per blocklet for each index column to indicate that whether a value of the column is in this blocklet.
-Just like the other datamaps, BloomFilter datamap is managed along with main tables by CarbonData.
-User can create BloomFilter datamap on specified columns with specified BloomFilter configurations such as size and probability.</p>
-<p>For instance, main table called <strong>datamap_test</strong> which is defined as:</p>
-<pre><code>CREATE TABLE datamap_test (
-  id string,
-  name string,
-  age int,
-  city string,
-  country string)
-STORED AS carbondata
-TBLPROPERTIES('SORT_COLUMNS'='id')
+<a id="carbondata-supports-alluxio-by-spark-submit" class="anchor" href="#carbondata-supports-alluxio-by-spark-submit" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CarbonData supports alluxio by spark-submit</h2>
+<h3>
+<a id="building-carbondata-2" class="anchor" href="#building-carbondata-2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a><a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>
+</h3>
+<ul>
+<li>Please refer to <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a>.</li>
+</ul>
+<h3>
+<a id="preparing-spark-1" class="anchor" href="#preparing-spark-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Preparing Spark</h3>
+<ul>
+<li>Please refer to <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">http://spark.apache.org/docs/latest/</a>
+</li>
+</ul>
+<h3>
+<a id="downloading-alluxio-and-uncompressing-it-1" class="anchor" href="#downloading-alluxio-and-uncompressing-it-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Downloading alluxio and uncompressing it</h3>
+<ul>
+<li>Please refer to <a href="https://www.alluxio.org/download" target=_blank rel="nofollow">https://www.alluxio.org/download</a>
+</li>
+</ul>
+<h3>
+<a id="running-spark-submit" class="anchor" href="#running-spark-submit" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Running spark-submit</h3>
+<h4>
+<a id="upload-data-to-alluxio" class="anchor" href="#upload-data-to-alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Upload data to alluxio</h4>
+<pre lang="$command"><code>./bin/alluxio fs  copyFromLocal ${CARBONDATA_PATH}/hadoop/src/test/resources/data.csv /
 </code></pre>
-<p>In the above example, <code>id</code> and <code>name</code> are high cardinality columns
-and we always query on <code>id</code> and <code>name</code> with precise value.
-since <code>id</code> is in the sort_columns and it is orderd,
-query on it will be fast because CarbonData can skip all the irrelative blocklets.
-But queries on <code>name</code> may be bad since the blocklet minmax may not help,
-because in each blocklet the range of the value of <code>name</code> may be the same -- all from A* to z*.
-In this case, user can create a BloomFilter datamap on column <code>name</code>.
-Moreover, user can also create a BloomFilter datamap on the sort_columns.
-This is useful if user has too many segments and the range of the value of sort_columns are almost the same.</p>
-<p>User can create BloomFilter datamap using the Create DataMap DDL:</p>
-<pre><code>CREATE DATAMAP dm
-ON TABLE datamap_test
-USING 'bloomfilter'
-DMPROPERTIES ('INDEX_COLUMNS' = 'name,id', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001', 'BLOOM_COMPRESS'='true')
+<h4>
+<a id="command" class="anchor" href="#command" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Command</h4>
+<pre lang="$command"><code>./bin/spark-submit \
+--master local \
+--jars ${ALLUXIO_PATH}/client/alluxio-1.8.1-client.jar,${CARBONDATA_PATH}/examples/spark2/target/carbondata-examples-1.6.0-SNAPSHOT.jar \
+--class org.apache.carbondata.examples.AlluxioExample \
+${CARBONDATA_PATH}/assembly/target/scala-2.11/apache-carbondata-1.6.0-SNAPSHOT-bin-spark2.2.1-hadoop2.7.2.jar \
+false
+</code></pre>
+<p><strong>NOTE</strong>: Please set runShell to false to avoid a dependency on the Alluxio shell module.</p>
+<h4>
+<a id="result" class="anchor" href="#result" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Result</h4>
+<pre lang="$command"><code>+-----------------+-------+--------------------+--------------------+---------+-----------+---------+----------+
+|SegmentSequenceId| Status|     Load Start Time|       Load End Time|Merged To|File Format|Data Size|Index Size|
++-----------------+-------+--------------------+--------------------+---------+-----------+---------+----------+
+|                1|Success|2019-01-09 15:10:...|2019-01-09 15:10:...|       NA|COLUMNAR_V3|  23.92KB|    1.07KB|
+|                0|Success|2019-01-09 15:10:...|2019-01-09 15:10:...|       NA|COLUMNAR_V3|  23.92KB|    1.07KB|
++-----------------+-------+--------------------+--------------------+---------+-----------+---------+----------+
+
++-------+------+
+|country|amount|
++-------+------+
+| france|   202|
+|  china|  1698|
++-------+------+
+
++-----------------+---------+--------------------+--------------------+---------+-----------+---------+----------+
+|SegmentSequenceId|   Status|     Load Start Time|       Load End Time|Merged To|File Format|Data Size|Index Size|
++-----------------+---------+--------------------+--------------------+---------+-----------+---------+----------+
+|                3|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.03KB|
+|                2|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.07KB|
+|                1|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.07KB|
+|              0.1|  Success|2019-01-09 15:10:...|2019-01-09 15:10:...|       NA|COLUMNAR_V3|  37.65KB|    1.08KB|
+|                0|Compacted|2019-01-09 15:10:...|2019-01-09 15:10:...|      0.1|COLUMNAR_V3|  23.92KB|    1.07KB|
++-----------------+---------+--------------------+--------------------+---------+-----------+---------+----------+
+
 </code></pre>
-<p><strong>Properties for BloomFilter DataMap</strong></p>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Is Required</th>
-<th>Default Value</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>INDEX_COLUMNS</td>
-<td>YES</td>
-<td></td>
-<td>Carbondata will generate BloomFilter index on these columns. Queries on these columns are usually like 'COL = VAL'.</td>
-</tr>
-<tr>
-<td>BLOOM_SIZE</td>
-<td>NO</td>
-<td>640000</td>
-<td>This value is internally used by BloomFilter as the number of expected insertions, it will affect the size of BloomFilter index. Since each blocklet has a BloomFilter here, so the default value is the approximate distinct index values in a blocklet assuming that each blocklet contains 20 pages and each page contains 32000 records. The value should be an integer.</td>
-</tr>
-<tr>
-<td>BLOOM_FPP</td>
-<td>NO</td>
-<td>0.00001</td>
-<td>This value is internally used by BloomFilter as the False-Positive Probability, it will affect the size of bloomfilter index as well as the number of hash functions for the BloomFilter. The value should be in the range (0, 1). In one test scenario, a 96GB TPCH customer table with bloom_size=320000 and bloom_fpp=0.00001 will result in 18 false positive samples.</td>
-</tr>
-<tr>
-<td>BLOOM_COMPRESS</td>
-<td>NO</td>
-<td>true</td>
-<td>Whether to compress the BloomFilter index files.</td>
-</tr>
-</tbody>
-</table>
-<h2>
-<a id="loading-data" class="anchor" href="#loading-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data</h2>
-<p>When loading data to main table, BloomFilter files will be generated for all the
-index_columns given in DMProperties which contains the blockletId and a BloomFilter for each index column.
-These index files will be written inside a folder named with datamap name
-inside each segment folders.</p>
-<h2>
-<a id="querying-data" class="anchor" href="#querying-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Querying Data</h2>
-<p>User can verify whether a query can leverage BloomFilter datamap by executing <code>EXPLAIN</code> command,
-which will show the transformed logical plan, and thus user can check whether the BloomFilter datamap can skip blocklets during the scan.
-If the datamap does not prune blocklets well, you can try to increase the value of property <code>BLOOM_SIZE</code> and decrease the value of property <code>BLOOM_FPP</code>.</p>
-<h2>
-<a id="data-management-with-bloomfilter-datamap" class="anchor" href="#data-management-with-bloomfilter-datamap" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Data Management With BloomFilter DataMap</h2>
-<p>Data management with BloomFilter datamap has no difference with that on Lucene datamap.
-You can refer to the corresponding section in <code>CarbonData Lucene DataMap</code>.</p>
 <h2>
-<a id="useful-tips" class="anchor" href="#useful-tips" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Useful Tips</h2>
-<ul>
-<li>BloomFilter DataMap is suggested to be created on the high cardinality columns.
-Query conditions on these columns are always simple <code>equal</code> or <code>in</code>,
-such as 'col1=XX', 'col1 in (XX, YY)'.</li>
-<li>We can create multiple BloomFilter datamaps on one table,
-but we do recommend you to create one BloomFilter datamap that contains multiple index columns,
-because the data loading and query performance will be better.</li>
-<li>
-<code>BLOOM_FPP</code> is only the expected number from user, the actually FPP may be worse.
-If the BloomFilter datamap does not work well,
-you can try to increase <code>BLOOM_SIZE</code> and decrease <code>BLOOM_FPP</code> at the same time.
-Notice that bigger <code>BLOOM_SIZE</code> will increase the size of index file
-and smaller <code>BLOOM_FPP</code> will increase runtime calculation while performing query.</li>
-<li>'0' skipped blocklets of BloomFilter datamap in explain output indicates that
-BloomFilter datamap does not prune better than Main datamap.
-(For example since the data is not ordered, a specific value may be contained in many blocklets. In this case, bloom may not work better than Main DataMap.)
-If this occurs very often, it means that current BloomFilter is useless. You can disable or drop it.
-Sometimes we cannot see any pruning result about BloomFilter datamap in the explain output,
-this indicates that the previous datamap has pruned all the blocklets and there is no need to continue pruning.</li>
-<li>In some scenarios, the BloomFilter datamap may not enhance the query performance significantly
-but if it can reduce the number of spark task,
-there is still a chance that BloomFilter datamap can enhance the performance for concurrent query.</li>
-<li>Note that BloomFilter datamap will decrease the data loading performance and may cause slightly storage expansion (for datamap index file).</li>
-</ul>
+<a id="reference" class="anchor" href="#reference" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Reference</h2>
+<p>[1] <a href="https://www.alluxio.org/docs/1.8/en/Getting-Started.html" target=_blank rel="nofollow">https://www.alluxio.org/docs/1.8/en/Getting-Started.html</a>
+[2] <a href="https://www.alluxio.org/docs/1.8/en/compute/Spark.html" target=_blank rel="nofollow">https://www.alluxio.org/docs/1.8/en/compute/Spark.html</a></p>
 <script>
-$(function() {
-  // Show selected style on nav item
-  $('.b-nav__datamap').addClass('selected');
-  
-  if (!$('.b-nav__datamap').parent().hasClass('nav__item__with__subs--expanded')) {
-    // Display datamap subnav items
-    $('.b-nav__datamap').parent().toggleClass('nav__item__with__subs--expanded');
-  }
-});
+// Show selected style on nav item
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
 </script></div>
 </div>
 </div>
diff --git a/src/main/webapp/bloomfilter-datamap-guide.html b/src/main/webapp/bloomfilter-datamap-guide.html
index aab8dc0..b9a073f 100644
--- a/src/main/webapp/bloomfilter-datamap-guide.html
+++ b/src/main/webapp/bloomfilter-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -236,7 +236,7 @@ ON TABLE main_table
 USING 'bloomfilter'
 DMPROPERTIES ('index_columns'='city, name', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')
 </code></pre>
-<p>Dropping specified datamap</p>
+<p>Dropping Specified DataMap</p>
 <pre><code>DROP DATAMAP [IF EXISTS] datamap_name
 ON TABLE main_table
 </code></pre>
@@ -244,7 +244,7 @@ ON TABLE main_table
 <pre><code>SHOW DATAMAP
 ON TABLE main_table
 </code></pre>
-<p>Disable Datamap</p>
+<p>Disable DataMap</p>
 <blockquote>
 <p>The datamap by default is enabled. To support tuning on query, we can disable a specific datamap during query to observe whether we can gain performance enhancement from it. This is effective only for current session.</p>
 </blockquote>
@@ -277,10 +277,10 @@ since <code>id</code> is in the sort_columns and it is orderd,
 query on it will be fast because CarbonData can skip all the irrelative blocklets.
 But queries on <code>name</code> may be bad since the blocklet minmax may not help,
 because in each blocklet the range of the value of <code>name</code> may be the same -- all from A* to z*.
-In this case, user can create a BloomFilter datamap on column <code>name</code>.
-Moreover, user can also create a BloomFilter datamap on the sort_columns.
+In this case, user can create a BloomFilter DataMap on column <code>name</code>.
+Moreover, user can also create a BloomFilter DataMap on the sort_columns.
 This is useful if user has too many segments and the range of the value of sort_columns are almost the same.</p>
-<p>User can create BloomFilter datamap using the Create DataMap DDL:</p>
+<p>User can create BloomFilter DataMap using the Create DataMap DDL:</p>
 <pre><code>CREATE DATAMAP dm
 ON TABLE datamap_test
 USING 'bloomfilter'
@@ -327,16 +327,16 @@ DMPROPERTIES ('INDEX_COLUMNS' = 'name,id', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0
 <a id="loading-data" class="anchor" href="#loading-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data</h2>
 <p>When loading data to main table, BloomFilter files will be generated for all the
 index_columns given in DMProperties which contains the blockletId and a BloomFilter for each index column.
-These index files will be written inside a folder named with datamap name
+These index files will be written inside a folder named with DataMap name
 inside each segment folders.</p>
 <h2>
 <a id="querying-data" class="anchor" href="#querying-data" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Querying Data</h2>
-<p>User can verify whether a query can leverage BloomFilter datamap by executing <code>EXPLAIN</code> command,
-which will show the transformed logical plan, and thus user can check whether the BloomFilter datamap can skip blocklets during the scan.
-If the datamap does not prune blocklets well, you can try to increase the value of property <code>BLOOM_SIZE</code> and decrease the value of property <code>BLOOM_FPP</code>.</p>
+<p>User can verify whether a query can leverage BloomFilter DataMap by executing <code>EXPLAIN</code> command,
+which will show the transformed logical plan, and thus user can check whether the BloomFilter DataMap can skip blocklets during the scan.
+If the DataMap does not prune blocklets well, you can try to increase the value of property <code>BLOOM_SIZE</code> and decrease the value of property <code>BLOOM_FPP</code>.</p>
 <h2>
 <a id="data-management-with-bloomfilter-datamap" class="anchor" href="#data-management-with-bloomfilter-datamap" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Data Management With BloomFilter DataMap</h2>
-<p>Data management with BloomFilter datamap has no difference with that on Lucene datamap.
+<p>Data management with BloomFilter DataMap has no difference with that on Lucene DataMap.
 You can refer to the corresponding section in <code>CarbonData Lucene DataMap</code>.</p>
 <h2>
 <a id="useful-tips" class="anchor" href="#useful-tips" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Useful Tips</h2>
@@ -344,25 +344,25 @@ You can refer to the corresponding section in <code>CarbonData Lucene DataMap</c
 <li>BloomFilter DataMap is suggested to be created on the high cardinality columns.
 Query conditions on these columns are always simple <code>equal</code> or <code>in</code>,
 such as 'col1=XX', 'col1 in (XX, YY)'.</li>
-<li>We can create multiple BloomFilter datamaps on one table,
-but we do recommend you to create one BloomFilter datamap that contains multiple index columns,
+<li>We can create multiple BloomFilter DataMaps on one table,
+but we do recommend you to create one BloomFilter DataMap that contains multiple index columns,
 because the data loading and query performance will be better.</li>
 <li>
 <code>BLOOM_FPP</code> is only the expected number from user, the actually FPP may be worse.
-If the BloomFilter datamap does not work well,
+If the BloomFilter DataMap does not work well,
 you can try to increase <code>BLOOM_SIZE</code> and decrease <code>BLOOM_FPP</code> at the same time.
 Notice that bigger <code>BLOOM_SIZE</code> will increase the size of index file
 and smaller <code>BLOOM_FPP</code> will increase runtime calculation while performing query.</li>
-<li>'0' skipped blocklets of BloomFilter datamap in explain output indicates that
-BloomFilter datamap does not prune better than Main datamap.
+<li>'0' skipped blocklets of BloomFilter DataMap in explain output indicates that
+BloomFilter DataMap does not prune better than Main DataMap.
 (For example since the data is not ordered, a specific value may be contained in many blocklets. In this case, bloom may not work better than Main DataMap.)
 If this occurs very often, it means that current BloomFilter is useless. You can disable or drop it.
-Sometimes we cannot see any pruning result about BloomFilter datamap in the explain output,
-this indicates that the previous datamap has pruned all the blocklets and there is no need to continue pruning.</li>
-<li>In some scenarios, the BloomFilter datamap may not enhance the query performance significantly
+Sometimes we cannot see any pruning result about BloomFilter DataMap in the explain output,
+this indicates that the previous DataMap has pruned all the blocklets and there is no need to continue pruning.</li>
+<li>In some scenarios, the BloomFilter DataMap may not enhance the query performance significantly
 but if it can reduce the number of spark task,
-there is still a chance that BloomFilter datamap can enhance the performance for concurrent query.</li>
-<li>Note that BloomFilter datamap will decrease the data loading performance and may cause slightly storage expansion (for datamap index file).</li>
+there is still a chance that BloomFilter DataMap can enhance the performance for concurrent query.</li>
+<li>Note that BloomFilter DataMap will decrease the data loading performance and may cause slight storage expansion (for DataMap index file).</li>
 </ul>
 <script>
 $(function() {
diff --git a/src/main/webapp/carbon-as-spark-datasource-guide.html b/src/main/webapp/carbon-as-spark-datasource-guide.html
index 9ffca8f..43698e5 100644
--- a/src/main/webapp/carbon-as-spark-datasource-guide.html
+++ b/src/main/webapp/carbon-as-spark-datasource-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/configuration-parameters.html b/src/main/webapp/configuration-parameters.html
index 5cc7a45..6c48b5e 100644
--- a/src/main/webapp/configuration-parameters.html
+++ b/src/main/webapp/configuration-parameters.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -382,8 +382,7 @@
 <tr>
 <td>carbon.load.sort.scope</td>
 <td>LOCAL_SORT</td>
-<td>CarbonData can support various sorting options to match the balance between load and query performance. LOCAL_SORT:All the data given to an executor in the single load is fully sorted and written to carbondata files. Data loading performance is reduced a little as the entire data needs to be sorted in the executor. BATCH_SORT:Sorts the data in batches of configured size and writes to carbondata files. Data loading performance increases as the entire data need not be sorted. But query [...]
-</td>
+<td>CarbonData can support various sorting options to match the balance between load and query performance. LOCAL_SORT:All the data given to an executor in the single load is fully sorted and written to carbondata files. Data loading performance is reduced a little as the entire data needs to be sorted in the executor. BATCH_SORT:Sorts the data in batches of configured size and writes to carbondata files. Data loading performance increases as the entire data need not be sorted. But query [...]
 </tr>
 <tr>
 <td>carbon.load.batch.sort.size.inmb</td>
@@ -534,7 +533,7 @@
 <tr>
 <td>carbon.column.compressor</td>
 <td>snappy</td>
-<td>CarbonData will compress the column values using the compressor specified by this configuration. Currently CarbonData supports 'snappy' and 'zstd' compressors.</td>
+<td>CarbonData will compress the column values using the compressor specified by this configuration. Currently CarbonData supports 'snappy', 'zstd' and 'gzip' compressors.</td>
 </tr>
 <tr>
 <td>carbon.minmax.allowed.byte.count</td>
@@ -702,7 +701,7 @@
 <tr>
 <td>carbon.detail.batch.size</td>
 <td>100</td>
-<td>The buffer size to store records, returned from the block scan. In limit scenario this parameter is very important. For example your query limit is 1000. But if we set this value to 3000 that means we get 3000 records from scan but spark will only take 1000 rows. So the 2000 remaining are useless. In one Finance test case after we set it to 100, in the limit 1000 scenario the performance increase about 2 times in comparison to if we set this value to 12000.</td>
+<td>The buffer size to store records, returned from the block scan. In limit scenario this parameter is very important. For example your query limit is 1000. But if we set this value to 3000 that means we get 3000 records from scan but spark will only take 1000 rows. So the 2000 remaining are useless. In one Finance test case after we set it to 100, in the limit 1000 scenario the performance increase about 2 times in comparison to if we set this value to 12000.<br><br> <strong>NOTE</stro [...]
 </tr>
 <tr>
 <td>carbon.enable.vector.reader</td>
@@ -730,11 +729,6 @@
 <td>CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations. This configuration enables to use unsafe functions in CarbonData while scanning the  data during query.</td>
 </tr>
 <tr>
-<td>carbon.query.validate.direct.query.on.datamap</td>
-<td>true</td>
-<td>CarbonData supports creating pre-aggregate table datamaps as an independent tables. For some debugging purposes, it might be required to directly query from such datamap tables. This configuration allows to query on such datamaps.</td>
-</tr>
-<tr>
 <td>carbon.max.driver.threads.for.block.pruning</td>
 <td>4</td>
 <td>Number of threads used for driver pruning when the carbon files are more than 100k Maximum memory. This configuration can used to set number of threads between 1 to 4.</td>
@@ -888,11 +882,15 @@
 </tr>
 <tr>
 <td>carbon.options.sort.scope</td>
-<td>Specifies how the current data load should be sorted with. <strong>NOTE:</strong> Refer to <a href="#data-loading-configuration">Data Loading Configuration</a>#carbon.sort.scope for detailed information.</td>
+<td>Specifies the sort scope to be used for the current data load. This sort parameter is at the table level. <strong>NOTE:</strong> Refer to <a href="#data-loading-configuration">Data Loading Configuration</a>#carbon.sort.scope for detailed information.</td>
+</tr>
+<tr>
+<td>carbon.table.load.sort.scope.db_name.table_name</td>
+<td>Overrides the SORT_SCOPE provided in CREATE TABLE.</td>
 </tr>
 <tr>
 <td>carbon.options.global.sort.partitions</td>
-<td></td>
+<td>Specifies the number of partitions to be used during global sort.</td>
 </tr>
 <tr>
 <td>carbon.options.serialization.null.format</td>
@@ -900,7 +898,7 @@
 </tr>
 <tr>
 <td>carbon.query.directQueryOnDataMap.enabled</td>
-<td>Specifies whether datamap can be queried directly. This is useful for debugging purposes.**NOTE: **Refer to <a href="#query-configuration">Query Configuration</a>#carbon.query.validate.direct.query.on.datamap for detailed information.</td>
+<td>Specifies whether a datamap can be queried directly. This is useful for debugging purposes. <strong>NOTE:</strong> Refer to <a href="#query-configuration">Query Configuration</a> for detailed information.</td>
 </tr>
 </tbody>
 </table>
diff --git a/src/main/webapp/datamap-developer-guide.html b/src/main/webapp/datamap-developer-guide.html
index 286c21d..b0cb182 100644
--- a/src/main/webapp/datamap-developer-guide.html
+++ b/src/main/webapp/datamap-developer-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -217,15 +217,16 @@
                         <div id="viewpage" name="viewpage">
                             <div class="row">
                                 <div class="col-sm-12  col-md-12">
-                                    <div><h1>
+                                    <div>
+<h1>
 <a id="datamap-developer-guide" class="anchor" href="#datamap-developer-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DataMap Developer Guide</h1>
 <h3>
 <a id="introduction" class="anchor" href="#introduction" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Introduction</h3>
 <p>DataMap is a data structure that can be used to accelerate certain query of the table. Different DataMap can be implemented by developers.
-Currently, there are two 2 types of DataMap supported:</p>
+Currently, there are two types of DataMap supported:</p>
 <ol>
-<li>IndexDataMap: DataMap that leverages index to accelerate filter query</li>
-<li>MVDataMap: DataMap that leverages Materialized View to accelerate olap style query, like SPJG query (select, predicate, join, groupby)</li>
+<li>IndexDataMap: DataMap that leverages index to accelerate filter query. Lucene DataMap and BloomFilter DataMap belong to this type of DataMap.</li>
+<li>MVDataMap: DataMap that leverages Materialized View to accelerate olap style query, like SPJG query (select, predicate, join, groupby). Preaggregate, timeseries and mv DataMap belong to this type of DataMap.</li>
 </ol>
 <h3>
 <a id="datamap-provider" class="anchor" href="#datamap-provider" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DataMap Provider</h3>
@@ -234,7 +235,7 @@ Currently, the provider string can be:</p>
 <ol>
 <li>preaggregate: A type of MVDataMap that does pre-aggregation of a single table</li>
 <li>timeseries: A type of MVDataMap that does pre-aggregation based on the time dimension of the table</li>
-<li>class name IndexDataMapFactory  implementation: Developer can implement new type of IndexDataMap by extending IndexDataMapFactory</li>
+<li>class name of an IndexDataMapFactory implementation: Developers can implement a new type of IndexDataMap by extending IndexDataMapFactory, as sketched below</li>
 </ol>
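+<p>As a sketch, a custom IndexDataMap could be registered by passing the fully qualified class name of its factory as the provider (the class name below is hypothetical):</p>
+<pre><code>CREATE DATAMAP dm
+ON TABLE main
+USING "org.example.datamap.MyIndexDataMapFactory"
+</code></pre>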
 <p>When user issues <code>DROP DATAMAP dm ON TABLE main</code>, the corresponding DataMapProvider interface will be called.</p>
 <p>Click for more details about <a href="./datamap-management.html#datamap-management">DataMap Management</a> and supported <a href="./datamap-management.html#overview">DSL</a>.</p>
diff --git a/src/main/webapp/datamap-management.html b/src/main/webapp/datamap-management.html
index 5dc2b33..ac847f3 100644
--- a/src/main/webapp/datamap-management.html
+++ b/src/main/webapp/datamap-management.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -238,13 +238,13 @@
 <h2>
 <a id="overview" class="anchor" href="#overview" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Overview</h2>
 <p>DataMap can be created using the following DDL:</p>
-<pre><code>  CREATE DATAMAP [IF NOT EXISTS] datamap_name
-  [ON TABLE main_table]
-  USING "datamap_provider"
-  [WITH DEFERRED REBUILD]
-  DMPROPERTIES ('key'='value', ...)
-  AS
-    SELECT statement
+<pre><code>CREATE DATAMAP [IF NOT EXISTS] datamap_name
+[ON TABLE main_table]
+USING "datamap_provider"
+[WITH DEFERRED REBUILD]
+DMPROPERTIES ('key'='value', ...)
+AS
+  SELECT statement
 </code></pre>
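+<p>For example, a BloomFilter DataMap could be created as below (a sketch; the table and column names are illustrative):</p>
+<pre><code>CREATE DATAMAP dm_city
+ON TABLE sales
+USING "bloomfilter"
+DMPROPERTIES ('INDEX_COLUMNS'='city', 'BLOOM_SIZE'='640000')
+</code></pre>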
 <p>Currently, there are 5 DataMap implementations in CarbonData.</p>
 <table>
diff --git a/src/main/webapp/ddl-of-carbondata.html b/src/main/webapp/ddl-of-carbondata.html
index 7f84786..1ef64f8 100644
--- a/src/main/webapp/ddl-of-carbondata.html
+++ b/src/main/webapp/ddl-of-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -226,13 +226,13 @@
 <p><a href="#create-table">CREATE TABLE</a></p>
 <ul>
 <li><a href="#dictionary-encoding-configuration">Dictionary Encoding</a></li>
+<li><a href="#local-dictionary-configuration">Local Dictionary</a></li>
 <li><a href="#inverted-index-configuration">Inverted Index</a></li>
 <li><a href="#sort-columns-configuration">Sort Columns</a></li>
 <li><a href="#sort-scope-configuration">Sort Scope</a></li>
 <li><a href="#table-block-size-configuration">Table Block Size</a></li>
 <li><a href="#table-compaction-configuration">Table Compaction</a></li>
 <li><a href="#streaming">Streaming</a></li>
-<li><a href="#local-dictionary-configuration">Local Dictionary</a></li>
 <li><a href="#caching-minmax-value-for-required-columns">Caching Column Min/Max</a></li>
 <li><a href="#caching-at-block-or-blocklet-level">Caching Level</a></li>
 <li><a href="#support-flat-folder-same-as-hiveparquet">Hive/Parquet folder Structure</a></li>
@@ -240,6 +240,7 @@
 <li><a href="#compression-for-table">Compression for Table</a></li>
 <li><a href="#bad-records-path">Bad Records Path</a></li>
 <li><a href="#load-minimum-data-size">Load Minimum Input File Size</a></li>
+<li><a href="#range-column">Range Column</a></li>
 </ul>
 </li>
 <li>
@@ -265,9 +266,10 @@
 <li><a href="#rename-table">RENAME TABLE</a></li>
 <li><a href="#add-columns">ADD COLUMNS</a></li>
 <li><a href="#drop-columns">DROP COLUMNS</a></li>
-<li><a href="#change-data-type">CHANGE DATA TYPE</a></li>
+<li><a href="#change-column-nametype">RENAME COLUMN</a></li>
+<li><a href="#change-column-nametype">CHANGE COLUMN NAME/TYPE</a></li>
 <li><a href="#merge-index">MERGE INDEXES</a></li>
-<li><a href="#set-and-unset-for-local-dictionary-properties">SET/UNSET Local Dictionary Properties</a></li>
+<li><a href="#set-and-unset">SET/UNSET</a></li>
 </ul>
 </li>
 <li><a href="#drop-table">DROP TABLE</a></li>
@@ -418,6 +420,10 @@ STORED AS carbondata
 <td><a href="#load-minimum-data-size">LOAD_MIN_SIZE_INMB</a></td>
 <td>Minimum input data size per node for data loading</td>
 </tr>
+<tr>
+<td><a href="#range-column">Range Column</a></td>
+<td>partition input data by range</td>
+</tr>
 </tbody>
 </table>
 <p>Following are the guidelines for TBLPROPERTIES, CarbonData's additional table options can be set via carbon.properties.</p>
@@ -429,106 +435,11 @@ STORED AS carbondata
 Suggested use cases: apply dictionary encoding to low cardinality columns; it might help to improve the data compression ratio and performance.</p>
 <pre><code>TBLPROPERTIES ('DICTIONARY_INCLUDE'='column1, column2')
 </code></pre>
-<p><strong>NOTE</strong>: Dictionary Include/Exclude for complex child columns is not supported.</p>
-</li>
-<li>
-<h5>
-<a id="inverted-index-configuration" class="anchor" href="#inverted-index-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Inverted Index Configuration</h5>
-<p>By default inverted index is disabled as store size will be reduced, it can be enabled by using a table property. It might help to improve compression ratio and query speed, especially for low cardinality columns which are in reward position.
-Suggested use cases : For high cardinality columns, you can disable the inverted index for improving the data loading performance.</p>
-<pre><code>TBLPROPERTIES ('NO_INVERTED_INDEX'='column1', 'INVERTED_INDEX'='column2, column3')
-</code></pre>
-</li>
-<li>
-<h5>
-<a id="sort-columns-configuration" class="anchor" href="#sort-columns-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Columns Configuration</h5>
-<p>This property is for users to specify which columns belong to the MDK(Multi-Dimensions-Key) index.</p>
+<p><strong>NOTE</strong>:</p>
 <ul>
-<li>If users don't specify "SORT_COLUMN" property, by default MDK index be built by using all dimension columns except complex data type column.</li>
-<li>If this property is specified but with empty argument, then the table will be loaded without sort.</li>
-<li>This supports only string, date, timestamp, short, int, long, byte and boolean data types.
-Suggested use cases : Only build MDK index for required columns,it might help to improve the data loading performance.</li>
+<li>Dictionary Include/Exclude for complex child columns is not supported.</li>
+<li>Dictionary is global. Besides the global dictionary, CarbonData also supports local dictionary and non-dictionary encodings.</li>
 </ul>
-<pre><code>TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
-OR
-TBLPROPERTIES ('SORT_COLUMNS'='')
-</code></pre>
-<p><strong>NOTE</strong>: Sort_Columns for Complex datatype columns is not supported.</p>
-</li>
-<li>
-<h5>
-<a id="sort-scope-configuration" class="anchor" href="#sort-scope-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Scope Configuration</h5>
-<p>This property is for users to specify the scope of the sort during data load, following are the types of sort scope.</p>
-<ul>
-<li>LOCAL_SORT: It is the default sort scope.</li>
-<li>NO_SORT: It will load the data in unsorted manner, it will significantly increase load performance.</li>
-<li>BATCH_SORT: It increases the load performance but decreases the query performance if identified blocks &gt; parallelism.</li>
-<li>GLOBAL_SORT: It increases the query performance, especially high concurrent point query.
-And if you care about loading resources isolation strictly, because the system uses the spark GroupBy to sort data, the resource can be controlled by spark.</li>
-</ul>
-</li>
-</ul>
-<pre><code>### Example:
-
-```
-CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
-  productNumber INT,
-  productName STRING,
-  storeCity STRING,
-  storeProvince STRING,
-  productCategory STRING,
-  productBatch STRING,
-  saleQuantity INT,
-  revenue INT)
-STORED AS carbondata
-TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
-               'SORT_SCOPE'='NO_SORT')
-```
-</code></pre>
-<p><strong>NOTE:</strong> CarbonData also supports "using carbondata". Find example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala" target=_blank>SparkSessionExample</a> in the CarbonData repo.</p>
-<ul>
-<li>
-<h5>
-<a id="table-block-size-configuration" class="anchor" href="#table-block-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Block Size Configuration</h5>
-<p>This property is for setting block size of this table, the default value is 1024 MB and supports a range of 1 MB to 2048 MB.</p>
-<pre><code>TBLPROPERTIES ('TABLE_BLOCKSIZE'='512')
-</code></pre>
-<p><strong>NOTE:</strong> 512 or 512M both are accepted.</p>
-</li>
-<li>
-<h5>
-<a id="table-blocklet-size-configuration" class="anchor" href="#table-blocklet-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Blocklet Size Configuration</h5>
-<p>This property is for setting blocklet size in the carbondata file, the default value is 64 MB.
-Blocklet is the minimum IO read unit, in case of point queries reduce blocklet size might improve the query performance.</p>
-<p>Example usage:</p>
-<pre><code>TBLPROPERTIES ('TABLE_BLOCKLET_SIZE'='8')
-</code></pre>
-</li>
-<li>
-<h5>
-<a id="table-compaction-configuration" class="anchor" href="#table-compaction-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Compaction Configuration</h5>
-<p>These properties are table level compaction configurations, if not specified, system level configurations in carbon.properties will be used.
-Following are 5 configurations:</p>
-<ul>
-<li>MAJOR_COMPACTION_SIZE: same meaning as carbon.major.compaction.size, size in MB.</li>
-<li>AUTO_LOAD_MERGE: same meaning as carbon.enable.auto.load.merge.</li>
-<li>COMPACTION_LEVEL_THRESHOLD: same meaning as carbon.compaction.level.threshold.</li>
-<li>COMPACTION_PRESERVE_SEGMENTS: same meaning as carbon.numberof.preserve.segments.</li>
-<li>ALLOWED_COMPACTION_DAYS: same meaning as carbon.allowed.compaction.days.</li>
-</ul>
-<pre><code>TBLPROPERTIES ('MAJOR_COMPACTION_SIZE'='2048',
-               'AUTO_LOAD_MERGE'='true',
-               'COMPACTION_LEVEL_THRESHOLD'='5,6',
-               'COMPACTION_PRESERVE_SEGMENTS'='10',
-               'ALLOWED_COMPACTION_DAYS'='5')
-</code></pre>
-</li>
-<li>
-<h5>
-<a id="streaming" class="anchor" href="#streaming" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Streaming</h5>
-<p>CarbonData supports streaming ingestion for real-time data. You can create the 'streaming' table using the following table properties.</p>
-<pre><code>TBLPROPERTIES ('streaming'='true')
-</code></pre>
 </li>
 <li>
 <h5>
@@ -645,27 +556,120 @@ Following are 5 configurations:</p>
 </ul>
 <h3>
 <a id="example" class="anchor" href="#example" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example:</h3>
-<pre><code>CREATE TABLE carbontable(
-          
-            column1 string,
-          
-            column2 string,
-          
-            column3 LONG )
-          
-  STORED AS carbondata
-  TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true','LOCAL_DICTIONARY_THRESHOLD'='1000',
-  'LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
+<pre><code>CREATE TABLE carbontable(             
+  column1 string,             
+  column2 string,             
+  column3 LONG)
+STORED AS carbondata
+TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true','LOCAL_DICTIONARY_THRESHOLD'='1000',
+'LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
 </code></pre>
 <p><strong>NOTE:</strong></p>
 <ul>
 <li>We recommend using Local Dictionary when cardinality is high but distributed across multiple loads</li>
 <li>On a large cluster, decoding data can become a bottleneck for global dictionary as there will be many remote reads. In this scenario, it is better to use Local Dictionary.</li>
 <li>When cardinality is low, but loads are repetitive, it is better to use global dictionary as local dictionary generates multiple dictionary files at blocklet level increasing redundancy.</li>
+<li>If users want non-dictionary encoding, they can set LOCAL_DICTIONARY_ENABLE to false and leave DICTIONARY_INCLUDE unset (see the sketch below).</li>
 </ul>
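+<p>A minimal sketch of a table using non-dictionary encoding (the table and column names are illustrative):</p>
+<pre><code>CREATE TABLE no_dict_table (column1 STRING)
+STORED AS carbondata
+TBLPROPERTIES ('LOCAL_DICTIONARY_ENABLE'='false')
+</code></pre>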
 <ul>
 <li>
 <h5>
+<a id="inverted-index-configuration" class="anchor" href="#inverted-index-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Inverted Index Configuration</h5>
+<p>By default the inverted index is disabled, as this reduces the store size; it can be enabled using a table property. It might help to improve the compression ratio and query speed, especially for low cardinality columns which are in rearward position.
+Suggested use cases: for high cardinality columns, you can disable the inverted index to improve the data loading performance.</p>
+<p><strong>NOTE</strong>: Columns specified in INVERTED_INDEX should also be present in SORT_COLUMNS.</p>
+<pre><code>TBLPROPERTIES ('SORT_COLUMNS'='column2,column3','NO_INVERTED_INDEX'='column1', 'INVERTED_INDEX'='column2, column3')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="sort-columns-configuration" class="anchor" href="#sort-columns-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Columns Configuration</h5>
+<p>This property is for users to specify which columns belong to the MDK (Multi-Dimensions-Key) index.</p>
+<ul>
+<li>If users don't specify the "SORT_COLUMNS" property, by default no columns are sorted</li>
+<li>If this property is specified but with empty argument, then the table will be loaded without sort.</li>
+<li>This supports only string, date, timestamp, short, int, long, byte and boolean data types.
+Suggested use cases: only build the MDK index for required columns; it might help to improve the data loading performance.</li>
+</ul>
+<pre><code>TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
+</code></pre>
+<p><strong>NOTE</strong>: Sort_Columns for Complex datatype columns is not supported.</p>
+</li>
+<li>
+<h5>
+<a id="sort-scope-configuration" class="anchor" href="#sort-scope-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Sort Scope Configuration</h5>
+<p>This property is for users to specify the scope of the sort during data load; the following are the types of sort scope:</p>
+<ul>
+<li>LOCAL_SORT: data will be locally sorted (task-level sorting).</li>
+<li>NO_SORT: the default scope. It loads the data in an unsorted manner, which significantly increases load performance.</li>
+<li>BATCH_SORT: It increases the load performance but decreases the query performance if the number of identified blocks &gt; parallelism.</li>
+<li>GLOBAL_SORT: It increases query performance, especially for highly concurrent point queries.
+Since the system uses Spark's GroupBy to sort the data, the loading resources can be controlled by Spark if you require strict isolation of loading resources.</li>
+</ul>
+</li>
+</ul>
+<h3>
+<a id="example-1" class="anchor" href="#example-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example:</h3>
+<pre><code>CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
+  productNumber INT,
+  productName STRING,
+  storeCity STRING,
+  storeProvince STRING,
+  productCategory STRING,
+  productBatch STRING,
+  saleQuantity INT,
+  revenue INT)
+STORED AS carbondata
+TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
+               'SORT_SCOPE'='NO_SORT')
+</code></pre>
+<p><strong>NOTE:</strong> CarbonData also supports "using carbondata". Find example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala" target=_blank>SparkSessionExample</a> in the CarbonData repo.</p>
+<ul>
+<li>
+<h5>
+<a id="table-block-size-configuration" class="anchor" href="#table-block-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Block Size Configuration</h5>
+<p>This property is for setting the block size of this table; the default value is 1024 MB and the supported range is 1 MB to 2048 MB.</p>
+<pre><code>TBLPROPERTIES ('TABLE_BLOCKSIZE'='512')
+</code></pre>
+<p><strong>NOTE:</strong> 512 or 512M both are accepted.</p>
+</li>
+<li>
+<h5>
+<a id="table-blocklet-size-configuration" class="anchor" href="#table-blocklet-size-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Blocklet Size Configuration</h5>
+<p>This property is for setting the blocklet size in the carbondata file; the default value is 64 MB.
+Blocklet is the minimum IO read unit; in case of point queries, reducing the blocklet size might improve the query performance.</p>
+<p>Example usage:</p>
+<pre><code>TBLPROPERTIES ('TABLE_BLOCKLET_SIZE'='8')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="table-compaction-configuration" class="anchor" href="#table-compaction-configuration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Table Compaction Configuration</h5>
+<p>These properties are table-level compaction configurations; if not specified, the system-level configurations in carbon.properties will be used.
+Following are the 5 configurations:</p>
+<ul>
+<li>MAJOR_COMPACTION_SIZE: same meaning as carbon.major.compaction.size, size in MB.</li>
+<li>AUTO_LOAD_MERGE: same meaning as carbon.enable.auto.load.merge.</li>
+<li>COMPACTION_LEVEL_THRESHOLD: same meaning as carbon.compaction.level.threshold.</li>
+<li>COMPACTION_PRESERVE_SEGMENTS: same meaning as carbon.numberof.preserve.segments.</li>
+<li>ALLOWED_COMPACTION_DAYS: same meaning as carbon.allowed.compaction.days.</li>
+</ul>
+<pre><code>TBLPROPERTIES ('MAJOR_COMPACTION_SIZE'='2048',
+               'AUTO_LOAD_MERGE'='true',
+               'COMPACTION_LEVEL_THRESHOLD'='5,6',
+               'COMPACTION_PRESERVE_SEGMENTS'='10',
+               'ALLOWED_COMPACTION_DAYS'='5')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="streaming" class="anchor" href="#streaming" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Streaming</h5>
+<p>CarbonData supports streaming ingestion for real-time data. You can create a 'streaming' table using the following table property.</p>
+<pre><code>TBLPROPERTIES ('streaming'='true')
+</code></pre>
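+<p>For example (a sketch; the table and column names are illustrative):</p>
+<pre><code>CREATE TABLE streaming_table (id INT, name STRING)
+STORED AS carbondata
+TBLPROPERTIES ('streaming'='true')
+</code></pre>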
+</li>
+<li>
+<h5>
 <a id="caching-minmax-value-for-required-columns" class="anchor" href="#caching-minmax-value-for-required-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Caching Min/Max Value for Required Columns</h5>
 <p>By default, CarbonData caches min and max values of all the columns in the schema. As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage.</p>
 <p>Following are the valid values for COLUMN_META_CACHE:</p>
@@ -732,7 +736,7 @@ During create table operation specify the cache level in table properties.</p>
 <a id="support-flat-folder-same-as-hiveparquet" class="anchor" href="#support-flat-folder-same-as-hiveparquet" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Support Flat folder same as Hive/Parquet</h5>
 <p>This feature allows all carbondata and index files to be kept directly under the tablepath. Currently, all carbondata/carbonindex files are written under the tablepath/Fact/Part0/Segment_NUM folder, which is not the same as the hive/parquet folder structure. With this feature, all files are written directly under the tablepath, without maintaining any segment folder structure. This is useful for interoperability between the execution engines and plugin with other execution engines like hive or presto becomes [...]
 <p>Following table property enables this feature and default value is false.</p>
-<pre><code> 'flat_folder'='true'
+<pre><code>'flat_folder'='true'
 </code></pre>
 <p>Example:</p>
 <pre><code>CREATE TABLE employee (name String, city String, id int) STORED BY 'carbondata' TBLPROPERTIES ('flat_folder'='true')
@@ -787,7 +791,7 @@ The corresponding system property is configured in carbon.properties file as bel
 As the table path remains the same after rename therefore the user can use this property to
 specify bad records path for the table at the time of creation, so that the same path can
 be later viewed in table description for reference.</p>
-<pre><code>  TBLPROPERTIES('BAD_RECORD_PATH'='/opt/badrecords')
+<pre><code>TBLPROPERTIES('BAD_RECORD_PATH'='/opt/badrecords')
 </code></pre>
 </li>
 <li>
@@ -799,7 +803,15 @@ This property is useful if you have a large cluster and only want a small portio
 For example, if you have a cluster with 10 nodes and the input data is about 1GB. Without this property, each node will process about 100MB input data and result in at least 10 data files. With this property configured with 512, only 2 nodes will be chosen to process the input data, each with about 512MB input and result in about 2 or 4 files based on the compress ratio.
 Moreover, this property can also be specified in the load option.
 Notice that once you enable this feature, for load balance, carbondata will ignore the data locality while assigning input data to nodes, which will cause more network traffic.</p>
-<pre><code>  TBLPROPERTIES('LOAD_MIN_SIZE_INMB'='256')
+<pre><code>TBLPROPERTIES('LOAD_MIN_SIZE_INMB'='256')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="range-column" class="anchor" href="#range-column" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Range Column</h5>
+<p>This property is used to specify a column to partition the input data by range.
+Only one column can be configured. During data loading, you can use "global_sort_partitions" or "scale_factor" to avoid generating small files.</p>
+<pre><code>TBLPROPERTIES('RANGE_COLUMN'='col1')
 </code></pre>
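+<p>A load into such a table can then control the number of range partitions, e.g. via SCALE_FACTOR (a sketch; the file path and table name are illustrative):</p>
+<pre><code>LOAD DATA INPATH '/tmp/input.csv' INTO TABLE range_table
+OPTIONS('SCALE_FACTOR'='10')
+</code></pre>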
 </li>
 </ul>
@@ -813,26 +825,37 @@ AS select_statement;
 </code></pre>
 <h3>
 <a id="examples" class="anchor" href="#examples" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Examples</h3>
-<pre><code>carbon.sql("CREATE TABLE source_table(
-                           id INT,
-                           name STRING,
-                           city STRING,
-                           age INT)
-            STORED AS parquet")
+<pre><code>carbon.sql(
+           s"""
+              | CREATE TABLE source_table(
+              |   id INT,
+              |   name STRING,
+              |   city STRING,
+              |   age INT)
+              | STORED AS parquet
+           """.stripMargin)
+              
 carbon.sql("INSERT INTO source_table SELECT 1,'bob','shenzhen',27")
+
 carbon.sql("INSERT INTO source_table SELECT 2,'david','shenzhen',31")
 
-carbon.sql("CREATE TABLE target_table
-            STORED AS carbondata
-            AS SELECT city,avg(age) FROM source_table GROUP BY city")
+carbon.sql(
+           s"""
+              | CREATE TABLE target_table
+              | STORED AS carbondata
+              | AS SELECT city, avg(age) 
+              |    FROM source_table 
+              |    GROUP BY city
+           """.stripMargin)
             
 carbon.sql("SELECT * FROM target_table").show
-  // results:
-  //    +--------+--------+
-  //    |    city|avg(age)|
-  //    +--------+--------+
-  //    |shenzhen|    29.0|
-  //    +--------+--------+
+
+// results:
+//    +--------+--------+
+//    |    city|avg(age)|
+//    +--------+--------+
+//    |shenzhen|    29.0|
+//    +--------+--------+
 
 </code></pre>
 <h2>
@@ -851,11 +874,12 @@ sql("INSERT INTO origin select 100,'spark'")
 sql("INSERT INTO origin select 200,'hive'")
 // creates a table in $storeLocation/origin
 
-sql(s"""
-|CREATE EXTERNAL TABLE source
-|STORED AS carbondata
-|LOCATION '$storeLocation/origin'
-""".stripMargin)
+sql(
+    s"""
+       | CREATE EXTERNAL TABLE source
+       | STORED AS carbondata
+       | LOCATION '$storeLocation/origin'
+    """.stripMargin)
 checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) from origin"))
 </code></pre>
 <h3>
@@ -864,8 +888,10 @@ checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) from origin
 Our SDK module currently supports writing data in this format.</p>
 <p><strong>Example:</strong></p>
 <pre><code>sql(
-s"""CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
-|'$writerPath' """.stripMargin)
+    s"""
+       | CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
+       |'$writerPath'
+    """.stripMargin)
 </code></pre>
 <p>Here the writer path will contain carbondata and index files.
 This can be SDK output or C++ SDK output. Refer <a href="./sdk-guide.html">SDK Guide</a> and <a href="./csdk-guide.html">C++ SDK Guide</a>.</p>
@@ -884,7 +910,7 @@ suggest to drop the external table and create again to register table with new s
 <pre><code>CREATE DATABASE [IF NOT EXISTS] database_name [LOCATION path];
 </code></pre>
 <h3>
-<a id="example-1" class="anchor" href="#example-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example</h3>
+<a id="example-2" class="anchor" href="#example-2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example</h3>
 <pre><code>CREATE DATABASE carbon LOCATION "hdfs://name_cluster/dir1/carbonstore";
 </code></pre>
 <h2>
@@ -904,8 +930,8 @@ SHOW TABLES IN defaultdb
 <p>The following section introduce the commands to modify the physical or logical state of the existing table(s).</p>
 <ul>
 <li>
-<h5>
-<a id="rename-table" class="anchor" href="#rename-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>RENAME TABLE</h5>
+<h4>
+<a id="rename-table" class="anchor" href="#rename-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>RENAME TABLE</h4>
 <p>This command is used to rename the existing table.</p>
 <pre><code>ALTER TABLE [db_name.]table_name RENAME TO new_table_name
 </code></pre>
@@ -916,8 +942,8 @@ ALTER TABLE test_db.carbon RENAME TO test_db.carbonTable
 </code></pre>
 </li>
 <li>
-<h5>
-<a id="add-columns" class="anchor" href="#add-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>ADD COLUMNS</h5>
+<h4>
+<a id="add-columns" class="anchor" href="#add-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>ADD COLUMNS</h4>
 <p>This command is used to add a new column to the existing table.</p>
 <pre><code>ALTER TABLE [db_name.]table_name ADD COLUMNS (col_name data_type,...)
 TBLPROPERTIES('DICTIONARY_INCLUDE'='col_name,...',
@@ -937,8 +963,8 @@ TBLPROPERTIES('DICTIONARY_INCLUDE'='col_name,...',
 <code>ALTER TABLE carbon ADD COLUMNS (a1 STRING, b1 STRING) TBLPROPERTIES('LOCAL_DICTIONARY_INCLUDE'='a1','LOCAL_DICTIONARY_EXCLUDE'='b1')</code></p>
 <ul>
 <li>
-<h5>
-<a id="drop-columns" class="anchor" href="#drop-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DROP COLUMNS</h5>
+<h4>
+<a id="drop-columns" class="anchor" href="#drop-columns" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DROP COLUMNS</h4>
 <p>This command is used to delete the existing column(s) in a table.</p>
 <pre><code>ALTER TABLE [db_name.]table_name DROP COLUMNS (col_name, ...)
 </code></pre>
@@ -952,11 +978,11 @@ ALTER TABLE carbon DROP COLUMNS (c1,d1)
 <p><strong>NOTE:</strong> Drop Complex child column is not supported.</p>
 </li>
 <li>
-<h5>
-<a id="change-data-type" class="anchor" href="#change-data-type" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CHANGE DATA TYPE</h5>
-<p>This command is used to change the data type from INT to BIGINT or decimal precision from lower to higher.
+<h4>
+<a id="change-column-nametype" class="anchor" href="#change-column-nametype" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>CHANGE COLUMN NAME/TYPE</h4>
+<p>This command is used to change the column name and/or data type, from INT to BIGINT or from lower to higher decimal precision.
 Change of decimal data type from lower precision to higher precision will only be supported for cases where there is no data loss.</p>
-<pre><code>ALTER TABLE [db_name.]table_name CHANGE col_name col_name changed_column_type
+<pre><code>ALTER TABLE [db_name.]table_name CHANGE col_old_name col_new_name column_type
 </code></pre>
 <p>Valid Scenarios</p>
 <ul>
@@ -965,34 +991,39 @@ Change of decimal data type from lower precision to higher precision will only b
 <li>
 <strong>NOTE:</strong> The allowed range is 38,38 (precision, scale) and is a valid upper case scenario which is not resulting in data loss.</li>
 </ul>
-<p>Example1:Changing data type of column a1 from INT to BIGINT.</p>
-<pre><code>ALTER TABLE test_db.carbon CHANGE a1 a1 BIGINT
+<p>Example 1: Change column a1's name to a2 and its data type from INT to BIGINT.</p>
+<pre><code>ALTER TABLE test_db.carbon CHANGE a1 a2 BIGINT
 </code></pre>
 <p>Example 2: Changing the decimal precision of column a1 from 10 to 18.</p>
 <pre><code>ALTER TABLE test_db.carbon CHANGE a1 a1 DECIMAL(18,2)
 </code></pre>
+<p>Example 3: Change column a3's name to a4.</p>
+<pre><code>ALTER TABLE test_db.carbon CHANGE a3 a4 STRING
+</code></pre>
+<p><strong>NOTE:</strong> Once the column is renamed, the user has to take care to replace the file header with the new name or to change the column header in the CSV file.</p>
 </li>
 <li>
-<h5>
-<a id="merge-index" class="anchor" href="#merge-index" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>MERGE INDEX</h5>
+<h4>
+<a id="merge-index" class="anchor" href="#merge-index" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>MERGE INDEX</h4>
 <p>This command is used to merge all the CarbonData index files (.carbonindex) inside a segment to a single CarbonData index merge file (.carbonindexmerge). This improves the performance of the first query.</p>
-<pre><code> ALTER TABLE [db_name.]table_name COMPACT 'SEGMENT_INDEX'
-</code></pre>
-<pre><code>Examples:
+<pre><code>ALTER TABLE [db_name.]table_name COMPACT 'SEGMENT_INDEX'
 </code></pre>
-<pre><code> ALTER TABLE test_db.carbon COMPACT 'SEGMENT_INDEX'
- ```
-
- **NOTE:**
-
- * Merge index is not supported on streaming table.
-
+<p>Examples:</p>
+<pre><code>ALTER TABLE test_db.carbon COMPACT 'SEGMENT_INDEX'
 </code></pre>
+<p><strong>NOTE:</strong></p>
+<ul>
+<li>Merge index is not supported on streaming table.</li>
+</ul>
 </li>
 <li>
-<h5>
-<a id="set-and-unset-for-local-dictionary-properties" class="anchor" href="#set-and-unset-for-local-dictionary-properties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SET and UNSET for Local Dictionary Properties</h5>
+<h4>
+<a id="set-and-unset" class="anchor" href="#set-and-unset" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SET and UNSET</h4>
 <p>When the set command is used, all the newly set properties will override the corresponding old properties, if they exist.</p>
+<ul>
+<li>
+<h5>
+<a id="local-dictionary-properties" class="anchor" href="#local-dictionary-properties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Local Dictionary Properties</h5>
 <p>Example to SET Local Dictionary Properties:</p>
 <pre><code>ALTER TABLE tablename SET TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='false','LOCAL_DICTIONARY_THRESHOLD'='1000','LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
 </code></pre>
@@ -1003,6 +1034,19 @@ Change of decimal data type from lower precision to higher precision will only b
 <p><strong>NOTE:</strong> For old tables, by default, local dictionary is disabled. If the user wants local dictionary for these tables, they can enable/disable it for new data at their discretion.
 This can be achieved by using the alter table set command.</p>
 </li>
+<li>
+<h5>
+<a id="sort-scope" class="anchor" href="#sort-scope" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SORT SCOPE</h5>
+<p>Example to SET SORT SCOPE:</p>
+<pre><code>ALTER TABLE tablename SET TBLPROPERTIES('SORT_SCOPE'='NO_SORT')
+</code></pre>
+<p>When Sort Scope is unset, the default value (NO_SORT) will be used.</p>
+<p>Example to UNSET SORT SCOPE:</p>
+<pre><code>ALTER TABLE tablename UNSET TBLPROPERTIES('SORT_SCOPE')
+</code></pre>
+</li>
+</ul>
+</li>
 </ul>
 <h3>
 <a id="drop-table" class="anchor" href="#drop-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>DROP TABLE</h3>
@@ -1041,8 +1085,8 @@ STORED AS carbondata
 <pre><code>CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
                               productNumber Int COMMENT 'unique serial number for product')
 COMMENT "This is table comment"
- STORED AS carbondata
- TBLPROPERTIES ('DICTIONARY_INCLUDE'='productNumber')
+STORED AS carbondata
+TBLPROPERTIES ('DICTIONARY_INCLUDE'='productNumber')
 </code></pre>
 <p>You can also SET and UNSET table comment using ALTER command.</p>
 <p>Example to SET table comment:</p>
@@ -1067,7 +1111,7 @@ COMMENT "This is table comment"
   [TBLPROPERTIES (property_name=property_value, ...)]
 </code></pre>
 <p>Example:</p>
-<pre><code> CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
+<pre><code>CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
                               productNumber INT,
                               productName STRING,
                               storeCity STRING,
@@ -1094,9 +1138,9 @@ STORED AS carbondata
 <h4>
 <a id="insert-overwrite" class="anchor" href="#insert-overwrite" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Insert OVERWRITE</h4>
 <p>This command allows you to insert or load overwrite on a specific partition.</p>
-<pre><code> INSERT OVERWRITE TABLE table_name
- PARTITION (column = 'partition_name')
- select_statement
+<pre><code>INSERT OVERWRITE TABLE table_name
+PARTITION (column = 'partition_name')
+select_statement
 </code></pre>
 <p>Example:</p>
 <pre><code>INSERT OVERWRITE TABLE partitioned_user
@@ -1150,10 +1194,10 @@ STORED AS carbondata
     col_C LONG,
     col_D DECIMAL(10,2),
     col_E LONG
- ) partitioned by (col_F Timestamp)
- PARTITIONED BY 'carbondata'
- TBLPROPERTIES('PARTITION_TYPE'='RANGE',
- 'RANGE_INFO'='2015-01-01, 2016-01-01, 2017-01-01, 2017-02-01')
+) partitioned by (col_F Timestamp)
+STORED BY 'carbondata'
+TBLPROPERTIES('PARTITION_TYPE'='RANGE',
+'RANGE_INFO'='2015-01-01, 2016-01-01, 2017-01-01, 2017-02-01')
 </code></pre>
 <h3>
 <a id="create-list-partition-table" class="anchor" href="#create-list-partition-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Create List Partition Table</h3>
@@ -1174,9 +1218,9 @@ STORED AS carbondata
     col_E LONG,
     col_F TIMESTAMP
  ) PARTITIONED BY (col_A STRING)
- STORED AS carbondata
- TBLPROPERTIES('PARTITION_TYPE'='LIST',
- 'LIST_INFO'='aaaa, bbbb, (cccc, dddd), eeee')
+STORED AS carbondata
+TBLPROPERTIES('PARTITION_TYPE'='LIST',
+'LIST_INFO'='aaaa, bbbb, (cccc, dddd), eeee')
 </code></pre>
 <h3>
 <a id="show-partitions-1" class="anchor" href="#show-partitions-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Show Partitions</h3>
@@ -1194,7 +1238,7 @@ STORED AS carbondata
 <h3>
 <a id="drop-a-partition" class="anchor" href="#drop-a-partition" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Drop a partition</h3>
 <p>Only drop partition definition, but keep data</p>
-<pre><code>  ALTER TABLE [db_name].table_name DROP PARTITION(partition_id)
+<pre><code>ALTER TABLE [db_name].table_name DROP PARTITION(partition_id)
 </code></pre>
 <p>Drop both partition definition and data</p>
 <pre><code>ALTER TABLE [db_name].table_name DROP PARTITION(partition_id) WITH DATA
diff --git a/src/main/webapp/dml-of-carbondata.html b/src/main/webapp/dml-of-carbondata.html
index 15ff807..e765ecb 100644
--- a/src/main/webapp/dml-of-carbondata.html
+++ b/src/main/webapp/dml-of-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -269,6 +269,10 @@ OPTIONS(property_name=property_value, ...)
 <td>If the header is not present in the input CSV, the column names to be used for the data read from the input CSV</td>
 </tr>
 <tr>
+<td><a href="#sort_scope">SORT_SCOPE</a></td>
+<td>Sort Scope to be used for current load.</td>
+</tr>
+<tr>
 <td><a href="#multiline">MULTILINE</a></td>
 <td>Whether a row data can span across multiple lines.</td>
 </tr>
@@ -332,8 +336,14 @@ OPTIONS(property_name=property_value, ...)
 <td><a href="#global_sort_partitions">GLOBAL_SORT_PARTITIONS</a></td>
 <td>Number of partition to use for shuffling of data during sorting</td>
 </tr>
+<tr>
+<td><a href="#scale_factor">SCALE_FACTOR</a></td>
+<td>Control the partition size for RANGE_COLUMN feature</td>
+</tr>
 </tbody>
 </table>
+<ul>
+<li>
 <p>You can use the following options to load data:</p>
 <ul>
 <li>
@@ -376,6 +386,24 @@ true: CSV file is with file header.</p>
 </li>
 <li>
 <h5>
+<a id="sort_scope" class="anchor" href="#sort_scope" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SORT_SCOPE:</h5>
+<p>Sort Scope to be used for the current load. This overrides the Sort Scope of the table.
+Requirement: Sort Columns must be set while creating the table. If Sort Columns is null, Sort Scope is always NO_SORT.</p>
+<pre><code>OPTIONS('SORT_SCOPE'='BATCH_SORT')
+</code></pre>
+<p>Priority order for choosing Sort Scope is:</p>
+<ol>
+<li>Load Data Command</li>
+<li>CARBON.TABLE.LOAD.SORT.SCOPE.&lt;db&gt;.&lt;table&gt; session property</li>
+<li>Table level Sort Scope</li>
+<li>CARBON.OPTIONS.SORT.SCOPE session property</li>
+<li>Default Value: NO_SORT</li>
+</ol>
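+<p>As a sketch, assuming the session properties above are settable with the SET command (the database and table names are illustrative):</p>
+<pre><code>SET carbon.table.load.sort.scope.db_name.table_name=GLOBAL_SORT;
+SET carbon.options.sort.scope=LOCAL_SORT;
+</code></pre>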
+<li>
+<h5>
 <a id="multiline" class="anchor" href="#multiline" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>MULTILINE:</h5>
 <p>CSV with new line character in quotes.</p>
 <pre><code>OPTIONS('MULTILINE'='true') 
@@ -398,15 +426,23 @@ true: CSV file is with file header.</p>
 <li>
 <h5>
 <a id="complex_delimiter_level_1" class="anchor" href="#complex_delimiter_level_1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>COMPLEX_DELIMITER_LEVEL_1:</h5>
-<p>Split the complex type data column in a row (eg., a$b$c --&gt; Array = {a,b,c}).</p>
-<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_1'='$') 
+<p>Split the complex type data column in a row (e.g., a\001b\001c --&gt; Array = {a,b,c}).</p>
+<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_1'='\001')
 </code></pre>
 </li>
 <li>
 <h5>
 <a id="complex_delimiter_level_2" class="anchor" href="#complex_delimiter_level_2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>COMPLEX_DELIMITER_LEVEL_2:</h5>
-<p>Split the complex type nested data column in a row. Applies level_1 delimiter &amp; applies level_2 based on complex data type (eg., a:b$c:d --&gt; Array&gt; = {{a,b},{c,d}}).</p>
-<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_2'=':')
+<p>Split the complex type nested data column in a row. Applies the level_1 delimiter &amp; applies level_2 based on the complex data type (e.g., a\002b\001c\002d --&gt; Array&lt;Array&gt; = {{a,b},{c,d}}).</p>
+<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_2'='\002')
+</code></pre>
+</li>
+<li>
+<h5>
+<a id="complex_delimiter_level_3" class="anchor" href="#complex_delimiter_level_3" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>COMPLEX_DELIMITER_LEVEL_3:</h5>
+<p>Split the complex type nested data column in a row. Applies the level_1 delimiter, then the level_2 and level_3 delimiters based on the complex data type.
+Used in case of nested Complex Map types (e.g., 'a\003b\002b\003c\001aa\003bb\002cc\003dd' --&gt; Array Of Map = {{a -&gt; b, b -&gt; c},{aa -&gt; bb, cc -&gt; dd}}).</p>
+<pre><code>OPTIONS('COMPLEX_DELIMITER_LEVEL_3'='\003')
 </code></pre>
 </li>
 <li>
@@ -454,11 +490,12 @@ true: CSV file is with file header.</p>
 <a id="single_pass" class="anchor" href="#single_pass" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SINGLE_PASS:</h5>
 <p>Single Pass Loading enables a single job to finish data loading with dictionary generation on the fly. It enhances performance in scenarios where the subsequent data loading after the initial load involves fewer incremental updates on the dictionary.</p>
 </li>
-</ul>
+
 <p>This option specifies whether to use single pass for loading data or not. By default this option is set to FALSE.</p>
 <pre><code> OPTIONS('SINGLE_PASS'='TRUE')
 </code></pre>
 <p><strong>NOTE:</strong></p>
 <ul>
 <li>If this option is set to TRUE then data loading will take less time.</li>
 <li>If this option is set to some invalid value other than TRUE or FALSE then it uses the default value.</li>
@@ -470,8 +507,8 @@ options('DELIMITER'=',', 'QUOTECHAR'='"','COMMENTCHAR'='#',
 'FILEHEADER'='empno,empname,designation,doj,workgroupcategory,
 workgroupcategoryname,deptno,deptname,projectcode,
 projectjoindate,projectenddate,attendance,utilization,salary',
-'MULTILINE'='true','ESCAPECHAR'='\','COMPLEX_DELIMITER_LEVEL_1'='$',
-'COMPLEX_DELIMITER_LEVEL_2'=':',
+'MULTILINE'='true','ESCAPECHAR'='\','COMPLEX_DELIMITER_LEVEL_1'='\\\001',
+'COMPLEX_DELIMITER_LEVEL_2'='\\\002',
 'ALL_DICTIONARY_PATH'='/opt/alldictionary/data.dictionary',
 'SINGLE_PASS'='TRUE')
 </code></pre>
@@ -509,16 +546,37 @@ OPTIONS('BAD_RECORDS_LOGGER_ENABLE'='true','BAD_RECORD_PATH'='hdfs://hacluster/t
 <li>
 <h5>
 <a id="global_sort_partitions" class="anchor" href="#global_sort_partitions" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>GLOBAL_SORT_PARTITIONS:</h5>
-<p>If the SORT_SCOPE is defined as GLOBAL_SORT, then user can specify the number of partitions to use while shuffling data for sort using GLOBAL_SORT_PARTITIONS. If it is not configured, or configured less than 1, then it uses the number of map task as reduce task. It is recommended that each reduce task deal with 512MB-1GB data.</p>
+<p>If the SORT_SCOPE is defined as GLOBAL_SORT, the user can specify the number of partitions to use while shuffling data for sort using GLOBAL_SORT_PARTITIONS. If it is not configured, or is configured to less than 1, then the number of map tasks is used as the number of reduce tasks. It is recommended that each reduce task handle 512 MB to 1 GB of data.
+For RANGE_COLUMN, GLOBAL_SORT_PARTITIONS is also used to specify the number of range partitions.</p>
 </li>
 </ul>
 <pre><code>OPTIONS('GLOBAL_SORT_PARTITIONS'='2')
 </code></pre>
-<p>NOTE:</p>
+<p><strong>NOTE:</strong></p>
 <ul>
 <li>GLOBAL_SORT_PARTITIONS should be Integer type, the range is [1,Integer.MaxValue].</li>
 <li>It is only used when the SORT_SCOPE is GLOBAL_SORT.</li>
 </ul>
+<ul>
+<li>
+<h5>
+<a id="scale_factor" class="anchor" href="#scale_factor" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>SCALE_FACTOR</h5>
+</li>
+</ul>
+<p>For RANGE_COLUMN, SCALE_FACTOR is used to control the number of range partitions as follows:</p>
+<pre><code>splitSize = max(blocklet_size, (block_size - blocklet_size)) * scale_factor
+numPartitions = total size of input data / splitSize
+</code></pre>
+<p>The default value is 3, and the range is [1, 300].</p>
+<pre><code>OPTIONS('SCALE_FACTOR'='10')
+</code></pre>
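+<p>As a worked example, assuming the default block size of 1024 MB and blocklet size of 64 MB stated earlier in this document, with SCALE_FACTOR of 3 and 100 GB of input data:</p>
+<pre><code>splitSize = max(64, (1024 - 64)) * 3 = 2880 MB
+numPartitions = 102400 / 2880 = approximately 35
+</code></pre>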
+<p><strong>NOTE:</strong></p>
+<ul>
+<li>If both GLOBAL_SORT_PARTITIONS and SCALE_FACTOR are used at the same time, only GLOBAL_SORT_PARTITIONS is valid.</li>
+<li>The compaction on RANGE_COLUMN will use LOCAL_SORT by default.</li>
+</ul>
+
+
 <h3>
 <a id="insert-data-into-carbondata-table" class="anchor" href="#insert-data-into-carbondata-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>INSERT DATA INTO CARBONDATA TABLE</h3>
 <p>This command inserts data into a CarbonData table, it is defined as a combination of two queries Insert and Select query respectively.
@@ -650,14 +708,17 @@ All specified segment ids should exist and be valid, otherwise compaction will f
 Custom compaction is usually done during the off-peak time.</p>
 <pre><code>ALTER TABLE table_name COMPACT 'CUSTOM' WHERE SEGMENT.ID IN (2,3,4)
 </code></pre>
-<p>NOTE: Compaction is unsupported for table containing Complex columns.</p>
 <ul>
 <li><strong>CLEAN SEGMENTS AFTER Compaction</strong></li>
 </ul>
 <p>Clean the segments which are compacted:</p>
 <pre><code>CLEAN FILES FOR TABLE carbon_table
 </code></pre>
-<script>
+
+</li>
+</ul>
+</li>
+</ul><script>
 $(function() {
   // Show selected style on nav item
   $('.b-nav__docs').addClass('selected');
diff --git a/src/main/webapp/documentation.html b/src/main/webapp/documentation.html
index e49cdae..09db88f 100644
--- a/src/main/webapp/documentation.html
+++ b/src/main/webapp/documentation.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -225,11 +225,15 @@
 <a id="getting-started" class="anchor" href="#getting-started" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Getting Started</h2>
 <p><strong>File Format Concepts:</strong> Start with the basics of understanding the <a href="./file-structure-of-carbondata.html#carbondata-file-format">CarbonData file format</a> and its <a href="./file-structure-of-carbondata.html">storage structure</a>. This will help to understand other parts of the documentation, including deployment, programming and usage guides.</p>
 <p><strong>Quick Start:</strong> <a href="./quick-start-guide.html#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Run an example program</a> on your local machine or <a href="https://github.com/apache/carbondata/tree/master/examples/spark2/src/main/scala/org/apache/carbondata/examples" target=_blank>study some examples</a>.</p>
-<p><strong>CarbonData SQL Language Reference:</strong> CarbonData extends the Spark SQL language and adds several <a href="./ddl-of-carbondata.html">DDL</a> and <a href="./dml-of-carbondata.html">DML</a> statements to support operations on it.Refer to the <a href="./language-manual.html">Reference Manual</a> to understand the supported features and functions.</p>
+<p><strong>CarbonData SQL Language Reference:</strong> CarbonData extends the Spark SQL language and adds several <a href="./ddl-of-carbondata.html">DDL</a> and <a href="./dml-of-carbondata.html">DML</a> statements to support operations on it. Refer to the <a href="./language-manual.html">Reference Manual</a> to understand the supported features and functions.</p>
 <p><strong>Programming Guides:</strong> You can read our guides about <a href="./sdk-guide.html">Java APIs supported</a> or <a href="./csdk-guide.html">C++ APIs supported</a> to learn how to integrate CarbonData with your applications.</p>
 <h2>
 <a id="integration" class="anchor" href="#integration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration</h2>
-<p>CarbonData can be integrated with popular Execution engines like <a href="./quick-start-guide.html#spark">Spark</a> and <a href="./quick-start-guide.html#presto">Presto</a>.Refer to the <a href="./quick-start-guide.html#integration">Installation and Configuration</a> section to understand all modes of Integrating CarbonData.</p>
+<ul>
+<li>CarbonData can be integrated with popular execution engines like <a href="./quick-start-guide.html#spark">Spark</a>, <a href="./quick-start-guide.html#presto">Presto</a> and <a href="./quick-start-guide.html#hive">Hive</a>.</li>
+<li>CarbonData can be integrated with popular storage engines like HDFS, Huawei Cloud (OBS) and <a href="./quick-start-guide.html#alluxio">Alluxio</a>.<br>
+Refer to the <a href="./quick-start-guide.html#integration">Installation and Configuration</a> section to understand all modes of Integrating CarbonData.</li>
+</ul>
 <h2>
 <a id="contributing-to-carbondata" class="anchor" href="#contributing-to-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Contributing to CarbonData</h2>
 <p>The Apache CarbonData community welcomes all kinds of contributions from anyone with a passion for
diff --git a/src/main/webapp/faq.html b/src/main/webapp/faq.html
index a42bbb8..7536a0b 100644
--- a/src/main/webapp/faq.html
+++ b/src/main/webapp/faq.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -279,12 +279,10 @@ By default <strong>carbon.badRecords.location</strong> specifies the following l
 <a id="how-to-specify-store-location-while-creating-carbon-session" class="anchor" href="#how-to-specify-store-location-while-creating-carbon-session" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>How to specify store location while creating carbon session?</h2>
 <p>The store location specified while creating the carbon session is used by CarbonData to store metadata such as the schema, dictionary files, dictionary metadata and sort indexes.</p>
 <p>Try creating <code>carbonsession</code> with <code>storepath</code> specified in the following manner :</p>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession(&lt;store_path&gt;)
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession(&lt;carbon_store_path&gt;)
 </code></pre>
 <p>Example:</p>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession("hdfs://localhost:9000/carbon/store")
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://localhost:9000/carbon/store")
 </code></pre>
 <h2>
 <a id="what-is-carbon-lock-type" class="anchor" href="#what-is-carbon-lock-type" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>What is Carbon Lock Type?</h2>
@@ -447,9 +445,9 @@ $OverrideCatalog$$overrides_$e
 <p>Use the following command :</p>
 </li>
 </ol>
-<pre><code>"mvn -Pspark-2.1 -Dspark.version {yourSparkVersion} clean package"
+<pre><code>mvn -Pspark-2.1 -Dspark.version {yourSparkVersion} clean package
 </code></pre>
-<p>Note :  Refrain from using "mvn clean package" without specifying the profile.</p>
+<p>Note : Refrain from using "mvn clean package" without specifying the profile.</p>
 <h2>
 <a id="failed-to-execute-load-query-on-cluster" class="anchor" href="#failed-to-execute-load-query-on-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Failed to execute load query on cluster</h2>
 <p><strong>Symptom</strong></p>
diff --git a/src/main/webapp/file-structure-of-carbondata.html b/src/main/webapp/file-structure-of-carbondata.html
index 5230ba3..3201546 100644
--- a/src/main/webapp/file-structure-of-carbondata.html
+++ b/src/main/webapp/file-structure-of-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/s3-guide.html b/src/main/webapp/hive-guide.html
similarity index 75%
copy from src/main/webapp/s3-guide.html
copy to src/main/webapp/hive-guide.html
index ba25dfb..780c766 100644
--- a/src/main/webapp/s3-guide.html
+++ b/src/main/webapp/hive-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -219,72 +219,87 @@
                                 <div class="col-sm-12  col-md-12">
                                     <div>
 <h1>
-<a id="s3-guide" class="anchor" href="#s3-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>S3 Guide</h1>
-<p>Object storage is the recommended storage format in cloud as it can support storing large data
-files. S3 APIs are widely used for accessing object stores. This can be
-used to store or retrieve data on Amazon cloud, Huawei Cloud(OBS) or on any other object
-stores conforming to S3 API.
-Storing data in cloud is advantageous as there are no restrictions on the size of
-data and the data can be accessed from anywhere at any time.
-Carbondata can support any Object Storage that conforms to Amazon S3 API.
-Carbondata relies on Hadoop provided S3 filesystem APIs to access Object stores.</p>
-<h1>
-<a id="writing-to-object-storage" class="anchor" href="#writing-to-object-storage" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Writing to Object Storage</h1>
-<p>To store carbondata files onto Object Store, <code>carbon.storelocation</code> property will have
-to be configured with Object Store path in CarbonProperties file.</p>
-<p>For example:</p>
-<pre><code>carbon.storelocation=s3a://mybucket/carbonstore.
-</code></pre>
-<p>If the existing store location cannot be changed or only specific tables need to be stored
-onto cloud object store, it can be done so by specifying the <code>location</code> option in the create
-table DDL command.</p>
-<p>For example:</p>
-<pre><code>CREATE TABLE IF NOT EXISTS db1.table1(col1 string, col2 int) STORED AS carbondata LOCATION 's3a://mybucket/carbonstore'
-</code></pre>
-<p>For more details on create table, Refer <a href="ddl-of-carbondata.html#create-table">DDL of CarbonData</a></p>
-<h1>
-<a id="authentication" class="anchor" href="#authentication" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Authentication</h1>
-<p>Authentication properties will have to be configured to store the carbondata files on to S3 location.</p>
-<p>Authentication properties can be set in any of the following ways:</p>
-<ol>
+<a id="quick-start" class="anchor" href="#quick-start" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick Start</h1>
+<p>This tutorial provides a quick introduction to using the CarbonData integration/hive module.</p>
+<h2>
+<a id="build-in-120-hive-integration-only-support-spark21-and-hadoop272" class="anchor" href="#build-in-120-hive-integration-only-support-spark21-and-hadoop272" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Build (In 1.2.0, hive integration only support spark2.1 and hadoop2.7.2)</h2>
+<p>mvn -DskipTests -Pspark-2.1 -Phadoop-2.7.2 clean package</p>
+<h2>
+<a id="prepare-carbondata-in-spark" class="anchor" href="#prepare-carbondata-in-spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prepare CarbonData in Spark</h2>
+<ul>
 <li>
-<p>Set authentication properties in core-site.xml, refer
-<a href="https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html#Authentication_properties" rel="nofollow">hadoop authentication document</a></p>
+<p>Create a sample.csv file using the following commands. The CSV file is required for loading data into CarbonData.</p>
+<pre><code>cd carbondata
+cat &gt; sample.csv &lt;&lt; EOF
+id,name,scale,country,salary
+1,yuhai,1.77,china,33000.1
+2,runlin,1.70,china,33000.2
+EOF
+</code></pre>
 </li>
 <li>
-<p>Set authentication properties in spark-defaults.conf.</p>
+<p>Copy the data to HDFS</p>
 </li>
-</ol>
-<p>Example</p>
-<pre><code>spark.hadoop.fs.s3a.secret.key=123
-spark.hadoop.fs.s3a.access.key=456
+</ul>
+<pre><code>$HADOOP_HOME/bin/hadoop fs -put sample.csv &lt;hdfs store path&gt;/sample.csv
 </code></pre>
-<ol start="3">
-<li>Pass authentication properties with spark-submit as configuration.</li>
-</ol>
-<p>Example:</p>
-<pre><code>./bin/spark-submit --master yarn --conf spark.hadoop.fs.s3a.secret.key=123 --conf spark.hadoop.fs
-.s3a.access.key=456 --class=
+<ul>
+<li>Add the following property to $SPARK_CONF_DIR/conf/hive-site.xml</li>
+</ul>
+<div class="highlight highlight-text-xml"><pre>&lt;<span class="pl-ent">property</span>&gt;
+  &lt;<span class="pl-ent">name</span>&gt;hive.metastore.pre.event.listeners&lt;/<span class="pl-ent">name</span>&gt;
+  &lt;<span class="pl-ent">value</span>&gt;org.apache.carbondata.hive.CarbonHiveMetastoreListener&lt;/<span class="pl-ent">value</span>&gt;
+&lt;/<span class="pl-ent">property</span>&gt;</pre></div>
+<ul>
+<li>Start Spark shell by running the following command in the Spark directory</li>
+</ul>
+<pre><code>./bin/spark-shell --jars &lt;carbondata assembly jar path, carbon hive jar path&gt;
 </code></pre>
-<ol start="4">
-<li>Set authentication properties to hadoop configuration object in sparkContext.</li>
-</ol>
-<p>Example:</p>
-<pre><code>sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.secret.key", "123")
-sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.access.key","456")
+<pre><code>import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.CarbonSession._
+val rootPath = "hdfs:///user/hadoop/carbon"
+val storeLocation = s"$rootPath/store"
+val warehouse = s"$rootPath/warehouse"
+val metaStoreDB = s"$rootPath/metastore_db"
+
+val carbon = SparkSession.builder().enableHiveSupport().config("spark.sql.warehouse.dir", warehouse).config(org.apache.carbondata.core.constants.CarbonCommonConstants.STORE_LOCATION, storeLocation).getOrCreateCarbonSession(storeLocation, metaStoreDB)
+
+carbon.sql("create table hive_carbon(id int, name string, scale decimal, country string, salary double) STORED BY 'carbondata'")
+carbon.sql("LOAD DATA INPATH '&lt;hdfs store path&gt;/sample.csv' INTO TABLE hive_carbon")
+scala&gt;carbon.sql("SELECT * FROM hive_carbon").show()
+</code></pre>
+<h2>
+<a id="query-data-in-hive" class="anchor" href="#query-data-in-hive" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Data in Hive</h2>
+<h3>
+<a id="configure-hive-classpath" class="anchor" href="#configure-hive-classpath" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Configure hive classpath</h3>
+<pre><code>mkdir hive/auxlibs/
+cp carbondata/assembly/target/scala-2.11/carbondata_2.11*.jar hive/auxlibs/
+cp carbondata/integration/hive/target/carbondata-hive-*.jar hive/auxlibs/
+cp $SPARK_HOME/jars/spark-catalyst*.jar hive/auxlibs/
+cp $SPARK_HOME/jars/scala*.jar hive/auxlibs/
+export HIVE_AUX_JARS_PATH=hive/auxlibs/
+</code></pre>
+<h3>
+<a id="fix-snappy-issue" class="anchor" href="#fix-snappy-issue" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Fix snappy issue</h3>
+<pre><code>copy snappy-java-xxx.jar from "&lt;SPARK_HOME&gt;/jars/" to "/Library/Java/Extensions"
+export HADOOP_OPTS="-Dorg.xerial.snappy.lib.path=/Library/Java/Extensions -Dorg.xerial.snappy.lib.name=libsnappyjava.jnilib -Dorg.xerial.snappy.tempdir=/Users/apple/DEMO/tmp"
+</code></pre>
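+<p>Note that the paths above are macOS-specific. On Linux, a comparable export would point at the <code>.so</code> native library instead; a sketch, assuming the snappy jar was copied to a hypothetical /usr/lib/snappy and that /tmp/snappy exists as a writable temp directory:</p>
+<pre><code>export HADOOP_OPTS="-Dorg.xerial.snappy.lib.path=/usr/lib/snappy -Dorg.xerial.snappy.lib.name=libsnappyjava.so -Dorg.xerial.snappy.tempdir=/tmp/snappy"
+</code></pre>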
+<h3>
+<a id="start-hive-client" class="anchor" href="#start-hive-client" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Start hive client</h3>
+<pre><code>$HIVE_HOME/bin/hive
+</code></pre>
+<h3>
+<a id="query-data-from-hive-table" class="anchor" href="#query-data-from-hive-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query data from hive table</h3>
+<pre><code>set hive.mapred.supports.subdirectories=true;
+set mapreduce.input.fileinputformat.input.dir.recursive=true;
+
+select * from hive_carbon;
+select count(*) from hive_carbon;
+select * from hive_carbon order by id;
 </code></pre>
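+<p>Aggregations work the same way; for example, to total the salaries per country from the sample data loaded earlier (an illustrative query using only the columns defined above):</p>
+<pre><code>select country, sum(salary) from hive_carbon group by country;
+</code></pre>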
-<h1>
-<a id="recommendations" class="anchor" href="#recommendations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Recommendations</h1>
-<ol>
-<li>Object Storage like S3 does not support file leasing mechanism(supported by HDFS) that is
-required to take locks which ensure consistency between concurrent operations therefore, it is
-recommended to set the configurable lock path property(<a href="./configuration-parameters.html#system-configuration">carbon.lock.path</a>)
-to a HDFS directory.</li>
-<li>Concurrent data manipulation operations are not supported. Object stores follow eventual consistency semantics, i.e., any put request might take some time to reflect when trying to list. This behaviour causes the data read is always not consistent or not the latest.</li>
-</ol>
 <script>
 // Show selected style on nav item
-$(function() { $('.b-nav__s3').addClass('selected'); });
+$(function() { $('.b-nav__quickstart').addClass('selected'); });
 </script></div>
 </div>
 </div>
diff --git a/src/main/webapp/how-to-contribute-to-apache-carbondata.html b/src/main/webapp/how-to-contribute-to-apache-carbondata.html
index a6dc1ee..a8e5059 100644
--- a/src/main/webapp/how-to-contribute-to-apache-carbondata.html
+++ b/src/main/webapp/how-to-contribute-to-apache-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/index.html b/src/main/webapp/index.html
index 6a5967d..047370c 100644
--- a/src/main/webapp/index.html
+++ b/src/main/webapp/index.html
@@ -54,6 +54,9 @@
                                 class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -66,9 +69,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/introduction.html b/src/main/webapp/introduction.html
index 0cfa369..53e741c 100644
--- a/src/main/webapp/introduction.html
+++ b/src/main/webapp/introduction.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -217,7 +217,8 @@
                         <div id="viewpage" name="viewpage">
                             <div class="row">
                                 <div class="col-sm-12  col-md-12">
-                                    <div><h2>
+                                    <div>
+<h2>
 <a id="what-is-carbondata" class="anchor" href="#what-is-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>What is CarbonData</h2>
 <p>CarbonData is a fully indexed columnar and Hadoop native data-store for processing heavy analytical workloads and detailed queries on big data with Spark SQL. CarbonData allows faster interactive queries over PetaBytes of data.</p>
 <h2>
@@ -340,7 +341,12 @@
 <li>
 <h5>
 <a id="hdfs" class="anchor" href="#hdfs" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>HDFS</h5>
-<p>CarbonData uses HDFS api to write and read data from HDFS.CarbonData can take advantage of the locality information to efficiently suggest spark to run tasks near to the data.</p>
+<p>CarbonData uses HDFS api to write and read data from HDFS. CarbonData can take advantage of the locality information to efficiently suggest spark to run tasks near to the data.</p>
+</li>
+<li>
+<h5>
+<a id="alluxio" class="anchor" href="#alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Alluxio</h5>
+<p>CarbonData also supports reading from and writing to <a href="./quick-start-guide.html#alluxio">Alluxio</a>.</p>
 </li>
 </ul>
 <h2>
diff --git a/src/main/webapp/language-manual.html b/src/main/webapp/language-manual.html
index a95de91..9ac8add 100644
--- a/src/main/webapp/language-manual.html
+++ b/src/main/webapp/language-manual.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/lucene-datamap-guide.html b/src/main/webapp/lucene-datamap-guide.html
index ef819a5..f9675e5 100644
--- a/src/main/webapp/lucene-datamap-guide.html
+++ b/src/main/webapp/lucene-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/performance-tuning.html b/src/main/webapp/performance-tuning.html
index e539614..0a74864 100644
--- a/src/main/webapp/performance-tuning.html
+++ b/src/main/webapp/performance-tuning.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/preaggregate-datamap-guide.html b/src/main/webapp/preaggregate-datamap-guide.html
index 5e0d4e3..9d7a387 100644
--- a/src/main/webapp/preaggregate-datamap-guide.html
+++ b/src/main/webapp/preaggregate-datamap-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/content/quick-start-guide.html b/src/main/webapp/presto-guide.html
similarity index 51%
copy from content/quick-start-guide.html
copy to src/main/webapp/presto-guide.html
index a2f093d..00c3f5f 100644
--- a/content/quick-start-guide.html
+++ b/src/main/webapp/presto-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -219,360 +219,20 @@
                                 <div class="col-sm-12  col-md-12">
                                     <div>
 <h1>
-<a id="quick-start" class="anchor" href="#quick-start" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick Start</h1>
-<p>This tutorial provides a quick introduction to using CarbonData. To follow along with this guide, first download a packaged release of CarbonData from the <a href="https://dist.apache.org/repos/dist/release/carbondata/" target=_blank rel="nofollow">CarbonData website</a>.Alternatively it can be created following <a href="https://github.com/apache/carbondata/tree/master/build" target=_blank>Building CarbonData</a> steps.</p>
-<h2>
-<a id="prerequisites" class="anchor" href="#prerequisites" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prerequisites</h2>
-<ul>
-<li>
-<p>CarbonData supports Spark versions upto 2.2.1.Please download Spark package from <a href="https://spark.apache.org/downloads.html" target=_blank rel="nofollow">Spark website</a></p>
-</li>
-<li>
-<p>Create a sample.csv file using the following commands. The CSV file is required for loading data into CarbonData</p>
-<pre><code>cd carbondata
-cat &gt; sample.csv &lt;&lt; EOF
-id,name,city,age
-1,david,shenzhen,31
-2,eason,shenzhen,27
-3,jarry,wuhan,35
-EOF
-</code></pre>
-</li>
-</ul>
-<h2>
-<a id="integration" class="anchor" href="#integration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration</h2>
-<p>CarbonData can be integrated with Spark and Presto Execution Engines. The below documentation guides on Installing and Configuring with these execution engines.</p>
-<h3>
-<a id="spark" class="anchor" href="#spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark</h3>
-<p><a href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Installing and Configuring CarbonData to run locally with Spark Shell</a></p>
-<p><a href="#installing-and-configuring-carbondata-on-standalone-spark-cluster">Installing and Configuring CarbonData on Standalone Spark Cluster</a></p>
-<p><a href="#installing-and-configuring-carbondata-on-spark-on-yarn-cluster">Installing and Configuring CarbonData on Spark on YARN Cluster</a></p>
-<p><a href="#query-execution-using-carbondata-thrift-server">Installing and Configuring CarbonData Thrift Server for Query Execution</a></p>
-<h3>
-<a id="presto" class="anchor" href="#presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto</h3>
-<p><a href="#installing-and-configuring-carbondata-on-presto">Installing and Configuring CarbonData on Presto</a></p>
-<h2>
-<a id="installing-and-configuring-carbondata-to-run-locally-with-spark-shell" class="anchor" href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData to run locally with Spark Shell</h2>
-<p>Apache Spark Shell provides a simple way to learn the API, as well as a powerful tool to analyze data interactively. Please visit <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">Apache Spark Documentation</a> for more details on Spark shell.</p>
-<h4>
-<a id="basics" class="anchor" href="#basics" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Basics</h4>
-<p>Start Spark shell by running the following command in the Spark directory:</p>
-<pre><code>./bin/spark-shell --jars &lt;carbondata assembly jar path&gt;
-</code></pre>
-<p><strong>NOTE</strong>: Path where packaged release of CarbonData was downloaded or assembly jar will be available after <a href="https://github.com/apache/carbondata/blob/master/build/README.md" target=_blank>building CarbonData</a> and can be copied from <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code></p>
-<p>In this shell, SparkSession is readily available as <code>spark</code> and Spark context is readily available as <code>sc</code>.</p>
-<p>In order to create a CarbonSession we will have to configure it explicitly in the following manner :</p>
-<ul>
-<li>Import the following :</li>
-</ul>
-<pre><code>import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.CarbonSession._
-</code></pre>
-<ul>
-<li>Create a CarbonSession :</li>
-</ul>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession("&lt;hdfs store path&gt;")
-</code></pre>
-<p><strong>NOTE</strong>: By default metastore location points to <code>../carbon.metastore</code>, user can provide own metastore location to CarbonSession like <code>SparkSession.builder().config(sc.getConf) .getOrCreateCarbonSession("&lt;hdfs store path&gt;", "&lt;local metastore path&gt;")</code></p>
-<h4>
-<a id="executing-queries" class="anchor" href="#executing-queries" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Executing Queries</h4>
-<h6>
-<a id="creating-a-table" class="anchor" href="#creating-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Creating a Table</h6>
-<pre><code>scala&gt;carbon.sql("CREATE TABLE
-                    IF NOT EXISTS test_table(
-                    id string,
-                    name string,
-                    city string,
-                    age Int)
-                  STORED AS carbondata")
-</code></pre>
-<h6>
-<a id="loading-data-to-a-table" class="anchor" href="#loading-data-to-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data to a Table</h6>
-<pre><code>scala&gt;carbon.sql("LOAD DATA INPATH '/path/to/sample.csv'
-                  INTO TABLE test_table")
-</code></pre>
-<p><strong>NOTE</strong>: Please provide the real file path of <code>sample.csv</code> for the above script.
-If you get "tablestatus.lock" issue, please refer to <a href="faq.html">FAQ</a></p>
-<h6>
-<a id="query-data-from-a-table" class="anchor" href="#query-data-from-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Data from a Table</h6>
-<pre><code>scala&gt;carbon.sql("SELECT * FROM test_table").show()
-
-scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
-                  FROM test_table
-                  GROUP BY city").show()
-</code></pre>
+<a id="presto-guide" class="anchor" href="#presto-guide" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto guide</h1>
+<p>This tutorial provides a quick introduction to using current integration/presto module.</p>
+<p><a href="#presto-multinode-cluster-setup-for-carbondata">Presto Multinode Cluster Setup for Carbondata</a></p>
+<p><a href="#presto-single-node-setup-for-carbondata">Presto Single Node Setup for Carbondata</a></p>
 <h2>
-<a id="installing-and-configuring-carbondata-on-standalone-spark-cluster" class="anchor" href="#installing-and-configuring-carbondata-on-standalone-spark-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Standalone Spark Cluster</h2>
+<a id="presto-multinode-cluster-setup-for-carbondata" class="anchor" href="#presto-multinode-cluster-setup-for-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto Multinode Cluster Setup for Carbondata</h2>
 <h3>
-<a id="prerequisites-1" class="anchor" href="#prerequisites-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prerequisites</h3>
-<ul>
-<li>Hadoop HDFS and Yarn should be installed and running.</li>
-<li>Spark should be installed and running on all the cluster nodes.</li>
-<li>CarbonData user should have permission to access HDFS.</li>
-</ul>
-<h3>
-<a id="procedure" class="anchor" href="#procedure" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Procedure</h3>
-<ol>
-<li>
-<p><a href="https://github.com/apache/carbondata/blob/master/build/README.md" target=_blank>Build the CarbonData</a> project and get the assembly jar from <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code>.</p>
-</li>
-<li>
-<p>Copy <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code> to <code>$SPARK_HOME/carbonlib</code> folder.</p>
-<p><strong>NOTE</strong>: Create the carbonlib folder if it does not exist inside <code>$SPARK_HOME</code> path.</p>
-</li>
-<li>
-<p>Add the carbonlib folder path in the Spark classpath. (Edit <code>$SPARK_HOME/conf/spark-env.sh</code> file and modify the value of <code>SPARK_CLASSPATH</code> by appending <code>$SPARK_HOME/carbonlib/*</code> to the existing value)</p>
-</li>
-<li>
-<p>Copy the <code>./conf/carbon.properties.template</code> file from CarbonData repository to <code>$SPARK_HOME/conf/</code> folder and rename the file to <code>carbon.properties</code>.</p>
-</li>
-<li>
-<p>Repeat Step 2 to Step 5 in all the nodes of the cluster.</p>
-</li>
-<li>
-<p>In Spark node[master], configure the properties mentioned in the following table in <code>$SPARK_HOME/conf/spark-defaults.conf</code> file.</p>
-</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Value</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>spark.driver.extraJavaOptions</td>
-<td><code>-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties</code></td>
-<td>A string of extra JVM options to pass to the driver. For instance, GC settings or other logging.</td>
-</tr>
-<tr>
-<td>spark.executor.extraJavaOptions</td>
-<td><code>-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties</code></td>
-<td>A string of extra JVM options to pass to executors. For instance, GC settings or other logging. <strong>NOTE</strong>: You can enter multiple values separated by space.</td>
-</tr>
-</tbody>
-</table>
-<ol>
-<li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code> file:</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Required</th>
-<th>Description</th>
-<th>Example</th>
-<th>Remark</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>carbon.storelocation</td>
-<td>NO</td>
-<td>Location where data CarbonData will create the store and write the data in its own format. If not specified then it takes spark.sql.warehouse.dir path.</td>
-<td>hdfs://HOSTNAME:PORT/Opt/CarbonStore</td>
-<td>Propose to set HDFS directory</td>
-</tr>
-</tbody>
-</table>
+<a id="installing-presto" class="anchor" href="#installing-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing Presto</h3>
 <ol>
-<li>Verify the installation. For example:</li>
+<li>Download the 0.210 version of Presto using:</li>
 </ol>
-<pre><code>./spark-shell --master spark://HOSTNAME:PORT --total-executor-cores 2
---executor-memory 2G
+<pre><code>wget https://repo1.maven.org/maven2/com/facebook/presto/presto-server/0.210/presto-server-0.210.tar.gz
 </code></pre>
-<p><strong>NOTE</strong>: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.</p>
-<h2>
-<a id="installing-and-configuring-carbondata-on-spark-on-yarn-cluster" class="anchor" href="#installing-and-configuring-carbondata-on-spark-on-yarn-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Spark on YARN Cluster</h2>
-<p>This section provides the procedure to install CarbonData on "Spark on YARN" cluster.</p>
-<h3>
-<a id="prerequisites-2" class="anchor" href="#prerequisites-2" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Prerequisites</h3>
-<ul>
-<li>Hadoop HDFS and Yarn should be installed and running.</li>
-<li>Spark should be installed and running in all the clients.</li>
-<li>CarbonData user should have permission to access HDFS.</li>
-</ul>
-<h3>
-<a id="procedure-1" class="anchor" href="#procedure-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Procedure</h3>
-<p>The following steps are only for Driver Nodes. (Driver nodes are the one which starts the spark context.)</p>
-<ol>
-<li>
-<p><a href="https://github.com/apache/carbondata/blob/master/build/README.md" target=_blank>Build the CarbonData</a> project and get the assembly jar from <code>./assembly/target/scala-2.1x/carbondata_xxx.jar</code> and copy to <code>$SPARK_HOME/carbonlib</code> folder.</p>
-<p><strong>NOTE</strong>: Create the carbonlib folder if it does not exists inside <code>$SPARK_HOME</code> path.</p>
-</li>
-<li>
-<p>Copy the <code>./conf/carbon.properties.template</code> file from CarbonData repository to <code>$SPARK_HOME/conf/</code> folder and rename the file to <code>carbon.properties</code>.</p>
-</li>
-<li>
-<p>Create <code>tar.gz</code> file of carbonlib folder and move it inside the carbonlib folder.</p>
-</li>
-</ol>
-<pre><code>cd $SPARK_HOME
-tar -zcvf carbondata.tar.gz carbonlib/
-mv carbondata.tar.gz carbonlib/
-</code></pre>
-<ol>
-<li>Configure the properties mentioned in the following table in <code>$SPARK_HOME/conf/spark-defaults.conf</code> file.</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Description</th>
-<th>Value</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>spark.master</td>
-<td>Set this value to run the Spark in yarn cluster mode.</td>
-<td>Set yarn-client to run the Spark in yarn cluster mode.</td>
-</tr>
-<tr>
-<td>spark.yarn.dist.files</td>
-<td>Comma-separated list of files to be placed in the working directory of each executor.</td>
-<td><code>$SPARK_HOME/conf/carbon.properties</code></td>
-</tr>
-<tr>
-<td>spark.yarn.dist.archives</td>
-<td>Comma-separated list of archives to be extracted into the working directory of each executor.</td>
-<td><code>$SPARK_HOME/carbonlib/carbondata.tar.gz</code></td>
-</tr>
-<tr>
-<td>spark.executor.extraJavaOptions</td>
-<td>A string of extra JVM options to pass to executors. For instance  <strong>NOTE</strong>: You can enter multiple values separated by space.</td>
-<td><code>-Dcarbon.properties.filepath = carbon.properties</code></td>
-</tr>
-<tr>
-<td>spark.executor.extraClassPath</td>
-<td>Extra classpath entries to prepend to the classpath of executors. <strong>NOTE</strong>: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the values in below parameter spark.driver.extraClassPath</td>
-<td><code>carbondata.tar.gz/carbonlib/*</code></td>
-</tr>
-<tr>
-<td>spark.driver.extraClassPath</td>
-<td>Extra classpath entries to prepend to the classpath of the driver. <strong>NOTE</strong>: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the value in below parameter spark.driver.extraClassPath.</td>
-<td><code>$SPARK_HOME/carbonlib/*</code></td>
-</tr>
-<tr>
-<td>spark.driver.extraJavaOptions</td>
-<td>A string of extra JVM options to pass to the driver. For instance, GC settings or other logging.</td>
-<td><code>-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties</code></td>
-</tr>
-</tbody>
-</table>
-<ol>
-<li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code>:</li>
-</ol>
-<table>
-<thead>
-<tr>
-<th>Property</th>
-<th>Required</th>
-<th>Description</th>
-<th>Example</th>
-<th>Default Value</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>carbon.storelocation</td>
-<td>NO</td>
-<td>Location where CarbonData will create the store and write the data in its own format. If not specified then it takes spark.sql.warehouse.dir path.</td>
-<td>hdfs://HOSTNAME:PORT/Opt/CarbonStore</td>
-<td>Propose to set HDFS directory</td>
-</tr>
-</tbody>
-</table>
-<ol>
-<li>Verify the installation.</li>
-</ol>
-<pre><code> ./bin/spark-shell --master yarn-client --driver-memory 1g
- --executor-cores 2 --executor-memory 2G
-</code></pre>
-<p><strong>NOTE</strong>: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.</p>
-<h2>
-<a id="query-execution-using-carbondata-thrift-server" class="anchor" href="#query-execution-using-carbondata-thrift-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Execution Using CarbonData Thrift Server</h2>
-<h3>
-<a id="starting-carbondata-thrift-server" class="anchor" href="#starting-carbondata-thrift-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Starting CarbonData Thrift Server.</h3>
-<p>a. cd <code>$SPARK_HOME</code></p>
-<p>b. Run the following command to start the CarbonData thrift server.</p>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
-$SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
-</code></pre>
-<table>
-<thead>
-<tr>
-<th>Parameter</th>
-<th>Description</th>
-<th>Example</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>CARBON_ASSEMBLY_JAR</td>
-<td>CarbonData assembly jar name present in the <code>$SPARK_HOME/carbonlib/</code> folder.</td>
-<td>carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar</td>
-</tr>
-<tr>
-<td>carbon_store_path</td>
-<td>This is a parameter to the CarbonThriftServer class. This a HDFS path where CarbonData files will be kept. Strongly Recommended to put same as carbon.storelocation parameter of carbon.properties. If not specified then it takes spark.sql.warehouse.dir path.</td>
-<td><code>hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store</code></td>
-</tr>
-</tbody>
-</table>
-<p><strong>NOTE</strong>: From Spark 1.6, by default the Thrift server runs in multi-session mode. Which means each JDBC/ODBC connection owns a copy of their own SQL configuration and temporary function registry. Cached tables are still shared though. If you prefer to run the Thrift server in single-session mode and share all SQL configuration and temporary function registry, please set option <code>spark.sql.hive.thriftServer.singleSession</code> to <code>true</code>. You may either add [...]
-<pre><code>./bin/spark-submit
---conf spark.sql.hive.thriftServer.singleSession=true
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
-$SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
-</code></pre>
-<p><strong>But</strong> in single-session mode, if one user changes the database from one connection, the database of the other connections will be changed too.</p>
-<p><strong>Examples</strong></p>
-<ul>
-<li>Start with default memory and executors.</li>
-</ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
-$SPARK_HOME/carbonlib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
-hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
-</code></pre>
-<ul>
-<li>Start with Fixed executors and resources.</li>
-</ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
---num-executors 3 --driver-memory 20g --executor-memory 250g 
---executor-cores 32 
-/srv/OSCON/BigData/HACluster/install/spark/sparkJdbc/lib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
-hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
-</code></pre>
-<h3>
-<a id="connecting-to-carbondata-thrift-server-using-beeline" class="anchor" href="#connecting-to-carbondata-thrift-server-using-beeline" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Connecting to CarbonData Thrift Server Using Beeline.</h3>
-<pre><code>     cd $SPARK_HOME
-     ./sbin/start-thriftserver.sh
-     ./bin/beeline -u jdbc:hive2://&lt;thriftserver_host&gt;:port
-
-     Example
-     ./bin/beeline -u jdbc:hive2://10.10.10.10:10000
-</code></pre>
-<h2>
-<a id="installing-and-configuring-carbondata-on-presto" class="anchor" href="#installing-and-configuring-carbondata-on-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Presto</h2>
-<p><strong>NOTE:</strong> <strong>CarbonData tables cannot be created nor loaded from Presto. User need to create CarbonData Table and load data into it
-either with <a href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Spark</a> or <a href="./sdk-guide.html">SDK</a> or <a href="./csdk-guide.html">C++ SDK</a>.
-Once the table is created,it can be queried from Presto.</strong></p>
-<h3>
-<a id="installing-presto" class="anchor" href="#installing-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing Presto</h3>
-<ol>
-<li>
-<p>Download the 0.210 version of Presto using:
-<code>wget https://repo1.maven.org/maven2/com/facebook/presto/presto-server/0.210/presto-server-0.210.tar.gz</code></p>
-</li>
+<ol start="2">
 <li>
 <p>Extract Presto tar file: <code>tar zxvf presto-server-0.210.tar.gz</code>.</p>
 </li>
@@ -625,8 +285,8 @@ node.data-dir=/home/ubuntu/data
 <pre><code>com.facebook.presto=INFO
 </code></pre>
 <p>The default minimum level is <code>INFO</code>. There are four levels: <code>DEBUG</code>, <code>INFO</code>, <code>WARN</code> and <code>ERROR</code>.</p>
-<h3>
-<a id="coordinator-configurations" class="anchor" href="#coordinator-configurations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Coordinator Configurations</h3>
+<h2>
+<a id="coordinator-configurations" class="anchor" href="#coordinator-configurations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Coordinator Configurations</h2>
 <h5>
 <a id="contents-of-your-configproperties" class="anchor" href="#contents-of-your-configproperties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Contents of your config.properties</h5>
 <pre><code>coordinator=true
@@ -637,13 +297,10 @@ query.max-total-memory-per-node=5GB
 query.max-memory-per-node=3GB
 memory.heap-headroom-per-node=1GB
 discovery-server.enabled=true
-discovery.uri=http://localhost:8086
-task.max-worker-threads=4
-optimizer.dictionary-aggregation=true
-optimizer.optimize-hash-generation = false
+discovery.uri=http://&lt;coordinator_ip&gt;:8086
 </code></pre>
 <p>The options <code>node-scheduler.include-coordinator=false</code> and <code>coordinator=true</code> indicate that the node is the coordinator and tell the coordinator not to do any of the computation work itself and to use the workers.</p>
-<p><strong>Note</strong>: It is recommended to set <code>query.max-memory-per-node</code> to half of the JVM config max memory, though the workload is highly concurrent, lower value for <code>query.max-memory-per-node</code> is to be used.</p>
+<p><strong>Note</strong>: We recommend setting <code>query.max-memory-per-node</code> to half of the JVM config max memory, though if your workload is highly concurrent, you may want to use a lower value for <code>query.max-memory-per-node</code>.</p>
 <p>Also, the relation between the two configuration properties should be:
 if <code>query.max-memory-per-node=30GB</code>,
 then <code>query.max-memory=&lt;30GB * number of nodes&gt;</code>.</p>
@@ -657,7 +314,7 @@ query.max-memory=5GB
 query.max-memory-per-node=2GB
 discovery.uri=http://&lt;coordinator_ip&gt;:8086
 </code></pre>
-<p><strong>Note</strong>: <code>jvm.config</code> and <code>node.properties</code> files are same for all the nodes (worker + coordinator). All the nodes should have different <code>node.id</code>.(generated by uuid command).</p>
+<p><strong>Note</strong>: The <code>jvm.config</code> and <code>node.properties</code> files are the same for all the nodes (worker + coordinator). All the nodes should have a different <code>node.id</code>.</p>
 <h3>
 <a id="catalog-configurations" class="anchor" href="#catalog-configurations" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Catalog Configurations</h3>
 <ol>
@@ -684,8 +341,6 @@ discovery.uri=&lt;coordinator_ip&gt;:8086
 <p>To run it in foreground.</p>
 <h3>
 <a id="start-presto-cli" class="anchor" href="#start-presto-cli" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Start Presto CLI</h3>
-<pre><code>./presto
-</code></pre>
 <p>To connect to the carbondata catalog, use the following command:</p>
 <pre><code>./presto --server &lt;coordinator_ip&gt;:8086 --catalog carbondata --schema &lt;schema_name&gt;
 </code></pre>
@@ -693,19 +348,121 @@ discovery.uri=&lt;coordinator_ip&gt;:8086
 <pre><code>select * from system.runtime.nodes;
 </code></pre>
 <p>Now you can use the Presto CLI on the coordinator to query data sources in the catalog using the Presto workers.</p>
-<p>List the schemas(databases) available</p>
-<pre><code>show schemas;
+<h2>
+<a id="presto-single-node-setup-for-carbondata" class="anchor" href="#presto-single-node-setup-for-carbondata" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto Single Node Setup for Carbondata</h2>
+<h3>
+<a id="config-presto-server" class="anchor" href="#config-presto-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Config presto server</h3>
+<ul>
+<li>Download presto server (0.210 is suggested and supported): <a href="https://repo1.maven.org/maven2/com/facebook/presto/presto-server/" target=_blank rel="nofollow">https://repo1.maven.org/maven2/com/facebook/presto/presto-server/</a>
+</li>
+<li>Finish presto configuration following <a href="https://prestodb.io/docs/current/installation/deployment.html" target=_blank rel="nofollow">https://prestodb.io/docs/current/installation/deployment.html</a>.
+A configuration example:</li>
+</ul>
+<p><strong>config.properties</strong></p>
+<pre><code>coordinator=true
+node-scheduler.include-coordinator=true
+http-server.http.port=8086
+query.max-memory=5GB
+query.max-total-memory-per-node=5GB
+query.max-memory-per-node=3GB
+memory.heap-headroom-per-node=1GB
+discovery-server.enabled=true
+discovery.uri=http://localhost:8086
+task.max-worker-threads=4
+optimizer.dictionary-aggregation=true
+optimizer.optimize-hash-generation = false  
+</code></pre>
+<p><strong>jvm.config</strong></p>
+<pre><code>-server
+-Xmx4G
+-XX:+UseG1GC
+-XX:G1HeapRegionSize=32M
+-XX:+UseGCOverheadLimit
+-XX:+ExplicitGCInvokesConcurrent
+-XX:+HeapDumpOnOutOfMemoryError
+-XX:OnOutOfMemoryError=kill -9 %p
+-XX:+TraceClassLoading
+-Dcarbon.properties.filepath=&lt;path&gt;/carbon.properties
 </code></pre>
-<p>Selected the schema where CarbonData table resides</p>
-<pre><code>use carbonschema;
+<p>The <code>carbon.properties.filepath</code> system property sets the path to the carbon.properties file. It is recommended to set it, otherwise some features may not work; see the <code>jvm.config</code> example above.</p>
+<p><strong>log.properties</strong></p>
+<pre><code>com.facebook.presto=DEBUG
+com.facebook.presto.server.PluginManager=DEBUG
 </code></pre>
-<p>List the available tables</p>
-<pre><code>show tables;
+<p><strong>node.properties</strong></p>
+<pre><code>node.environment=carbondata
+node.id=ffffffff-ffff-ffff-ffff-ffffffffffff
+node.data-dir=/Users/apple/DEMO/presto_test/data
 </code></pre>
-<p>Query from the available tables</p>
-<pre><code>select * from carbon_table;
+<ul>
+<li>
+<p>Config carbondata-connector for presto</p>
+<p>Firstly: Compile carbondata, including carbondata-presto integration module</p>
+<pre><code>$ git clone https://github.com/apache/carbondata
+$ cd carbondata
+$ mvn -DskipTests -P{spark-version} -Dspark.version={spark-version-number} -Dhadoop.version={hadoop-version-number} clean package
 </code></pre>
-<p><strong>Note :</strong> Create Tables and data loads should be done before executing queries as we can not create carbon table from this interface.</p>
+<p>Replace the spark and hadoop versions with the versions used in your cluster.
+For example, if you are using Spark 2.2.1 and Hadoop 2.7.2, compile using:</p>
+<pre><code>mvn -DskipTests -Pspark-2.2 -Dspark.version=2.2.1 -Dhadoop.version=2.7.2 clean package
+</code></pre>
+<p>Secondly: Create a folder named 'carbondata' under $PRESTO_HOME$/plugin and
+copy all jars from carbondata/integration/presto/target/carbondata-presto-x.x.x-SNAPSHOT
+to $PRESTO_HOME$/plugin/carbondata</p>
+<p><strong>NOTE:</strong> Copying the assembly jar alone will not work; all jars from integration/presto/target/carbondata-presto-x.x.x-SNAPSHOT need to be copied</p>
+<p>Thirdly: Create a carbondata.properties file under $PRESTO_HOME$/etc/catalog/ containing the following contents:</p>
+<pre><code>connector.name=carbondata
+hive.metastore.uri=thrift://&lt;host&gt;:&lt;port&gt;
+</code></pre>
+<p>CarbonData is one of the supported formats of the presto hive plugin, so the configuration and setup are similar to those of the presto hive connector.
+Please refer to <a href="https://prestodb.io/docs/current/connector/hive.html" target=_blank rel="nofollow">https://prestodb.io/docs/current/connector/hive.html</a> for more details.</p>
+<p><strong>Note</strong>: Since carbon works only with the hive metastore, spark must also connect to the same metastore db for creating and updating tables; a sketch of such a Spark session follows this list.
+All the operations done on spark will be reflected in presto immediately.
+It is mandatory to create Carbon tables from spark using CarbonData 1.5.2 or a later version, since input/output formats are properly updated in the carbon table only from this version onwards.</p>
+</li>
+</ul>
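+<p>A minimal sketch of that Spark session (assuming spark-shell was started with the carbondata assembly jar, and that <code>thrift://&lt;host&gt;:&lt;port&gt;</code> is the same metastore URI configured in carbondata.properties; the store path is a placeholder):</p>
+<pre><code>import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.CarbonSession._
+
+// Connect Spark to the same Hive metastore that the Presto carbondata
+// catalog reads, so tables created here become visible to Presto.
+val carbon = SparkSession.builder()
+  .enableHiveSupport()
+  .config("hive.metastore.uris", "thrift://&lt;host&gt;:&lt;port&gt;")
+  .getOrCreateCarbonSession("&lt;carbon store path&gt;")
+</code></pre>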
+<h4>
+<a id="connecting-to-carbondata-store-on-s3" class="anchor" href="#connecting-to-carbondata-store-on-s3" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Connecting to carbondata store on s3</h4>
+<ul>
+<li>
+<p>If you want to query a carbondata store on S3 using the S3A API, put the following additional properties inside $PRESTO_HOME$/etc/catalog/carbondata.properties:</p>
+<pre><code> Required properties
+
+ hive.s3.aws-access-key={value}
+ hive.s3.aws-secret-key={value}
+ 
+ Optional properties
+ 
+ hive.s3.endpoint={value}
+</code></pre>
+<p>Please refer <a href="https://prestodb.io/docs/current/connector/hive.html" target=_blank rel="nofollow">https://prestodb.io/docs/current/connector/hive.html</a> for more details on S3 integration.</p>
+</li>
+</ul>
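+<p>Putting the above together, a complete $PRESTO_HOME$/etc/catalog/carbondata.properties for an S3-backed store might look like the following (all values are placeholders):</p>
+<pre><code>connector.name=carbondata
+hive.metastore.uri=thrift://&lt;host&gt;:&lt;port&gt;
+hive.s3.aws-access-key=&lt;access-key&gt;
+hive.s3.aws-secret-key=&lt;secret-key&gt;
+hive.s3.endpoint=&lt;endpoint&gt;
+</code></pre>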
+<h3>
+<a id="generate-carbondata-file" class="anchor" href="#generate-carbondata-file" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Generate CarbonData file</h3>
+<p>Please refer to the quick start guide: <a href="https://github.com/apache/carbondata/blob/master/docs/quick-start-guide.html" target=_blank>https://github.com/apache/carbondata/blob/master/docs/quick-start-guide.html</a>.
+The load data statement in Spark can be used to populate carbondata tables, after which the created
+carbondata files can easily be found.</p>
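+<p>For example, from a spark-shell CarbonSession (paths and names are illustrative):</p>
+<pre><code>carbon.sql("CREATE TABLE IF NOT EXISTS test_table(id STRING, name STRING) STORED AS carbondata")
+carbon.sql("LOAD DATA INPATH '/path/to/sample.csv' INTO TABLE test_table")
+</code></pre>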
+<h3>
+<a id="query-carbondata-in-cli-of-presto" class="anchor" href="#query-carbondata-in-cli-of-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query carbondata in CLI of presto</h3>
+<ul>
+<li>
+<p>Download the Presto CLI client of version 0.210: <a href="https://repo1.maven.org/maven2/com/facebook/presto/presto-cli" target=_blank rel="nofollow">https://repo1.maven.org/maven2/com/facebook/presto/presto-cli</a></p>
+</li>
+<li>
+<p>Start CLI:</p>
+<pre><code>$ ./presto --server localhost:8086 --catalog carbondata --schema default
+</code></pre>
+<p>Replace the hostname, port and schema name with your own.</p>
+</li>
+</ul>
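+<p>Once connected, standard SQL can be issued from the CLI, for example (table name is illustrative):</p>
+<pre><code>show tables;
+select * from test_table;
+</code></pre>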
+<h3>
+<a id="supported-features-of-presto-carbon" class="anchor" href="#supported-features-of-presto-carbon" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Supported features of presto carbon</h3>
+<p>Presto carbon supports only reading carbon tables that were written by spark carbon or the carbon SDK.
+During reading, it supports the non-distributed datamaps such as the block datamap and the bloom datamap.
+It does not support the MV datamap and the pre-aggregate datamap, as they require the query plan to be changed, which Presto does not allow.
+Presto carbon also supports reading streaming segments from streaming tables created by Spark.</p>
 <script>
 // Show selected style on nav item
 $(function() { $('.b-nav__quickstart').addClass('selected'); });
diff --git a/src/main/webapp/quick-start-guide.html b/src/main/webapp/quick-start-guide.html
index a2f093d..b321353 100644
--- a/src/main/webapp/quick-start-guide.html
+++ b/src/main/webapp/quick-start-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -241,16 +241,32 @@ EOF
 </ul>
 <h2>
 <a id="integration" class="anchor" href="#integration" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration</h2>
-<p>CarbonData can be integrated with Spark and Presto Execution Engines. The below documentation guides on Installing and Configuring with these execution engines.</p>
 <h3>
-<a id="spark" class="anchor" href="#spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark</h3>
+<a id="integration-with-execution-engines" class="anchor" href="#integration-with-execution-engines" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration with Execution Engines</h3>
+<p>CarbonData can be integrated with the Spark, Presto and Hive execution engines. The documentation below guides you through installing and configuring CarbonData with these execution engines.</p>
+<h4>
+<a id="spark" class="anchor" href="#spark" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Spark</h4>
 <p><a href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell">Installing and Configuring CarbonData to run locally with Spark Shell</a></p>
 <p><a href="#installing-and-configuring-carbondata-on-standalone-spark-cluster">Installing and Configuring CarbonData on Standalone Spark Cluster</a></p>
 <p><a href="#installing-and-configuring-carbondata-on-spark-on-yarn-cluster">Installing and Configuring CarbonData on Spark on YARN Cluster</a></p>
 <p><a href="#query-execution-using-carbondata-thrift-server">Installing and Configuring CarbonData Thrift Server for Query Execution</a></p>
-<h3>
-<a id="presto" class="anchor" href="#presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto</h3>
+<h4>
+<a id="presto" class="anchor" href="#presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Presto</h4>
 <p><a href="#installing-and-configuring-carbondata-on-presto">Installing and Configuring CarbonData on Presto</a></p>
+<h4>
+<a id="hive" class="anchor" href="#hive" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Hive</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/hive-guide.html" target=_blank>Installing and Configuring CarbonData on Hive</a></p>
+<h3>
+<a id="integration-with-storage-engines" class="anchor" href="#integration-with-storage-engines" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Integration with Storage Engines</h3>
+<h4>
+<a id="hdfs" class="anchor" href="#hdfs" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>HDFS</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/quick-start-guide.html#installing-and-configuring-carbondata-on-standalone-spark-cluster">CarbonData supports read and write with HDFS</a></p>
+<h4>
+<a id="s3" class="anchor" href="#s3" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>S3</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/s3-guide.html" target=_blank>CarbonData supports read and write with S3</a></p>
+<h4>
+<a id="alluxio" class="anchor" href="#alluxio" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Alluxio</h4>
+<p><a href="https://github.com/apache/carbondata/blob/master/docs/alluxio-guide.html" target=_blank>CarbonData supports read and write with Alluxio</a></p>
 <h2>
 <a id="installing-and-configuring-carbondata-to-run-locally-with-spark-shell" class="anchor" href="#installing-and-configuring-carbondata-to-run-locally-with-spark-shell" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData to run locally with Spark Shell</h2>
 <p>Apache Spark Shell provides a simple way to learn the API, as well as a powerful tool to analyze data interactively. Please visit <a href="http://spark.apache.org/docs/latest/" target=_blank rel="nofollow">Apache Spark Documentation</a> for more details on Spark shell.</p>
@@ -271,36 +287,44 @@ import org.apache.spark.sql.CarbonSession._
 <ul>
 <li>Create a CarbonSession :</li>
 </ul>
-<pre><code>val carbon = SparkSession.builder().config(sc.getConf)
-             .getOrCreateCarbonSession("&lt;hdfs store path&gt;")
+<pre><code>val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("&lt;carbon_store_path&gt;")
 </code></pre>
-<p><strong>NOTE</strong>: By default metastore location points to <code>../carbon.metastore</code>, user can provide own metastore location to CarbonSession like <code>SparkSession.builder().config(sc.getConf) .getOrCreateCarbonSession("&lt;hdfs store path&gt;", "&lt;local metastore path&gt;")</code></p>
+<p><strong>NOTE</strong></p>
+<ul>
+<li>By default metastore location points to <code>../carbon.metastore</code>, user can provide own metastore location to CarbonSession like
+<code>SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("&lt;carbon_store_path&gt;", "&lt;local metastore path&gt;")</code>.</li>
+<li>Data storage location can be specified by <code>&lt;carbon_store_path&gt;</code>, like <code>/carbon/data/store</code>, <code>hdfs://localhost:9000/carbon/data/store</code> or <code>s3a://carbon/data/store</code>.</li>
+</ul>
 <h4>
 <a id="executing-queries" class="anchor" href="#executing-queries" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Executing Queries</h4>
 <h6>
 <a id="creating-a-table" class="anchor" href="#creating-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Creating a Table</h6>
-<pre><code>scala&gt;carbon.sql("CREATE TABLE
-                    IF NOT EXISTS test_table(
-                    id string,
-                    name string,
-                    city string,
-                    age Int)
-                  STORED AS carbondata")
+<pre><code>carbon.sql(
+           s"""
+              | CREATE TABLE IF NOT EXISTS test_table(
+              |   id string,
+              |   name string,
+              |   city string,
+              |   age Int)
+              | STORED AS carbondata
+           """.stripMargin)
 </code></pre>
 <h6>
 <a id="loading-data-to-a-table" class="anchor" href="#loading-data-to-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Loading Data to a Table</h6>
-<pre><code>scala&gt;carbon.sql("LOAD DATA INPATH '/path/to/sample.csv'
-                  INTO TABLE test_table")
+<pre><code>carbon.sql("LOAD DATA INPATH '/path/to/sample.csv' INTO TABLE test_table")
 </code></pre>
 <p><strong>NOTE</strong>: Please provide the real file path of <code>sample.csv</code> for the above script.
 If you get "tablestatus.lock" issue, please refer to <a href="faq.html">FAQ</a></p>
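+<p>A minimal <code>sample.csv</code> matching the schema above could contain (contents are illustrative):</p>
+<pre><code>id,name,city,age
+1,david,shenzhen,31
+2,eason,shenzhen,27
+3,jarry,wuhan,35
+</code></pre>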
 <h6>
 <a id="query-data-from-a-table" class="anchor" href="#query-data-from-a-table" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Query Data from a Table</h6>
-<pre><code>scala&gt;carbon.sql("SELECT * FROM test_table").show()
+<pre><code>carbon.sql("SELECT * FROM test_table").show()
 
-scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
-                  FROM test_table
-                  GROUP BY city").show()
+carbon.sql(
+           s"""
+              | SELECT city, avg(age), sum(age)
+              | FROM test_table
+              | GROUP BY city
+           """.stripMargin).show()
 </code></pre>
 <h2>
 <a id="installing-and-configuring-carbondata-on-standalone-spark-cluster" class="anchor" href="#installing-and-configuring-carbondata-on-standalone-spark-cluster" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Standalone Spark Cluster</h2>
@@ -355,7 +379,7 @@ scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="7">
 <li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code> file:</li>
 </ol>
 <table>
@@ -378,10 +402,12 @@ scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="8">
 <li>Verify the installation. For example:</li>
 </ol>
-<pre><code>./spark-shell --master spark://HOSTNAME:PORT --total-executor-cores 2
+<pre><code>./bin/spark-shell \
+--master spark://HOSTNAME:PORT \
+--total-executor-cores 2 \
 --executor-memory 2G
 </code></pre>
 <p><strong>NOTE</strong>: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.</p>
@@ -414,7 +440,7 @@ scala&gt;carbon.sql("SELECT city, avg(age), sum(age)
 tar -zcvf carbondata.tar.gz carbonlib/
 mv carbondata.tar.gz carbonlib/
 </code></pre>
-<ol>
+<ol start="4">
 <li>Configure the properties mentioned in the following table in <code>$SPARK_HOME/conf/spark-defaults.conf</code> file.</li>
 </ol>
 <table>
@@ -463,7 +489,7 @@ mv carbondata.tar.gz carbonlib/
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="5">
 <li>Add the following properties in <code>$SPARK_HOME/conf/carbon.properties</code>:</li>
 </ol>
 <table>
@@ -486,11 +512,14 @@ mv carbondata.tar.gz carbonlib/
 </tr>
 </tbody>
 </table>
-<ol>
+<ol start="6">
 <li>Verify the installation.</li>
 </ol>
-<pre><code> ./bin/spark-shell --master yarn-client --driver-memory 1g
- --executor-cores 2 --executor-memory 2G
+<pre><code>./bin/spark-shell \
+--master yarn-client \
+--driver-memory 1G \
+--executor-memory 2G \
+--executor-cores 2
 </code></pre>
 <p><strong>NOTE</strong>: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.</p>
 <h2>
@@ -499,8 +528,8 @@ mv carbondata.tar.gz carbonlib/
 <a id="starting-carbondata-thrift-server" class="anchor" href="#starting-carbondata-thrift-server" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Starting CarbonData Thrift Server.</h3>
 <p>a. cd <code>$SPARK_HOME</code></p>
 <p>b. Run the following command to start the CarbonData thrift server.</p>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
+<pre><code>./bin/spark-submit \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
 $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 </code></pre>
 <table>
@@ -525,9 +554,9 @@ $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 </tbody>
 </table>
 <p><strong>NOTE</strong>: From Spark 1.6, by default the Thrift server runs in multi-session mode. Which means each JDBC/ODBC connection owns a copy of their own SQL configuration and temporary function registry. Cached tables are still shared though. If you prefer to run the Thrift server in single-session mode and share all SQL configuration and temporary function registry, please set option <code>spark.sql.hive.thriftServer.singleSession</code> to <code>true</code>. You may either add [...]
-<pre><code>./bin/spark-submit
---conf spark.sql.hive.thriftServer.singleSession=true
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
+<pre><code>./bin/spark-submit \
+--conf spark.sql.hive.thriftServer.singleSession=true \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
 $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 </code></pre>
 <p><strong>But</strong> in single-session mode, if one user changes the database from one connection, the database of the other connections will be changed too.</p>
@@ -535,31 +564,31 @@ $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR &lt;carbon_store_path&gt;
 <ul>
 <li>Start with default memory and executors.</li>
 </ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
-$SPARK_HOME/carbonlib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
+<pre><code>./bin/spark-submit \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
+$SPARK_HOME/carbonlib/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar \
 hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
 </code></pre>
 <ul>
 <li>Start with Fixed executors and resources.</li>
 </ul>
-<pre><code>./bin/spark-submit
---class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
---num-executors 3 --driver-memory 20g --executor-memory 250g 
---executor-cores 32 
-/srv/OSCON/BigData/HACluster/install/spark/sparkJdbc/lib
-/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar
+<pre><code>./bin/spark-submit \
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
+--num-executors 3 \
+--driver-memory 20G \
+--executor-memory 250G \
+--executor-cores 32 \
+$SPARK_HOME/carbonlib/carbondata_2.xx-x.x.x-SNAPSHOT-shade-hadoop2.7.2.jar \
 hdfs://&lt;host_name&gt;:port/user/hive/warehouse/carbon.store
 </code></pre>
 <h3>
 <a id="connecting-to-carbondata-thrift-server-using-beeline" class="anchor" href="#connecting-to-carbondata-thrift-server-using-beeline" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Connecting to CarbonData Thrift Server Using Beeline.</h3>
-<pre><code>     cd $SPARK_HOME
-     ./sbin/start-thriftserver.sh
-     ./bin/beeline -u jdbc:hive2://&lt;thriftserver_host&gt;:port
+<pre><code>cd $SPARK_HOME
+./sbin/start-thriftserver.sh
+./bin/beeline -u jdbc:hive2://&lt;thriftserver_host&gt;:port
 
-     Example
-     ./bin/beeline -u jdbc:hive2://10.10.10.10:10000
+Example
+./bin/beeline -u jdbc:hive2://10.10.10.10:10000
 </code></pre>
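+<p>Once beeline is connected, queries can be run against the tables created earlier, for example:</p>
+<pre><code>SELECT * FROM test_table;
+</code></pre>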
 <h2>
 <a id="installing-and-configuring-carbondata-on-presto" class="anchor" href="#installing-and-configuring-carbondata-on-presto" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Installing and Configuring CarbonData on Presto</h2>
@@ -580,29 +609,23 @@ Once the table is created,it can be queried from Presto.</strong></p>
 <p>Download the Presto CLI for the coordinator and name it presto.</p>
 </li>
 </ol>
-<pre><code>  wget https://repo1.maven.org/maven2/com/facebook/presto/presto-cli/0.210/presto-cli-0.210-executable.jar
+<pre><code>wget https://repo1.maven.org/maven2/com/facebook/presto/presto-cli/0.210/presto-cli-0.210-executable.jar
 
-  mv presto-cli-0.210-executable.jar presto
+mv presto-cli-0.210-executable.jar presto
 
-  chmod +x presto
+chmod +x presto
 </code></pre>
 <h3>
 <a id="create-configuration-files" class="anchor" href="#create-configuration-files" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Create Configuration Files</h3>
 <ol>
-<li>
-<p>Create <code>etc</code> folder in presto-server-0.210 directory.</p>
-</li>
-<li>
-<p>Create <code>config.properties</code>, <code>jvm.config</code>, <code>log.properties</code>, and <code>node.properties</code> files.</p>
-</li>
-<li>
-<p>Install uuid to generate a node.id.</p>
+<li>Create <code>etc</code> folder in presto-server-0.210 directory.</li>
+<li>Create <code>config.properties</code>, <code>jvm.config</code>, <code>log.properties</code>, and <code>node.properties</code> files.</li>
+<li>Install uuid to generate a node.id.</li>
+</ol>
 <pre><code>sudo apt-get install uuid
 
 uuid
 </code></pre>
-</li>
-</ol>
 <h5>
 <a id="contents-of-your-nodeproperties-file" class="anchor" href="#contents-of-your-nodeproperties-file" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Contents of your node.properties file</h5>
 <pre><code>node.environment=production
@@ -706,6 +729,7 @@ discovery.uri=&lt;coordinator_ip&gt;:8086
 <pre><code>select * from carbon_table;
 </code></pre>
 <p><strong>Note :</strong> Create Tables and data loads should be done before executing queries as we can not create carbon table from this interface.</p>
 <script>
 // Show selected style on nav item
 $(function() { $('.b-nav__quickstart').addClass('selected'); });
diff --git a/src/main/webapp/release-guide.html b/src/main/webapp/release-guide.html
index dcdaba3..ad94ba0 100644
--- a/src/main/webapp/release-guide.html
+++ b/src/main/webapp/release-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/s3-guide.html b/src/main/webapp/s3-guide.html
index ba25dfb..bf6f06d 100644
--- a/src/main/webapp/s3-guide.html
+++ b/src/main/webapp/s3-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -233,7 +233,7 @@ Carbondata relies on Hadoop provided S3 filesystem APIs to access Object stores.
 <p>To store carbondata files onto Object Store, <code>carbon.storelocation</code> property will have
 to be configured with Object Store path in CarbonProperties file.</p>
 <p>For example:</p>
-<pre><code>carbon.storelocation=s3a://mybucket/carbonstore.
+<pre><code>carbon.storelocation=s3a://mybucket/carbonstore
 </code></pre>
 <p>If the existing store location cannot be changed or only specific tables need to be stored
 onto cloud object store, it can be done so by specifying the <code>location</code> option in the create
@@ -263,8 +263,11 @@ spark.hadoop.fs.s3a.access.key=456
 <li>Pass authentication properties with spark-submit as configuration.</li>
 </ol>
 <p>Example:</p>
-<pre><code>./bin/spark-submit --master yarn --conf spark.hadoop.fs.s3a.secret.key=123 --conf spark.hadoop.fs
-.s3a.access.key=456 --class=
+<pre><code>./bin/spark-submit \
+--master yarn \
+--conf spark.hadoop.fs.s3a.secret.key=123 \
+--conf spark.hadoop.fs.s3a.access.key=456 \
+--class=xxx
 </code></pre>
 <ol start="4">
 <li>Set authentication properties to hadoop configuration object in sparkContext.</li>
diff --git a/src/main/webapp/sdk-guide.html b/src/main/webapp/sdk-guide.html
index 37d6b26..32bd876 100644
--- a/src/main/webapp/sdk-guide.html
+++ b/src/main/webapp/sdk-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -239,49 +239,49 @@ These SDK writer output contains just carbondata and carbonindex files. No metad
 <a id="quick-example" class="anchor" href="#quick-example" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick example</h2>
 <h3>
 <a id="example-with-csv-format" class="anchor" href="#example-with-csv-format" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example with csv format</h3>
-<div class="highlight highlight-source-java"><pre> <span class="pl-k">import</span> <span class="pl-smi">java.io.IOException</span>;
- 
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.metadata.datatype.DataTypes</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.util.CarbonProperties</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriter</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriterBuilder</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Field</span>;
- <span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Schema</span>;
- 
- <span class="pl-k">public</span> <span class="pl-k">class</span> <span class="pl-en">TestSdk</span> {
+<div class="highlight highlight-source-java"><pre><span class="pl-k">import</span> <span class="pl-smi">java.io.IOException</span>;
 
-   <span class="pl-c"><span class="pl-c">//</span> pass true or false while executing the main to use offheap memory or not</span>
-   <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">main</span>(<span class="pl-k">String</span>[] <span class="pl-v">args</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
-     <span class="pl-k">if</span> (args<span class="pl-k">.</span>length <span class="pl-k">&gt;</span> <span class="pl-c1">0</span> <span class="pl-k">&amp;&amp;</span> args[<span class="pl-c1">0</span>] <span class="pl-k">!=</span> <span class="pl-c1">null</span>) {
-       testSdkWriter(args[<span class="pl-c1">0</span>]);
-     } <span class="pl-k">else</span> {
-       testSdkWriter(<span class="pl-s"><span class="pl-pds">"</span>true<span class="pl-pds">"</span></span>);
-     }
-   }
- 
-   <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">testSdkWriter</span>(<span class="pl-smi">String</span> <span class="pl-v">enableOffheap</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
-     <span class="pl-smi">String</span> path <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">"</span>./target/testCSVSdkWriter<span class="pl-pds">"</span></span>;
- 
-     <span class="pl-k">Field</span>[] fields <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>[<span class="pl-c1">2</span>];
-     fields[<span class="pl-c1">0</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>name<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>STRING</span>);
-     fields[<span class="pl-c1">1</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>age<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>INT</span>);
- 
-     <span class="pl-smi">Schema</span> schema <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Schema</span>(fields);
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.metadata.datatype.DataTypes</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.core.util.CarbonProperties</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriter</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.CarbonWriterBuilder</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Field</span>;
+<span class="pl-k">import</span> <span class="pl-smi">org.apache.carbondata.sdk.file.Schema</span>;
 
-     <span class="pl-smi">CarbonProperties</span><span class="pl-k">.</span>getInstance()<span class="pl-k">.</span>addProperty(<span class="pl-s"><span class="pl-pds">"</span>enable.offheap.sort<span class="pl-pds">"</span></span>, enableOffheap);
- 
-     <span class="pl-smi">CarbonWriterBuilder</span> builder <span class="pl-k">=</span> <span class="pl-smi">CarbonWriter</span><span class="pl-k">.</span>builder()<span class="pl-k">.</span>outputPath(path)<span class="pl-k">.</span>withCsvInput(schema)<span class="pl-k">.</span>writtenBy(<span class="pl-s"><span class="pl-pds">"</span>SDK<span class="pl-pds">"</span></span>);
- 
-     <span class="pl-smi">CarbonWriter</span> writer <span class="pl-k">=</span> builder<span class="pl-k">.</span>build();
- 
-     <span class="pl-k">int</span> rows <span class="pl-k">=</span> <span class="pl-c1">5</span>;
-     <span class="pl-k">for</span> (<span class="pl-k">int</span> i <span class="pl-k">=</span> <span class="pl-c1">0</span>; i <span class="pl-k">&lt;</span> rows; i<span class="pl-k">++</span>) {
-       writer<span class="pl-k">.</span>write(<span class="pl-k">new</span> <span class="pl-smi">String</span>[] { <span class="pl-s"><span class="pl-pds">"</span>robot<span class="pl-pds">"</span></span> <span class="pl-k">+</span> (i <span class="pl-k">%</span> <span class="pl-c1">10</span>), <span class="pl-smi">String</span><span class="pl-k">.</span>valueOf(i) });
-     }
-     writer<span class="pl-k">.</span>close();
-   }
- }</pre></div>
+<span class="pl-k">public</span> <span class="pl-k">class</span> <span class="pl-en">TestSdk</span> {
+
+  <span class="pl-c"><span class="pl-c">//</span> pass true or false while executing the main to use offheap memory or not</span>
+  <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">main</span>(<span class="pl-k">String</span>[] <span class="pl-v">args</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
+    <span class="pl-k">if</span> (args<span class="pl-k">.</span>length <span class="pl-k">&gt;</span> <span class="pl-c1">0</span> <span class="pl-k">&amp;&amp;</span> args[<span class="pl-c1">0</span>] <span class="pl-k">!=</span> <span class="pl-c1">null</span>) {
+      testSdkWriter(args[<span class="pl-c1">0</span>]);
+    } <span class="pl-k">else</span> {
+      testSdkWriter(<span class="pl-s"><span class="pl-pds">"</span>true<span class="pl-pds">"</span></span>);
+    }
+  }
+
+  <span class="pl-k">public</span> <span class="pl-k">static</span> <span class="pl-k">void</span> <span class="pl-en">testSdkWriter</span>(<span class="pl-smi">String</span> <span class="pl-v">enableOffheap</span>) <span class="pl-k">throws</span> <span class="pl-smi">IOException</span>, <span class="pl-smi">InvalidLoadOptionException</span> {
+    <span class="pl-smi">String</span> path <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">"</span>./target/testCSVSdkWriter<span class="pl-pds">"</span></span>;
+
+    <span class="pl-k">Field</span>[] fields <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>[<span class="pl-c1">2</span>];
+    fields[<span class="pl-c1">0</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>name<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>STRING</span>);
+    fields[<span class="pl-c1">1</span>] <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Field</span>(<span class="pl-s"><span class="pl-pds">"</span>age<span class="pl-pds">"</span></span>, <span class="pl-smi">DataTypes</span><span class="pl-c1"><span class="pl-k">.</span>INT</span>);
+
+    <span class="pl-smi">Schema</span> schema <span class="pl-k">=</span> <span class="pl-k">new</span> <span class="pl-smi">Schema</span>(fields);
+
+    <span class="pl-smi">CarbonProperties</span><span class="pl-k">.</span>getInstance()<span class="pl-k">.</span>addProperty(<span class="pl-s"><span class="pl-pds">"</span>enable.offheap.sort<span class="pl-pds">"</span></span>, enableOffheap);
+
+    <span class="pl-smi">CarbonWriterBuilder</span> builder <span class="pl-k">=</span> <span class="pl-smi">CarbonWriter</span><span class="pl-k">.</span>builder()<span class="pl-k">.</span>outputPath(path)<span class="pl-k">.</span>withCsvInput(schema)<span class="pl-k">.</span>writtenBy(<span class="pl-s"><span class="pl-pds">"</span>SDK<span class="pl-pds">"</span></span>);
+
+    <span class="pl-smi">CarbonWriter</span> writer <span class="pl-k">=</span> builder<span class="pl-k">.</span>build();
+
+    <span class="pl-k">int</span> rows <span class="pl-k">=</span> <span class="pl-c1">5</span>;
+    <span class="pl-k">for</span> (<span class="pl-k">int</span> i <span class="pl-k">=</span> <span class="pl-c1">0</span>; i <span class="pl-k">&lt;</span> rows; i<span class="pl-k">++</span>) {
+      writer<span class="pl-k">.</span>write(<span class="pl-k">new</span> <span class="pl-smi">String</span>[] { <span class="pl-s"><span class="pl-pds">"</span>robot<span class="pl-pds">"</span></span> <span class="pl-k">+</span> (i <span class="pl-k">%</span> <span class="pl-c1">10</span>), <span class="pl-smi">String</span><span class="pl-k">.</span>valueOf(i) });
+    }
+    writer<span class="pl-k">.</span>close();
+  }
+}</pre></div>
 <h3>
 <a id="example-with-avro-format" class="anchor" href="#example-with-avro-format" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Example with Avro format</h3>
 <div class="highlight highlight-source-java"><pre><span class="pl-k">import</span> <span class="pl-smi">java.io.IOException</span>;
@@ -543,271 +543,281 @@ or directly use DataTypes.VARCHAR if it is carbon schema.</p>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonwriterbuilder" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonwriterbuilder" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonWriterBuilder</h3>
 <pre><code>/**
-* Sets the output path of the writer builder
-* @param path is the absolute path where output files are written
-*             This method must be called when building CarbonWriterBuilder
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the output path of the writer builder
+ *
+ * @param path is the absolute path where output files are written
+ *             This method must be called when building CarbonWriterBuilder
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder outputPath(String path);
 </code></pre>
 <pre><code>/**
-* to set the timestamp in the carbondata and carbonindex index files
-* @param UUID is a timestamp to be used in the carbondata and carbonindex index files.
-*             By default set to zero.
-* @return updated CarbonWriterBuilder
-*/
+ * To set the timestamp in the carbondata and carbonindex index files
+ *
+ * @param UUID is a timestamp to be used in the carbondata and carbonindex index files.
+ *             By default set to zero.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder uniqueIdentifier(long UUID);
 </code></pre>
 <pre><code>/**
-* To set the carbondata file size in MB between 1MB-2048MB
-* @param blockSize is size in MB between 1MB to 2048 MB
-*                  default value is 1024 MB
-* @return updated CarbonWriterBuilder
-*/
+ * To set the carbondata file size in MB between 1MB-2048MB
+ *
+ * @param blockSize is size in MB between 1MB to 2048 MB
+ *                  default value is 1024 MB
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withBlockSize(int blockSize);
 </code></pre>
 <pre><code>/**
-* To set the blocklet size of carbondata file
-* @param blockletSize is blocklet size in MB
-*                     default value is 64 MB
-* @return updated CarbonWriterBuilder
-*/
+ * To set the blocklet size of carbondata file
+ *
+ * @param blockletSize is blocklet size in MB
+ *                     default value is 64 MB
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withBlockletSize(int blockletSize);
 </code></pre>
 <pre><code>/**
-   * @param enableLocalDictionary enable local dictionary  , default is false
-   * @return updated CarbonWriterBuilder
-   */
+ * @param enableLocalDictionary enable local dictionary  , default is false
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder enableLocalDictionary(boolean enableLocalDictionary);
 </code></pre>
 <pre><code>/**
-   * @param localDictionaryThreshold is localDictionaryThreshold,default is 10000
-   * @return updated CarbonWriterBuilder
-   */
+ * @param localDictionaryThreshold is localDictionaryThreshold,default is 10000
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder localDictionaryThreshold(int localDictionaryThreshold) ;
 </code></pre>
 <pre><code>/**
-* sets the list of columns that needs to be in sorted order
-* @param sortColumns is a string array of columns that needs to be sorted.
-*                    If it is null or by default all dimensions are selected for sorting
-*                    If it is empty array, no columns are sorted
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the list of columns that need to be in sorted order
+ *
+ * @param sortColumns is a string array of columns that need to be sorted.
+ *                    If it is null, by default all dimensions are selected for sorting
+ *                    If it is an empty array, no columns are sorted
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder sortBy(String[] sortColumns);
 </code></pre>
 <pre><code>/**
-* sets the taskNo for the writer. SDKs concurrently running
-* will set taskNo in order to avoid conflicts in file's name during write.
-* @param taskNo is the TaskNo user wants to specify.
-*               by default it is system time in nano seconds.
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the taskNo for the writer. SDKs concurrently running
+ * will set taskNo in order to avoid conflicts in file's name during write.
+ *
+ * @param taskNo is the TaskNo user wants to specify.
+ *               by default it is system time in nano seconds.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder taskNo(long taskNo);
 </code></pre>
 <pre><code>/**
-* To support the load options for sdk writer
-* @param options key,value pair of load options.
-*                supported keys values are
-*                a. bad_records_logger_enable -- true (write into separate logs), false
-*                b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
-*                c. bad_record_path -- path
-*                d. dateformat -- same as JAVA SimpleDateFormat
-*                e. timestampformat -- same as JAVA SimpleDateFormat
-*                f. complex_delimiter_level_1 -- value to Split the complexTypeData
-*                g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
-*                h. quotechar
-*                i. escapechar
-*                
-*                Default values are as follows.
-*
-*                a. bad_records_logger_enable -- "false"
-*                b. bad_records_action -- "FAIL"
-*                c. bad_record_path -- ""
-*                d. dateformat -- "" , uses from carbon.properties file
-*                e. timestampformat -- "", uses from carbon.properties file
-*                f. complex_delimiter_level_1 -- "$"
-*                g. complex_delimiter_level_2 -- ":"
-*                h. quotechar -- "\""
-*                i. escapechar -- "\\"
-*
-* @return updated CarbonWriterBuilder
-*/
+ * To support the load options for sdk writer
+ * @param options key,value pair of load options.
+ *                supported keys values are
+ *                a. bad_records_logger_enable -- true (write into separate logs), false
+ *                b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
+ *                c. bad_record_path -- path
+ *                d. dateformat -- same as JAVA SimpleDateFormat
+ *                e. timestampformat -- same as JAVA SimpleDateFormat
+ *                f. complex_delimiter_level_1 -- value to Split the complexTypeData
+ *                g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
+ *                h. quotechar
+ *                i. escapechar
+ *                
+ *                Default values are as follows.
+ *
+ *                a. bad_records_logger_enable -- "false"
+ *                b. bad_records_action -- "FAIL"
+ *                c. bad_record_path -- ""
+ *                d. dateformat -- "" , uses from carbon.properties file
+ *                e. timestampformat -- "", uses from carbon.properties file
+ *                f. complex_delimiter_level_1 -- "$"
+ *                g. complex_delimiter_level_2 -- ":"
+ *                h. quotechar -- "\""
+ *                i. escapechar -- "\\"
+ *
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withLoadOptions(Map&lt;String, String&gt; options);
 </code></pre>
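+<p>A minimal usage sketch of the load options API (option values are illustrative; assumes java.util.HashMap and the path/schema variables from the earlier example):</p>
+<pre><code>Map&lt;String, String&gt; loadOptions = new HashMap&lt;&gt;();
+loadOptions.put("bad_records_logger_enable", "true");
+loadOptions.put("bad_records_action", "REDIRECT");
+loadOptions.put("bad_record_path", "./target/badRecords");
+CarbonWriter writer = CarbonWriter.builder()
+    .outputPath(path)
+    .withLoadOptions(loadOptions)
+    .withCsvInput(schema)
+    .writtenBy("SDK")
+    .build();
+</code></pre>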
 <pre><code>/**
-* To support the table properties for sdk writer
-*
-* @param options key,value pair of create table properties.
-* supported keys values are
-* a. table_blocksize -- [1-2048] values in MB. Default value is 1024
-* b. table_blocklet_size -- values in MB. Default value is 64 MB
-* c. local_dictionary_threshold -- positive value, default is 10000
-* d. local_dictionary_enable -- true / false. Default is false
-* e. sort_columns -- comma separated column. "c1,c2". Default all dimensions are sorted.
-                     If empty string "" is passed. No columns are sorted
-* j. sort_scope -- "local_sort", "no_sort", "batch_sort". default value is "local_sort"
-* k. long_string_columns -- comma separated string columns which are more than 32k length. 
-*                           default value is null.
-* l. inverted_index -- comma separated string columns for which inverted index needs to be
-*                      generated
-*
-* @return updated CarbonWriterBuilder
-*/
+ * To support the table properties for sdk writer
+ *
+ * @param options key,value pair of create table properties.
+ * supported keys values are
+ * a. table_blocksize -- [1-2048] values in MB. Default value is 1024
+ * b. table_blocklet_size -- values in MB. Default value is 64 MB
+ * c. local_dictionary_threshold -- positive value, default is 10000
+ * d. local_dictionary_enable -- true / false. Default is false
+ * e. sort_columns -- comma separated columns, e.g. "c1,c2". By default no columns are sorted.
+ * j. sort_scope -- "local_sort", "no_sort", "batch_sort". default value is "no_sort"
+ * k. long_string_columns -- comma separated string columns which are more than 32k length. 
+ *                           default value is null.
+ * l. inverted_index -- comma separated string columns for which inverted index needs to be
+ *                      generated
+ *
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withTableProperties(Map&lt;String, String&gt; options);
 </code></pre>
 <pre><code>/**
-* To make sdk writer thread safe.
-*
-* @param numOfThreads should number of threads in which writer is called in multi-thread scenario
-*                     default sdk writer is not thread safe.
-*                     can use one writer instance in one thread only.
-* @return updated CarbonWriterBuilder
-*/
+ * To make sdk writer thread safe.
+ *
+ * @param numOfThreads number of threads from which the writer is called in a multi-thread scenario
+ *                     default sdk writer is not thread safe.
+ *                     can use one writer instance in one thread only.
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withThreadSafe(short numOfThreads);
 </code></pre>
 <pre><code>/**
-* To support hadoop configuration
-*
-* @param conf hadoop configuration support, can set s3a AK,SK,end point and other conf with this
-* @return updated CarbonWriterBuilder
-*/
+ * To support hadoop configuration
+ *
+ * @param conf hadoop configuration support, can set s3a AK,SK,end point and other conf with this
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withHadoopConf(Configuration conf)
 </code></pre>
-<pre><code>  /**
-   * Updates the hadoop configuration with the given key value
-   *
-   * @param key   key word
-   * @param value value
-   * @return this object
-   */
-  public CarbonWriterBuilder withHadoopConf(String key, String value);
+<pre><code>/**
+ * Updates the hadoop configuration with the given key value
+ *
+ * @param key   key word
+ * @param value value
+ * @return this object
+ */
+public CarbonWriterBuilder withHadoopConf(String key, String value);
 </code></pre>
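+<p>For example, S3 credentials can be set through this API when writing to an object store (bucket and keys are placeholders; assumes the schema variable from the earlier example):</p>
+<pre><code>CarbonWriter writer = CarbonWriter.builder()
+    .outputPath("s3a://mybucket/carbonstore")
+    .withHadoopConf("fs.s3a.access.key", "&lt;access-key&gt;")
+    .withHadoopConf("fs.s3a.secret.key", "&lt;secret-key&gt;")
+    .withCsvInput(schema)
+    .writtenBy("SDK")
+    .build();
+</code></pre>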
 <pre><code>/**
-* to build a {@link CarbonWriter}, which accepts row in CSV format
-*
-* @param schema carbon Schema object {org.apache.carbondata.sdk.file.Schema}
-* @return CarbonWriterBuilder
-*/
+ * To build a {@link CarbonWriter}, which accepts row in CSV format
+ *
+ * @param schema carbon Schema object {org.apache.carbondata.sdk.file.Schema}
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withCsvInput(Schema schema);
 </code></pre>
 <pre><code>/**
-* to build a {@link CarbonWriter}, which accepts Avro object
-*
-* @param avroSchema avro Schema object {org.apache.avro.Schema}
-* @return CarbonWriterBuilder
-*/
+ * To build a {@link CarbonWriter}, which accepts Avro object
+ *
+ * @param avroSchema avro Schema object {org.apache.avro.Schema}
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withAvroInput(org.apache.avro.Schema avroSchema);
 </code></pre>
 <pre><code>/**
-* to build a {@link CarbonWriter}, which accepts Json object
-*
-* @param carbonSchema carbon Schema object
-* @return CarbonWriterBuilder
-*/
+ * To build a {@link CarbonWriter}, which accepts Json object
+ *
+ * @param carbonSchema carbon Schema object
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder withJsonInput(Schema carbonSchema);
 </code></pre>
 <pre><code>/**
-* To support writing the ApplicationName which is writing the carbondata file
-* This is a mandatory API to call, else the build() call will fail with error.
-* @param application name which is writing the carbondata files
-* @return CarbonWriterBuilder
-*/
+ * To record the name of the application that is writing the carbondata files.
+ * This is a mandatory API to call; otherwise the build() call will fail with an error.
+ * @param appName name of the application which is writing the carbondata files
+ * @return CarbonWriterBuilder
+ */
 public CarbonWriterBuilder writtenBy(String appName) {
 </code></pre>
 <pre><code>/**
-* sets the list of columns for which inverted index needs to generated
-* @param invertedIndexColumns is a string array of columns for which inverted index needs to
-* generated.
-* If it is null or an empty array, inverted index will be generated for none of the columns
-* @return updated CarbonWriterBuilder
-*/
+ * Sets the list of columns for which an inverted index needs to be generated
+ *
+ * @param invertedIndexColumns is a string array of columns for which an inverted index needs to
+ * be generated.
+ * If it is null or an empty array, inverted index will be generated for none of the columns
+ * @return updated CarbonWriterBuilder
+ */
 public CarbonWriterBuilder invertedIndexFor(String[] invertedIndexColumns);
 </code></pre>
 <pre><code>/**
-* Build a {@link CarbonWriter}
-* This writer is not thread safe,
-* use withThreadSafe() configuration in multi thread environment
-* 
-* @return CarbonWriter {AvroCarbonWriter/CSVCarbonWriter/JsonCarbonWriter based on Input Type }
-* @throws IOException
-* @throws InvalidLoadOptionException
-*/
+ * Build a {@link CarbonWriter}
+ * This writer is not thread safe,
+ * use withThreadSafe() configuration in multi thread environment
+ * 
+ * @return CarbonWriter {AvroCarbonWriter/CSVCarbonWriter/JsonCarbonWriter based on Input Type }
+ * @throws IOException
+ * @throws InvalidLoadOptionException
+ */
 public CarbonWriter build() throws IOException, InvalidLoadOptionException;
 </code></pre>
-<pre><code> /**
-   * Configure Row Record Reader for reading.
-   *
-   */
-  public CarbonReaderBuilder withRowRecordReader()
+<pre><code>/**
+ * Configure Row Record Reader for reading.
+ *
+ */
+public CarbonReaderBuilder withRowRecordReader()
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonwriter" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonwriter" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonWriter</h3>
 <pre><code>/**
-* Create a {@link CarbonWriterBuilder} to build a {@link CarbonWriter}
-*/
+ * Create a {@link CarbonWriterBuilder} to build a {@link CarbonWriter}
+ */
 public static CarbonWriterBuilder builder() {
     return new CarbonWriterBuilder();
 }
 </code></pre>
 <pre><code>/**
-* Write an object to the file, the format of the object depends on the implementation
-* If AvroCarbonWriter, object is of type org.apache.avro.generic.GenericData.Record, 
-*                      which is one row of data.
-* If CSVCarbonWriter, object is of type String[], which is one row of data
-* If JsonCarbonWriter, object is of type String, which is one row of json
-* @param object
-* @throws IOException
-*/
+ * Write an object to the file, the format of the object depends on the implementation
+ * If AvroCarbonWriter, object is of type org.apache.avro.generic.GenericData.Record, 
+ *                      which is one row of data.
+ * If CSVCarbonWriter, object is of type String[], which is one row of data
+ * If JsonCarbonWriter, object is of type String, which is one row of json
+ *
+ * @param object
+ * @throws IOException
+ */
 public abstract void write(Object object) throws IOException;
 </code></pre>
 <pre><code>/**
-* Flush and close the writer
-*/
+ * Flush and close the writer
+ */
 public abstract void close() throws IOException;
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilefield" class="anchor" href="#class-orgapachecarbondatasdkfilefield" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Field</h3>
 <pre><code>/**
-* Field Constructor
-* @param name name of the field
-* @param type datatype of field, specified in strings.
-*/
+ * Field Constructor
+ *
+ * @param name name of the field
+ * @param type datatype of field, specified in strings.
+ */
 public Field(String name, String type);
 </code></pre>
 <pre><code>/**
-* Field constructor
-* @param name name of the field
-* @param type datatype of the field of class DataType
-*/
+ * Field constructor
+ *
+ * @param name name of the field
+ * @param type datatype of the field of class DataType
+ */
 public Field(String name, DataType type);  
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfileschema" class="anchor" href="#class-orgapachecarbondatasdkfileschema" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Schema</h3>
 <pre><code>/**
-* construct a schema with fields
-* @param fields
-*/
+ * Construct a schema with fields
+ *
+ * @param fields
+ */
 public Schema(Field[] fields);
 </code></pre>
 <pre><code>/**
-* Create a Schema using JSON string, for example:
-* [
-*   {"name":"string"},
-*   {"age":"int"}
-* ] 
-* @param json specified as string
-* @return Schema
-*/
+ * Create a Schema using JSON string, for example:
+ * [
+ *   {"name":"string"},
+ *   {"age":"int"}
+ * ] 
+ * @param json specified as string
+ * @return Schema
+ */
 public static Schema parseJson(String json);
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfileavrocarbonwriter" class="anchor" href="#class-orgapachecarbondatasdkfileavrocarbonwriter" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.AvroCarbonWriter</h3>
 <pre><code>/**
-* converts avro schema to carbon schema, required by carbonWriter
-*
-* @param avroSchemaString json formatted avro schema as string
-* @return carbon sdk schema
-*/
+ * Converts avro schema to carbon schema, required by carbonWriter
+ *
+ * @param avroSchemaString json formatted avro schema as string
+ * @return carbon sdk schema
+ */
 public static org.apache.carbondata.sdk.file.Schema getCarbonSchemaFromAvroSchema(String avroSchemaString);
 </code></pre>
 <h1>
@@ -816,123 +826,123 @@ public static org.apache.carbondata.sdk.file.Schema getCarbonSchemaFromAvroSchem
 External client can make use of this reader to read CarbonData files without CarbonSession.</p>
 <h2>
 <a id="quick-example-1" class="anchor" href="#quick-example-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Quick example</h2>
-<pre><code>    // 1. Create carbon reader
-    String path = "./testWriteFiles";
-    CarbonReader reader = CarbonReader
-        .builder(path, "_temp")
-        .projection(new String[]{"stringField", "shortField", "intField", "longField", 
-                "doubleField", "boolField", "dateField", "timeField", "decimalField"})
-        .build();
+<pre><code>// 1. Create carbon reader
+String path = "./testWriteFiles";
+CarbonReader reader = CarbonReader
+    .builder(path, "_temp")
+    .projection(new String[]{"stringField", "shortField", "intField", "longField", 
+            "doubleField", "boolField", "dateField", "timeField", "decimalField"})
+    .build();
 
-    // 2. Read data
-    long day = 24L * 3600 * 1000;
-    int i = 0;
-    while (reader.hasNext()) {
-        Object[] row = (Object[]) reader.readNextRow();
-        System.out.println(String.format("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t",
-            i, row[0], row[1], row[2], row[3], row[4], row[5],
-            new Date((day * ((int) row[6]))), new Timestamp((long) row[7] / 1000), row[8]
-        ));
-        i++;
-    }
+// 2. Read data
+long day = 24L * 3600 * 1000;
+int i = 0;
+while (reader.hasNext()) {
+    Object[] row = (Object[]) reader.readNextRow();
+    System.out.println(String.format("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t",
+        i, row[0], row[1], row[2], row[3], row[4], row[5],
+        new Date((day * ((int) row[6]))), new Timestamp((long) row[7] / 1000), row[8]
+    ));
+    i++;
+}
 
-    // 3. Close this reader
-    reader.close();
+// 3. Close this reader
+reader.close();
 </code></pre>
 <p>Find example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/java/org/apache/carbondata/examples/sdk/CarbonReaderExample.java" target=_blank>CarbonReaderExample</a> in the CarbonData repo.</p>
 <h2>
 <a id="api-list-1" class="anchor" href="#api-list-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>API List</h2>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonreader" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonreader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonReader</h3>
-<pre><code>   /**
-    * Return a new {@link CarbonReaderBuilder} instance
-    *
-    * @param tablePath table store path
-    * @param tableName table name
-    * @return CarbonReaderBuilder object
-    */
-  public static CarbonReaderBuilder builder(String tablePath, String tableName);
+<pre><code>/**
+ * Return a new {@link CarbonReaderBuilder} instance
+ *
+ * @param tablePath table store path
+ * @param tableName table name
+ * @return CarbonReaderBuilder object
+ */
+public static CarbonReaderBuilder builder(String tablePath, String tableName);
 </code></pre>
-<pre><code>  /**
-   * Return a new CarbonReaderBuilder instance
-   * Default value of table name is table + tablePath + time
-   *
-   * @param tablePath table path
-   * @return CarbonReaderBuilder object
-   */
-  public static CarbonReaderBuilder builder(String tablePath);
+<pre><code>/**
+ * Return a new CarbonReaderBuilder instance
+ * Default value of table name is table + tablePath + time
+ *
+ * @param tablePath table path
+ * @return CarbonReaderBuilder object
+ */
+public static CarbonReaderBuilder builder(String tablePath);
 </code></pre>
 <pre><code>/**
-  * Breaks the list of CarbonRecordReader in CarbonReader into multiple
-  * CarbonReader objects, each iterating through some 'carbondata' files
-  * and return that list of CarbonReader objects
-  *
-  * If the no. of files is greater than maxSplits, then break the
-  * CarbonReader into maxSplits splits, with each split iterating
-  * through &gt;= 1 file.
-  *
-  * If the no. of files is less than maxSplits, then return list of
-  * CarbonReader with size as the no. of files, with each CarbonReader
-  * iterating through exactly one file
-  *
-  * @param maxSplits: Int
-  * @return list of CarbonReader objects
-  */
-  public List&lt;CarbonReader&gt; split(int maxSplits);
+ * Breaks the list of CarbonRecordReader in CarbonReader into multiple
+ * CarbonReader objects, each iterating through some 'carbondata' files,
+ * and returns that list of CarbonReader objects.
+ *
+ * If the no. of files is greater than maxSplits, then breaks the
+ * CarbonReader into maxSplits splits, with each split iterating
+ * through &gt;= 1 file.
+ *
+ * If the no. of files is less than maxSplits, then returns a list of
+ * CarbonReader with size equal to the no. of files, with each CarbonReader
+ * iterating through exactly one file.
+ *
+ * @param maxSplits the maximum number of splits
+ * @return list of CarbonReader objects
+ */
+public List&lt;CarbonReader&gt; split(int maxSplits);
 </code></pre>
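<p>A minimal sketch of parallel reading with split (the path and split count are illustrative; each returned reader can be consumed independently, for example in its own thread):</p>
<pre><code>CarbonReader reader = CarbonReader.builder("./testWriteFiles").build();
List&lt;CarbonReader&gt; readers = reader.split(4);
for (CarbonReader subReader : readers) {
  while (subReader.hasNext()) {
    Object[] row = (Object[]) subReader.readNextRow();
    // process the row ...
  }
  subReader.close();
}
</code></pre>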
-<pre><code>  /**
-   * Return true if has next row
-   */
-  public boolean hasNext();
+<pre><code>/**
+ * Return true if there is a next row
+ */
+public boolean hasNext();
 </code></pre>
-<pre><code>  /**
-   * Read and return next row object
-   */
-  public T readNextRow();
+<pre><code>/**
+ * Read and return next row object
+ */
+public T readNextRow();
 </code></pre>
-<pre><code>  /**
-   * Read and return next batch row objects
-   */
-  public Object[] readNextBatchRow();
+<pre><code>/**
+ * Read and return next batch row objects
+ */
+public Object[] readNextBatchRow();
 </code></pre>
-<pre><code>  /**
-   * Close reader
-   */
-  public void close();
+<pre><code>/**
+ * Close reader
+ */
+public void close();
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonreaderbuilder" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonreaderbuilder" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonReaderBuilder</h3>
-<pre><code>  /**
-   * Construct a CarbonReaderBuilder with table path and table name
-   *
-   * @param tablePath table path
-   * @param tableName table name
-   */
-  CarbonReaderBuilder(String tablePath, String tableName);
+<pre><code>/**
+ * Construct a CarbonReaderBuilder with table path and table name
+ *
+ * @param tablePath table path
+ * @param tableName table name
+ */
+CarbonReaderBuilder(String tablePath, String tableName);
 </code></pre>
-<pre><code>  /**
-   * Configure the projection column names of carbon reader
-   *
-   * @param projectionColumnNames projection column names
-   * @return CarbonReaderBuilder object
-   */
-  public CarbonReaderBuilder projection(String[] projectionColumnNames);
+<pre><code>/**
+ * Configure the projection column names of carbon reader
+ *
+ * @param projectionColumnNames projection column names
+ * @return CarbonReaderBuilder object
+ */
+public CarbonReaderBuilder projection(String[] projectionColumnNames);
 </code></pre>
-<pre><code> /**
-  * Configure the filter expression for carbon reader
-  *
-  * @param filterExpression filter expression
-  * @return CarbonReaderBuilder object
-  */
-  public CarbonReaderBuilder filter(Expression filterExpression);
+<pre><code>/**
+ * Configure the filter expression for carbon reader
+ *
+ * @param filterExpression filter expression
+ * @return CarbonReaderBuilder object
+ */
+public CarbonReaderBuilder filter(Expression filterExpression);
 </code></pre>
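<p>A sketch of pushing a filter into the reader, assuming the expression classes from org.apache.carbondata.core.scan.expression (the column name and value are illustrative):</p>
<pre><code>ColumnExpression column = new ColumnExpression("age", DataTypes.INT);
LiteralExpression literal = new LiteralExpression("25", DataTypes.INT);
EqualToExpression equalTo = new EqualToExpression(column, literal);

CarbonReader reader = CarbonReader
    .builder("./testWriteFiles", "_temp")
    .projection(new String[]{"name", "age"})
    .filter(equalTo)
    .build();
</code></pre>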
-<pre><code>  /**
-   * Sets the batch size of records to read
-   *
-   * @param batch batch size
-   * @return updated CarbonReaderBuilder
-   */
-  public CarbonReaderBuilder withBatch(int batch);
+<pre><code>/**
+ * Sets the batch size of records to read
+ *
+ * @param batch batch size
+ * @return updated CarbonReaderBuilder
+ */
+public CarbonReaderBuilder withBatch(int batch);
 </code></pre>
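<p>A sketch combining withBatch and readNextBatchRow (the batch size is illustrative; each element of the returned batch is itself a row):</p>
<pre><code>CarbonReader reader = CarbonReader
    .builder("./testWriteFiles", "_temp")
    .withBatch(1000)
    .build();
while (reader.hasNext()) {
  Object[] batch = reader.readNextBatchRow();
  for (Object object : batch) {
    Object[] row = (Object[]) object;
    // process the row ...
  }
}
reader.close();
</code></pre>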
 <pre><code>/**
  * To support hadoop configuration
@@ -940,139 +950,168 @@ External client can make use of this reader to read CarbonData files without Car
 * @param conf hadoop configuration support, can set s3a access key, secret key, endpoint and other conf with this
  * @return updated CarbonReaderBuilder
  */
- public CarbonReaderBuilder withHadoopConf(Configuration conf);
+public CarbonReaderBuilder withHadoopConf(Configuration conf);
 </code></pre>
-<pre><code>  /**
-   * Updates the hadoop configuration with the given key value
-   *
-   * @param key   key word
-   * @param value value
-   * @return this object
-   */
-  public CarbonReaderBuilder withHadoopConf(String key, String value);
+<pre><code>/**
+ * Updates the hadoop configuration with the given key value
+ *
+ * @param key   key word
+ * @param value value
+ * @return this object
+ */
+public CarbonReaderBuilder withHadoopConf(String key, String value);
 </code></pre>
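<p>A sketch of reading from S3 by passing credentials through the hadoop configuration (the keys are standard hadoop s3a properties; the values and bucket path are placeholders):</p>
<pre><code>Configuration conf = new Configuration();
conf.set("fs.s3a.access.key", "&lt;access key&gt;");
conf.set("fs.s3a.secret.key", "&lt;secret key&gt;");
conf.set("fs.s3a.endpoint", "&lt;endpoint&gt;");

CarbonReader reader = CarbonReader
    .builder("s3a://bucket/path", "_temp")
    .withHadoopConf(conf)
    .build();
</code></pre>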
-<pre><code> /**
-   * Build CarbonReader
-   *
-   * @param &lt;T&gt;
-   * @return CarbonReader
-   * @throws IOException
-   * @throws InterruptedException
-   */
-  public &lt;T&gt; CarbonReader&lt;T&gt; build();
+<pre><code>/**
+ * Build CarbonReader
+ *
+ * @param &lt;T&gt;
+ * @return CarbonReader
+ * @throws IOException
+ * @throws InterruptedException
+ */
+public &lt;T&gt; CarbonReader&lt;T&gt; build();
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilecarbonschemareader" class="anchor" href="#class-orgapachecarbondatasdkfilecarbonschemareader" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.CarbonSchemaReader</h3>
-<pre><code>  /**
-   * Read schema file and return the schema
-   *
-   * @param schemaFilePath complete path including schema file name
-   * @return schema object
-   * @throws IOException
-   */
-  @Deprecated
-  public static Schema readSchemaInSchemaFile(String schemaFilePath);
+<pre><code>/**
+ * Read schema file and return the schema
+ *
+ * @param schemaFilePath complete path including schema file name
+ * @return schema object
+ * @throws IOException
+ */
+@Deprecated
+public static Schema readSchemaInSchemaFile(String schemaFilePath);
 </code></pre>
-<pre><code>  /**
-   * Read carbondata file and return the schema
-   *
-   * @param dataFilePath complete path including carbondata file name
-   * @return Schema object
-   */
-  @Deprecated
-  public static Schema readSchemaInDataFile(String dataFilePath);
+<pre><code>/**
+ * Read carbondata file and return the schema
+ *
+ * @param dataFilePath complete path including carbondata file name
+ * @return Schema object
+ */
+@Deprecated
+public static Schema readSchemaInDataFile(String dataFilePath);
 </code></pre>
-<pre><code>  /**
-   * Read carbonindex file and return the schema
-   *
-   * @param indexFilePath complete path including index file name
-   * @return schema object
-   * @throws IOException
-   */
-  @Deprecated
-  public static Schema readSchemaInIndexFile(String indexFilePath);
+<pre><code>/**
+ * Read carbonindex file and return the schema
+ *
+ * @param indexFilePath complete path including index file name
+ * @return schema object
+ * @throws IOException
+ */
+@Deprecated
+public static Schema readSchemaInIndexFile(String indexFilePath);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, or carbondata file path,
+ * and will not check all files' schema
+ *
+ * @param path file/folder path
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path);
+</code></pre>
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, or carbondata file path,
+ * and user can decide whether to check all files' schema
+ *
+ * @param path             file/folder path
+ * @param validateSchema whether to check all files' schema
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path, boolean validateSchema);
 </code></pre>
-<pre><code>  /**
-   * read schema from path,
-   * path can be folder path,carbonindex file path, and carbondata file path
-   * and will not check all files schema
-   *
-   * @param path file/folder path
-   * @return schema
-   * @throws IOException
-   */
-  public static Schema readSchema(String path);
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, or carbondata file path,
+ * and will not check all files' schema
+ *
+ * @param path file/folder path
+ * @param conf hadoop configuration support, can set s3a access key, secret key, endpoint and other conf with this
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path, Configuration conf);
 </code></pre>
-<pre><code>  /**
-   * read schema from path,
-   * path can be folder path,carbonindex file path, and carbondata file path
-   * and user can decide whether check all files schema
-   *
-   * @param path             file/folder path
-   * @param validateSchema whether check all files schema
-   * @return schema
-   * @throws IOException
-   */
-  public static Schema readSchema(String path, boolean validateSchema);
+<pre><code>/**
+ * Read schema from path,
+ * path can be folder path, carbonindex file path, or carbondata file path,
+ * and user can decide whether to check all files' schema
+ *
+ * @param path           file/folder path
+ * @param validateSchema whether to check all files' schema
+ * @param conf           hadoop configuration support, can set s3a access key,
+ *                       secret key, endpoint and other conf with this
+ * @return schema
+ * @throws IOException
+ */
+public static Schema readSchema(String path, boolean validateSchema, Configuration conf);
 </code></pre>
-<pre><code>  /**
-   * This method return the version details in formatted string by reading from carbondata file
-   * If application name is SDK_1.0.0 and this has written the carbondata file in carbondata 1.6 project version,
-   * then this API returns the String "SDK_1.0.0 in version: 1.6.0-SNAPSHOT"
-   * @param dataFilePath complete path including carbondata file name
-   * @return string with information of who has written this file in which carbondata project version
-   * @throws IOException
-   */
-  public static String getVersionDetails(String dataFilePath);
+<pre><code>/**
+ * This method returns the version details as a formatted string by reading from the carbondata file.
+ * If the application name is SDK_1.0.0 and it has written the carbondata file with carbondata project version 1.6,
+ * then this API returns the String "SDK_1.0.0 in version: 1.6.0-SNAPSHOT"
+ *
+ * @param dataFilePath complete path including carbondata file name
+ * @return string with information of who has written this file in which carbondata project version
+ * @throws IOException
+ */
+public static String getVersionDetails(String dataFilePath);
 </code></pre>
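<p>A minimal sketch of the schema-reading APIs above (the paths, including the carbondata file name, are illustrative):</p>
<pre><code>// read the schema from a folder without checking every file
Schema schema = CarbonSchemaReader.readSchema("./testWriteFiles");

// read the schema and verify that all files agree on it
Schema validated = CarbonSchemaReader.readSchema("./testWriteFiles", true);

// find out which application and carbondata version wrote a file
String version = CarbonSchemaReader.getVersionDetails(
    "./testWriteFiles/example.carbondata");
</code></pre>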
 <h3>
 <a id="class-orgapachecarbondatasdkfileschema-1" class="anchor" href="#class-orgapachecarbondatasdkfileschema-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Schema</h3>
-<pre><code>  /**
-   * construct a schema with fields
-   * @param fields
-   */
-  public Schema(Field[] fields);
+<pre><code>/**
+ * Construct a schema with fields
+ *
+ * @param fields
+ */
+public Schema(Field[] fields);
 </code></pre>
-<pre><code>  /**
-   * construct a schema with List&lt;ColumnSchema&gt;
-   *
-   * @param columnSchemaList column schema list
-   */
-  public Schema(List&lt;ColumnSchema&gt; columnSchemaList);
+<pre><code>/**
+ * Construct a schema with List&lt;ColumnSchema&gt;
+ *
+ * @param columnSchemaList column schema list
+ */
+public Schema(List&lt;ColumnSchema&gt; columnSchemaList);
 </code></pre>
-<pre><code>  /**
-   * Create a Schema using JSON string, for example:
-   * [
-   *   {"name":"string"},
-   *   {"age":"int"}
-   * ]
-   * @param json specified as string
-   * @return Schema
-   */
-  public static Schema parseJson(String json);
+<pre><code>/**
+ * Create a Schema using JSON string, for example:
+ * [
+ *   {"name":"string"},
+ *   {"age":"int"}
+ * ]
+ * @param json specified as string
+ * @return Schema
+ */
+public static Schema parseJson(String json);
 </code></pre>
-<pre><code>  /**
-   * Sort the schema order as original order
-   *
-   * @return Schema object
-   */
-  public Schema asOriginOrder();
+<pre><code>/**
+ * Sort the schema fields into their original order
+ *
+ * @return Schema object
+ */
+public Schema asOriginOrder();
 </code></pre>
 <h3>
 <a id="class-orgapachecarbondatasdkfilefield-1" class="anchor" href="#class-orgapachecarbondatasdkfilefield-1" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.sdk.file.Field</h3>
-<pre><code>  /**
-   * Field Constructor
-   * @param name name of the field
-   * @param type datatype of field, specified in strings.
-   */
-  public Field(String name, String type);
+<pre><code>/**
+ * Field Constructor
+ *
+ * @param name name of the field
+ * @param type datatype of the field, specified as a string.
+ */
+public Field(String name, String type);
 </code></pre>
-<pre><code>  /**
-   * Construct Field from ColumnSchema
-   *
-   * @param columnSchema ColumnSchema, Store the information about the column meta data
-   */
-  public Field(ColumnSchema columnSchema);
+<pre><code>/**
+ * Construct Field from ColumnSchema
+ *
+ * @param columnSchema ColumnSchema, Store the information about the column meta data
+ */
+public Field(ColumnSchema columnSchema);
 </code></pre>
 <p>Find S3 example code at <a href="https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/java/org/apache/carbondata/examples/sdk/SDKS3Example.java" target=_blank>SDKS3Example</a> in the CarbonData repo.</p>
 <h1>
@@ -1080,38 +1119,38 @@ External client can make use of this reader to read CarbonData files without Car
 <h3>
 <a id="class-orgapachecarbondatacoreutilcarbonproperties" class="anchor" href="#class-orgapachecarbondatacoreutilcarbonproperties" aria-hidden="true"><span aria-hidden="true" class="octicon octicon-link"></span></a>Class org.apache.carbondata.core.util.CarbonProperties</h3>
 <pre><code>/**
-* This method will be responsible to get the instance of CarbonProperties class
-*
-* @return carbon properties instance
-*/
+ * This method is responsible for getting the instance of the CarbonProperties class
+ *
+ * @return carbon properties instance
+ */
 public static CarbonProperties getInstance();
 </code></pre>
 <pre><code>/**
-* This method will be used to add a new property
-*
-* @param key is a property name to set for carbon.
-* @param value is valid parameter corresponding to property.
-* @return CarbonProperties object
-*/
+ * This method will be used to add a new property
+ *
+ * @param key is a property name to set for carbon.
+ * @param value is a valid value corresponding to the property.
+ * @return CarbonProperties object
+ */
 public CarbonProperties addProperty(String key, String value);
 </code></pre>
 <pre><code>/**
-* This method will be used to get the property value. If property is not
-* present, then it will return the default value.
-*
-* @param key is a property name to get user specified value.
-* @return properties value for corresponding key. If not set, then returns null.
-*/
+ * This method will be used to get the property value. If the property is
+ * not present, then it will return null.
+ *
+ * @param key is a property name to get user specified value.
+ * @return properties value for corresponding key. If not set, then returns null.
+ */
 public String getProperty(String key);
 </code></pre>
 <pre><code>/**
-* This method will be used to get the property value. If property is not
-* present, then it will return the default value.
-*
-* @param key is a property name to get user specified value..
-* @param defaultValue used to be returned by function if corrosponding key not set.
-* @return properties value for corresponding key. If not set, then returns specified defaultValue.
-*/
+ * This method will be used to get the property value. If property is not
+ * present, then it will return the default value.
+ *
+ * @param key is a property name to get user specified value.
+ * @param defaultValue value to be returned by the function if the corresponding key is not set.
+ * @return properties value for corresponding key. If not set, then returns the specified defaultValue.
+ */
 public String getProperty(String key, String defaultValue);
 </code></pre>
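<p>A sketch of setting and reading properties (the property key is illustrative; see the referenced list of carbon properties below for valid keys):</p>
<pre><code>CarbonProperties properties = CarbonProperties.getInstance();
properties.addProperty("carbon.timestamp.format", "yyyy-MM-dd HH:mm:ss");

String format = properties.getProperty("carbon.timestamp.format");
String fallback = properties.getProperty("some.unset.key", "defaultValue");
</code></pre>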
 <p>Reference : <a href="./configuration-parameters.html">list of carbon properties</a></p>
diff --git a/src/main/webapp/security.html b/src/main/webapp/security.html
index 75a2f65..dccfddf 100644
--- a/src/main/webapp/security.html
+++ b/src/main/webapp/security.html
@@ -45,6 +45,9 @@
                            aria-expanded="false">Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -60,9 +63,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
                                    target="_blank">Apache CarbonData 1.3.1</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.0/"
-                                   target="_blank">Apache CarbonData 1.3.0</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/segment-management-on-carbondata.html b/src/main/webapp/segment-management-on-carbondata.html
index dae0d0e..30bca2e 100644
--- a/src/main/webapp/segment-management-on-carbondata.html
+++ b/src/main/webapp/segment-management-on-carbondata.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
diff --git a/src/main/webapp/streaming-guide.html b/src/main/webapp/streaming-guide.html
index 8d8cb82..4c45380 100644
--- a/src/main/webapp/streaming-guide.html
+++ b/src/main/webapp/streaming-guide.html
@@ -52,6 +52,9 @@
                            aria-expanded="false"> Download <span class="caret"></span></a>
                         <ul class="dropdown-menu">
                             <li>
+                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.2/"
+                                   target="_blank">Apache CarbonData 1.5.2</a></li>
+                            <li>
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.5.1/"
                                    target="_blank">Apache CarbonData 1.5.1</a></li>
                             <li>
@@ -64,9 +67,6 @@
                                 <a href="https://dist.apache.org/repos/dist/release/carbondata/1.4.0/"
                                    target="_blank">Apache CarbonData 1.4.0</a></li>
                             <li>
-                                <a href="https://dist.apache.org/repos/dist/release/carbondata/1.3.1/"
-                                   target="_blank">Apache CarbonData 1.3.1</a></li>
-                            <li>
                                 <a href="https://cwiki.apache.org/confluence/display/CARBONDATA/Releases"
                                    target="_blank">Release Archive</a></li>
                         </ul>
@@ -258,7 +258,7 @@
 <p>Package carbon jar, and copy assembly/target/scala-2.11/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar to $SPARK_HOME/jars</p>
... 4353 lines suppressed ...