Posted to commits@carbondata.apache.org by ja...@apache.org on 2018/10/24 14:23:02 UTC

[1/2] carbondata git commit: [CARBONDATA-3034] Carding parameters, Organized by parameter category.

Repository: carbondata
Updated Branches:
  refs/heads/master 278d17178 -> e4806b9a0
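
A quick orientation before the diff: the constants regrouped below are the keys and
defaults for CarbonData's system, table, load and query level settings. As a minimal
sketch of how a system level key is typically consumed -- assuming the CarbonProperties
singleton API (getInstance/addProperty/getProperty), which is not part of this patch --
a property declared in CarbonCommonConstants can be read and overridden like this:

    // Minimal sketch, not part of this commit; assumes the CarbonProperties singleton API.
    import org.apache.carbondata.core.constants.CarbonCommonConstants;
    import org.apache.carbondata.core.util.CarbonProperties;

    public class CarbonPropertyUsageSketch {
      public static void main(String[] args) {
        CarbonProperties props = CarbonProperties.getInstance();

        // Override the timestamp parsing format for this JVM (system level property).
        props.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd HH:mm:ss");

        // Read it back, falling back to the default declared next to the key.
        String tsFormat = props.getProperty(
            CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
            CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT);

        // The same pattern applies to other keys in the file, e.g. the lock type.
        String lockType = props.getProperty(
            CarbonCommonConstants.LOCK_TYPE,
            CarbonCommonConstants.LOCK_TYPE_DEFAULT);

        System.out.println("timestamp format = " + tsFormat + ", lock type = " + lockType);
      }
    }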


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e4806b9a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index c5f8335..5085b5f 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -23,86 +23,28 @@ import org.apache.carbondata.common.annotations.InterfaceStability;
 import org.apache.carbondata.core.util.CarbonProperty;
 
 public final class CarbonCommonConstants {
-  /**
-   * surrogate value of null
-   */
-  public static final int DICT_VALUE_NULL = 1;
-  /**
-   * surrogate value of null for direct dictionary
-   */
-  public static final int DIRECT_DICT_VALUE_NULL = 1;
-  /**
-   * integer size in bytes
-   */
-  public static final int INT_SIZE_IN_BYTE = 4;
-  /**
-   * short size in bytes
-   */
-  public static final int SHORT_SIZE_IN_BYTE = 2;
-  /**
-   * DOUBLE size in bytes
-   */
-  public static final int DOUBLE_SIZE_IN_BYTE = 8;
-  /**
-   * LONG size in bytes
-   */
-  public static final int LONG_SIZE_IN_BYTE = 8;
-  /**
-   * byte to KB conversion factor
-   */
-  public static final int BYTE_TO_KB_CONVERSION_FACTOR = 1024;
-  /**
-   * BYTE_ENCODING
-   */
-  public static final String BYTE_ENCODING = "ISO-8859-1";
-  /**
-   * measure meta data file name
-   */
-  public static final String MEASURE_METADATA_FILE_NAME = "/msrMetaData_";
-
-  /**
-   * set the segment ids to query from the table
-   */
-  public static final String CARBON_INPUT_SEGMENTS = "carbon.input.segments.";
-
-  /**
-   * key prefix for set command. 'carbon.datamap.visible.dbName.tableName.dmName = false' means
-   * that the query on 'dbName.table' will not use the datamap 'dmName'
-   */
-  @InterfaceStability.Unstable
-  public static final String CARBON_DATAMAP_VISIBLE = "carbon.datamap.visible.";
 
-  /**
-   * Fetch and validate the segments.
-   * Used for aggregate table load as segment validation is not required.
-   */
-  public static final String VALIDATE_CARBON_INPUT_SEGMENTS = "validate.carbon.input.segments.";
-
-  /**
-   * Whether load/insert command is fired internally or by the user.
-   * Used to block load/insert on pre-aggregate if fired by user
-   */
-  public static final String IS_INTERNAL_LOAD_CALL = "is.internal.load.call";
+  private CarbonCommonConstants() {
+  }
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // System level properties start here
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // System level properties are the global properties for the
+  // CarbonData application. They are stored in a singleton instance
+  // so that all processing logic in CarbonData uses the same
+  // property value.
 
   /**
    * location of the carbon member, hierarchy and fact files
    */
   @CarbonProperty
   public static final String STORE_LOCATION = "carbon.storelocation";
+
   /**
    * blocklet size in carbon file
    */
   public static final String BLOCKLET_SIZE = "carbon.blocklet.size";
-  /**
-   * carbon sort size
-   */
-  @Deprecated
-  @CarbonProperty
-  public static final String SORT_SIZE = "carbon.sort.size";
-  /**
-   * CARDINALITY_INCREMENT_DEFAULT_VALUE
-   */
-  public static final int CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL = 10;
+
   /**
    * default blocklet size
    */
@@ -128,18 +70,6 @@ public final class CarbonCommonConstants {
    */
   public static final int BLOCK_SIZE_MAX_VAL = 2048;
   /**
-   * default value of number of cores to be used
-   */
-  public static final String NUM_CORES_DEFAULT_VAL = "2";
-  /**
-   * default carbon sort size
-   */
-  public static final String SORT_SIZE_DEFAULT_VAL = "100000";
-  /**
-   * min carbon sort size
-   */
-  public static final int SORT_SIZE_MIN_VAL = 1000;
-  /**
    * carbon properties file path
    */
   public static final String CARBON_PROPERTIES_FILE_PATH = "../../../conf/carbon.properties";
@@ -149,321 +79,400 @@ public final class CarbonCommonConstants {
   @CarbonProperty
   public static final String CARBON_DDL_BASE_HDFS_URL = "carbon.ddl.base.hdfs.url";
   /**
-   * Load Folder Name
+   * CARBON_BADRECORDS_LOCATION
    */
-  public static final String LOAD_FOLDER = "Segment_";
-
-  public static final String HDFSURL_PREFIX = "hdfs://";
-
-  public static final String LOCAL_FILE_PREFIX = "file://";
-
-  public static final String VIEWFSURL_PREFIX = "viewfs://";
-
-  public static final String ALLUXIOURL_PREFIX = "alluxio://";
-
-  public static final String S3_PREFIX = "s3://";
-
-  public static final String S3N_PREFIX = "s3n://";
-
-  public static final String S3A_PREFIX = "s3a://";
+  @CarbonProperty
+  public static final String CARBON_BADRECORDS_LOC = "carbon.badRecords.location";
   /**
-   * Access Key for s3n
+   * CARBON_BADRECORDS_LOCATION_DEFAULT
    */
-  public static final String S3N_ACCESS_KEY = "fs.s3n.awsAccessKeyId";
+  public static final String CARBON_BADRECORDS_LOC_DEFAULT_VAL = "";
   /**
-   * Secret Key for s3n
+   * CARBON_TIMESTAMP_DEFAULT_FORMAT
    */
-  public static final String S3N_SECRET_KEY = "fs.s3n.awsSecretAccessKey";
+  public static final String CARBON_TIMESTAMP_DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";
   /**
-   * Access Key for s3
+   * Property for specifying the format of TIMESTAMP data type column.
+   * e.g. yyyy/MM/dd HH:mm:ss, or using CARBON_TIMESTAMP_DEFAULT_FORMAT
    */
-  public static final String S3_ACCESS_KEY = "fs.s3.awsAccessKeyId";
+  @CarbonProperty
+  public static final String CARBON_TIMESTAMP_FORMAT = "carbon.timestamp.format";
+
   /**
-   * Secret Key for s3
+   * CARBON_DATE_DEFAULT_FORMAT
    */
-  public static final String S3_SECRET_KEY = "fs.s3.awsSecretAccessKey";
-
+  public static final String CARBON_DATE_DEFAULT_FORMAT = "yyyy-MM-dd";
   /**
-   * FS_DEFAULT_FS
+   * Property for specifying the format of DATE data type column.
+   * e.g. yyyy/MM/dd , or using CARBON_DATE_DEFAULT_FORMAT
    */
   @CarbonProperty
-  public static final String FS_DEFAULT_FS = "fs.defaultFS";
+  public static final String CARBON_DATE_FORMAT = "carbon.date.format";
   /**
-   * BYTEBUFFER_SIZE
+   * CARBON_TIMESTAMP
    */
+  public static final String CARBON_TIMESTAMP = "dd-MM-yyyy HH:mm:ss";
 
-  public static final int BYTEBUFFER_SIZE = 24 * 1024;
-  /**
-   * Average constant
-   */
-  public static final String AVERAGE = "avg";
   /**
-   * Count constant
+   * CARBON_TIMESTAMP_MILLIS
    */
-  public static final String COUNT = "count";
+  public static final String CARBON_TIMESTAMP_MILLIS = "dd-MM-yyyy HH:mm:ss:SSS";
   /**
-   * SUM
+   * compressor for writing/reading carbondata file
    */
-  public static final String SUM = "sum";
+  public static final String COMPRESSOR = "carbon.column.compressor";
+
   /**
-   * DUMMY aggregation function
+   * default compressor is snappy
    */
-  public static final String DUMMY = "dummy";
+  public static final String DEFAULT_COMPRESSOR = "snappy";
   /**
-   * MEMBER_DEFAULT_VAL
+   * LOCK_TYPE specifies the type of lock (for example ZOOKEEPERLOCK or LOCALLOCK) to be used
+   * for the locking mechanism of carbon
    */
-  public static final String MEMBER_DEFAULT_VAL = "@NU#LL$!";
+  @CarbonProperty
+  public static final String LOCK_TYPE = "carbon.lock.type";
 
   /**
-   * default charset to be used for reading and writing
+   * Specifies the path where the lock files have to be created.
+   * By default, lock files are created in table path.
    */
-  public static final String DEFAULT_CHARSET = "UTF-8";
+  @CarbonProperty
+  public static final String LOCK_PATH = "carbon.lock.path";
 
   /**
-   * MEMBER_DEFAULT_VAL_ARRAY
+   * LOCK_TYPE_DEFAULT the default lock type is LOCALLOCK
    */
-  public static final byte[] MEMBER_DEFAULT_VAL_ARRAY =
-      MEMBER_DEFAULT_VAL.getBytes(Charset.forName(DEFAULT_CHARSET));
+  public static final String LOCK_TYPE_DEFAULT = "LOCALLOCK";
 
   /**
-   * Bytes for string 0, it is used in codegen in case of null values.
+   * ZOOKEEPER_LOCATION this is the location in the zookeeper file system where the
+   * lock files are created
    */
-  public static final byte[] ZERO_BYTE_ARRAY = "0".getBytes(Charset.forName(DEFAULT_CHARSET));
-
+  public static final String ZOOKEEPER_LOCATION = "/CarbonLocks";
   /**
-   * Empty byte array
+   * xxhash algorithm property for hashmap
    */
-  public static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+  public static final String ENABLE_XXHASH = "carbon.enableXXHash";
 
   /**
-   * FILE STATUS IN-PROGRESS
+   * xxhash algorithm property for hashmap. Default value true
    */
-  public static final String FILE_INPROGRESS_STATUS = ".inprogress";
+  public static final String ENABLE_XXHASH_DEFAULT = "true";
   /**
-   * CARBON_BADRECORDS_LOCATION
+   * System property to enable or disable local dictionary generation
    */
-  @CarbonProperty
-  public static final String CARBON_BADRECORDS_LOC = "carbon.badRecords.location";
+  public static final String LOCAL_DICTIONARY_SYSTEM_ENABLE = "carbon.local.dictionary.enable";
+
   /**
-   * CARBON_BADRECORDS_LOCATION_DEFAULT
+   * System property to enable or disable decoder based local dictionary fallback
    */
-  public static final String CARBON_BADRECORDS_LOC_DEFAULT_VAL = "";
+  public static final String LOCAL_DICTIONARY_DECODER_BASED_FALLBACK =
+      "carbon.local.dictionary.decoder.fallback";
+
   /**
-   * HIERARCHY_FILE_EXTENSION
+   * Default value for decoder based local dictionary fallback
    */
-  public static final String HIERARCHY_FILE_EXTENSION = ".hierarchy";
+  public static final String LOCAL_DICTIONARY_DECODER_BASED_FALLBACK_DEFAULT = "true";
   /**
-   * SORT_TEMP_FILE_LOCATION
+   * zookeeper url key
    */
-  public static final String SORT_TEMP_FILE_LOCATION = "sortrowtmp";
+  public static final String ZOOKEEPER_URL = "spark.deploy.zookeeper.url";
   /**
-   * CARBON_RESULT_SIZE_DEFAULT
+   * carbon data file version property
    */
-  public static final String LEVEL_FILE_EXTENSION = ".level";
+  public static final String CARBON_DATA_FILE_VERSION = "carbon.data.file.version";
+  @CarbonProperty
+  public static final String SPARK_SCHEMA_STRING_LENGTH_THRESHOLD =
+      "spark.sql.sources.schemaStringLengthThreshold";
+
+  public static final int SPARK_SCHEMA_STRING_LENGTH_THRESHOLD_DEFAULT = 4000;
+
+  @CarbonProperty
+  public static final String CARBON_BAD_RECORDS_ACTION = "carbon.bad.records.action";
+
   /**
-   * FACT_FILE_EXT
+   * FAIL action will fail the load in case of bad records in loading data
    */
-  public static final String FACT_FILE_EXT = ".carbondata";
+  public static final String CARBON_BAD_RECORDS_ACTION_DEFAULT = "FAIL";
+
+  public static final String ENABLE_HIVE_SCHEMA_META_STORE = "spark.carbon.hive.schema.store";
+
+  public static final String ENABLE_HIVE_SCHEMA_META_STORE_DEFAULT = "false";
 
   /**
-   * DELETE_DELTA_FILE_EXT
+   * In production it is common to use different drivers for load and for queries. In case of
+   * the load driver, the user should set this property to enable loader specific clean up.
    */
-  public static final String DELETE_DELTA_FILE_EXT = ".deletedelta";
+  public static final String DATA_MANAGEMENT_DRIVER = "spark.carbon.datamanagement.driver";
+
+  public static final String DATA_MANAGEMENT_DRIVER_DEFAULT = "true";
+
+  public static final String CARBON_SESSIONSTATE_CLASSNAME = "spark.carbon.sessionstate.classname";
 
   /**
-   * UPDATE_DELTA_FILE_EXT
+   * This property will be used to configure the sqlastbuilder class.
    */
-  public static final String UPDATE_DELTA_FILE_EXT = FACT_FILE_EXT;
+  public static final String CARBON_SQLASTBUILDER_CLASSNAME =
+      "spark.carbon.sqlastbuilder.classname";
+  @CarbonProperty
+  public static final String CARBON_LEASE_RECOVERY_RETRY_COUNT =
+      "carbon.lease.recovery.retry.count";
+  @CarbonProperty
+  public static final String CARBON_LEASE_RECOVERY_RETRY_INTERVAL =
+      "carbon.lease.recovery.retry.interval";
+
+  @CarbonProperty
+  public static final String CARBON_SECURE_DICTIONARY_SERVER =
+      "carbon.secure.dictionary.server";
+
+  public static final String CARBON_SECURE_DICTIONARY_SERVER_DEFAULT = "true";
   /**
-   * MEASUREMETADATA_FILE_EXT
+   * ENABLE_CALCULATE_DATA_INDEX_SIZE
    */
-  public static final String MEASUREMETADATA_FILE_EXT = ".msrmetadata";
+  @CarbonProperty public static final String ENABLE_CALCULATE_SIZE = "carbon.enable.calculate.size";
+
   /**
-   * Comment for <code>TYPE_MYSQL</code>
+   * DEFAULT_ENABLE_CALCULATE_DATA_INDEX_SIZE
    */
-  public static final String TYPE_MYSQL = "MYSQL";
-  /**
-   * Comment for <code>TYPE_MSSQL</code>
-   */
-  public static final String TYPE_MSSQL = "MSSQL";
+  public static final String DEFAULT_ENABLE_CALCULATE_SIZE = "true";
   /**
-   * Comment for <code>TYPE_ORACLE</code>
+   * this will be used to skip / ignore empty lines while loading
    */
-  public static final String TYPE_ORACLE = "ORACLE";
+  @CarbonProperty public static final String CARBON_SKIP_EMPTY_LINE = "carbon.skip.empty.line";
+
+  public static final String CARBON_SKIP_EMPTY_LINE_DEFAULT = "false";
   /**
-   * Comment for <code>TYPE_SYBASE</code>
+   * Currently the segment lock files are not deleted immediately when unlocked;
+   * this value indicates the number of hours the segment lock files will be preserved.
    */
-  public static final String TYPE_SYBASE = "SYBASE";
+  @CarbonProperty
+  public static final String CARBON_SEGMENT_LOCK_FILES_PRESERVE_HOURS =
+      "carbon.segment.lock.files.preserve.hours";
+
+  // default value is 2 days
+  public static final String CARBON_SEGMENT_LOCK_FILES_PRESERVE_HOURS_DEFAULT = "48";
+
   /**
-   * SORT_INTERMEDIATE_FILES_LIMIT
+   * The number of invisible segment entries to be preserved in the tablestatus file;
+   * if it exceeds this value, they will be removed and written to the tablestatus.history file.
    */
   @CarbonProperty
-  public static final String SORT_INTERMEDIATE_FILES_LIMIT = "carbon.sort.intermediate.files.limit";
+  public static final String CARBON_INVISIBLE_SEGMENTS_PRESERVE_COUNT =
+      "carbon.invisible.segments.preserve.count";
+
   /**
-   * SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE
+   * default value is 200, which means that 200 invisible segment entries will be preserved
+   * in the tablestatus file.
+   * The size of one segment entry is about 500 bytes, so the size of the tablestatus file
+   * will stay around 100KB.
    */
-  public static final String SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE = "20";
-
-  public static final int SORT_INTERMEDIATE_FILES_LIMIT_MIN = 2;
+  public static final String CARBON_INVISIBLE_SEGMENTS_PRESERVE_COUNT_DEFAULT = "200";
 
-  public static final int SORT_INTERMEDIATE_FILES_LIMIT_MAX = 50;
   /**
-   * BAD_RECORD_KEY_VALUE
+   * System folder location to store system level data like datamap schema and status files.
    */
-  public static final String BAD_RECORD_KEY = "BADRECORD";
+  public static final String CARBON_SYSTEM_FOLDER_LOCATION = "carbon.system.folder.location";
   /**
-   * MERGERD_EXTENSION
+   * It is an internal configuration and used only for test purposes.
+   * It will merge the carbon index files within the segment into a single file.
    */
-  public static final String MERGERD_EXTENSION = ".merge";
+  @CarbonProperty
+  public static final String CARBON_MERGE_INDEX_IN_SEGMENT =
+      "carbon.merge.index.in.segment";
+
+  public static final String CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT = "true";
   /**
-   * SORT_FILE_WRITE_BUFFER_SIZE
+   * property to specify the max byte limit for string/varchar data type up to which
+   * storing min/max in the data file will be considered
    */
-  @CarbonProperty
-  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE =
-      "carbon.sort.file.write.buffer.size";
-  public static final int CARBON_SORT_FILE_WRITE_BUFFER_SIZE_MIN = 10240;
-  public static final int CARBON_SORT_FILE_WRITE_BUFFER_SIZE_MAX = 10485760;
+  public static final String CARBON_MINMAX_ALLOWED_BYTE_COUNT =
+      "carbon.minmax.allowed.byte.count";
+  public static final String CARBON_MINMAX_ALLOWED_BYTE_COUNT_DEFAULT = "200";
+
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Table level properties start here
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Table level properties are the per-table properties of a Carbon table
+
+  // Flat folder support on table. When it is true, all carbondata files are stored directly under
+  // the table path instead of in sub folders.
+  public static final String FLAT_FOLDER = "flat_folder";
   /**
-   * SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE
+   * DEFAULT_FLAT_FOLDER
    */
-  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE = "16384";
+  public static final String DEFAULT_FLAT_FOLDER = "false";
+  public static final String DICTIONARY_EXCLUDE = "dictionary_exclude";
+  public static final String DICTIONARY_INCLUDE = "dictionary_include";
+  public static final String LONG_STRING_COLUMNS = "long_string_columns";
+
   /**
-   * Number of cores to be used while loading
+   * Table property to enable or disable local dictionary generation
    */
-  @CarbonProperty
-  public static final String NUM_CORES_LOADING = "carbon.number.of.cores.while.loading";
+  public static final String LOCAL_DICTIONARY_ENABLE = "local_dictionary_enable";
+
   /**
-   * Number of cores to be used while compacting
+   * default value for local dictionary generation
    */
-  @CarbonProperty
-  public static final String NUM_CORES_COMPACTING = "carbon.number.of.cores.while.compacting";
+  public static final String LOCAL_DICTIONARY_ENABLE_DEFAULT = "false";
   /**
-   * Number of cores to be used while alter partition
+   * Threshold value for local dictionary
    */
-  public static final String NUM_CORES_ALT_PARTITION = "carbon.number.of.cores.while.altPartition";
+  public static final String LOCAL_DICTIONARY_THRESHOLD = "local_dictionary_threshold";
+
   /**
-   * Default value of number of cores to be used for block sort
+   * default value for local dictionary
    */
-  public static final String NUM_CORES_BLOCK_SORT_DEFAULT_VAL = "7";
+  public static final String LOCAL_DICTIONARY_THRESHOLD_DEFAULT = "10000";
+
   /**
-   * Max value of number of cores to be used for block sort
+   * max dictionary threshold
    */
-  public static final int NUM_CORES_BLOCK_SORT_MAX_VAL = 12;
+  public static final int LOCAL_DICTIONARY_MAX = 100000;
+
   /**
-   * Min value of number of cores to be used for block sort
+   * min dictionary threshold
    */
-  public static final int NUM_CORES_BLOCK_SORT_MIN_VAL = 1;
+  public static final int LOCAL_DICTIONARY_MIN = 1000;
+
   /**
-   * CSV_READ_BUFFER_SIZE
+   * Table property to specify the columns for which local dictionary needs to be generated.
    */
-  @CarbonProperty
-  public static final String CSV_READ_BUFFER_SIZE = "carbon.csv.read.buffersize.byte";
+  public static final String LOCAL_DICTIONARY_INCLUDE = "local_dictionary_include";
+
   /**
-   * CSV_READ_BUFFER_SIZE
+   * Table property to specify the columns for which local dictionary should not be generated.
    */
-  public static final String CSV_READ_BUFFER_SIZE_DEFAULT = "1048576"; // 1mb
+  public static final String LOCAL_DICTIONARY_EXCLUDE = "local_dictionary_exclude";
+
   /**
-   * min value for csv read buffer size
+   * key for dictionary path
    */
-  public static final int CSV_READ_BUFFER_SIZE_MIN = 10240; //10 kb
+  public static final String DICTIONARY_PATH = "dictionary_path";
+  public static final String SORT_COLUMNS = "sort_columns";
+  public static final String PARTITION_TYPE = "partition_type";
+  public static final String NUM_PARTITIONS = "num_partitions";
+  public static final String RANGE_INFO = "range_info";
+  public static final String LIST_INFO = "list_info";
+  public static final String COLUMN_PROPERTIES = "columnproperties";
+  // table block size in MB
+  public static final String TABLE_BLOCKSIZE = "table_blocksize";
+  // table blocklet size in MB
+  public static final String TABLE_BLOCKLET_SIZE = "table_blocklet_size";
+  // set in column level to disable inverted index
+  public static final String NO_INVERTED_INDEX = "no_inverted_index";
+  // table property name of major compaction size
+  public static final String TABLE_MAJOR_COMPACTION_SIZE = "major_compaction_size";
+  // table property name of auto load merge
+  public static final String TABLE_AUTO_LOAD_MERGE = "auto_load_merge";
+  // table property name of compaction level threshold
+  public static final String TABLE_COMPACTION_LEVEL_THRESHOLD = "compaction_level_threshold";
+  // table property name of preserve segments numbers while compaction
+  public static final String TABLE_COMPACTION_PRESERVE_SEGMENTS = "compaction_preserve_segments";
+  // table property name of allowed compaction days while compaction
+  public static final String TABLE_ALLOWED_COMPACTION_DAYS = "allowed_compaction_days";
   /**
-   * max value for csv read buffer size
+   * property to be specified for caching min/max of required columns
    */
-  public static final int CSV_READ_BUFFER_SIZE_MAX = 10485760; // 10 mb
-
+  public static final String COLUMN_META_CACHE = "column_meta_cache";
   /**
-   * CSV_READ_COPIES
+   * property to be specified for caching level (Block/Blocklet)
    */
-  public static final String DEFAULT_NUMBER_CORES = "2";
+  public static final String CACHE_LEVEL = "cache_level";
   /**
-   * CSV_FILE_EXTENSION
+   * default value for cache level
    */
-  public static final String CSV_FILE_EXTENSION = ".csv";
+  public static final String CACHE_LEVEL_DEFAULT_VALUE = "BLOCK";
+
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Data loading parameters start here
+  //////////////////////////////////////////////////////////////////////////////////////////
 
   /**
-   * LOG_FILE_EXTENSION
+   * Number of cores to be used while loading
    */
-  public static final String LOG_FILE_EXTENSION = ".log";
-
+  @CarbonProperty
+  public static final String NUM_CORES_LOADING = "carbon.number.of.cores.while.loading";
   /**
-   * COLON_SPC_CHARACTER
+   * default value of number of cores to be used
    */
-  public static final String COLON_SPC_CHARACTER = ":!@#COLON#@!:";
+  public static final String NUM_CORES_DEFAULT_VAL = "2";
   /**
-   * HASH_SPC_CHARATER
+   * Number of cores to be used while compacting
    */
-  public static final String HASH_SPC_CHARACTER = "#!@:HASH:@!#";
+  @CarbonProperty
+  public static final String NUM_CORES_COMPACTING = "carbon.number.of.cores.while.compacting";
   /**
-   * SEMICOLON_SPC_CHARATER
+   * Number of cores to be used while alter partition
    */
-  public static final String SEMICOLON_SPC_CHARACTER = ";#!@:SEMIC:@!#;";
+  public static final String NUM_CORES_ALT_PARTITION = "carbon.number.of.cores.while.altPartition";
   /**
-   * AMPERSAND_SPC_CHARATER
+   * default carbon sort size
    */
-  public static final String AMPERSAND_SPC_CHARACTER = "&#!@:AMPER:@!#&";
+  public static final String SORT_SIZE_DEFAULT_VAL = "100000";
   /**
-   * ATTHERATE_SPC_CHARATER
+   * min carbon sort size
    */
-  public static final String COMA_SPC_CHARACTER = ",#!:COMA:!#,";
+  public static final int SORT_SIZE_MIN_VAL = 1000;
   /**
-   * HYPHEN_SPC_CHARACTER
+   * BYTEBUFFER_SIZE
    */
-  public static final String HYPHEN_SPC_CHARACTER = "-#!:HYPHEN:!#-";
+  public static final int BYTEBUFFER_SIZE = 24 * 1024;
   /**
-   * SORT_TEMP_FILE_EXT
+   * SORT_TEMP_FILE_LOCATION
    */
-  public static final String SORT_TEMP_FILE_EXT = ".sorttemp";
+  public static final String SORT_TEMP_FILE_LOCATION = "sortrowtmp";
   /**
-   * CARBON_MERGE_SORT_READER_THREAD
+   * SORT_INTERMEDIATE_FILES_LIMIT
    */
   @CarbonProperty
-  public static final String CARBON_MERGE_SORT_READER_THREAD = "carbon.merge.sort.reader.thread";
+  public static final String SORT_INTERMEDIATE_FILES_LIMIT = "carbon.sort.intermediate.files.limit";
   /**
-   * CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE
+   * SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE
    */
-  public static final String CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE = "3";
+  public static final String SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE = "20";
+
+  public static final int SORT_INTERMEDIATE_FILES_LIMIT_MIN = 2;
+
+  public static final int SORT_INTERMEDIATE_FILES_LIMIT_MAX = 50;
   /**
-   * DEFAULT_COLLECTION_SIZE
+   * SORT_FILE_WRITE_BUFFER_SIZE
    */
-  public static final int DEFAULT_COLLECTION_SIZE = 16;
+  @CarbonProperty
+  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE =
+      "carbon.sort.file.write.buffer.size";
+  public static final int CARBON_SORT_FILE_WRITE_BUFFER_SIZE_MIN = 10240;
+  public static final int CARBON_SORT_FILE_WRITE_BUFFER_SIZE_MAX = 10485760;
   /**
-   * CARBON_TIMESTAMP_DEFAULT_FORMAT
+   * SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE
    */
-  public static final String CARBON_TIMESTAMP_DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";
+  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE = "16384";
   /**
-   * Property for specifying the format of TIMESTAMP data type column.
-   * e.g. yyyy/MM/dd HH:mm:ss, or using CARBON_TIMESTAMP_DEFAULT_FORMAT
+   * CSV_READ_BUFFER_SIZE
    */
   @CarbonProperty
-  public static final String CARBON_TIMESTAMP_FORMAT = "carbon.timestamp.format";
-
-  /**
-   * CARBON_DATE_DEFAULT_FORMAT
-   */
-  public static final String CARBON_DATE_DEFAULT_FORMAT = "yyyy-MM-dd";
+  public static final String CSV_READ_BUFFER_SIZE = "carbon.csv.read.buffersize.byte";
   /**
-   * Property for specifying the format of DATE data type column.
-   * e.g. yyyy/MM/dd , or using CARBON_DATE_DEFAULT_FORMAT
+   * CSV_READ_BUFFER_SIZE
    */
-  @CarbonProperty
-  public static final String CARBON_DATE_FORMAT = "carbon.date.format";
+  public static final String CSV_READ_BUFFER_SIZE_DEFAULT = "1048576"; // 1mb
   /**
-   * DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE
+   * min value for csv read buffer size
    */
-  public static final String DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE = "1";
+  public static final int CSV_READ_BUFFER_SIZE_MIN = 10240; //10 kb
   /**
-   * IS_FULLY_FILLED_BITS_DEFAULT_VALUE
+   * max value for csv read buffer size
    */
-  public static final String IS_FULLY_FILLED_BITS_DEFAULT_VALUE = "true";
+  public static final int CSV_READ_BUFFER_SIZE_MAX = 10485760; // 10 mb
   /**
-   * ENABLE_QUERY_STATISTICS
+   * CARBON_MERGE_SORT_READER_THREAD
    */
   @CarbonProperty
-  public static final String ENABLE_QUERY_STATISTICS = "enable.query.statistics";
+  public static final String CARBON_MERGE_SORT_READER_THREAD = "carbon.merge.sort.reader.thread";
   /**
-   * ENABLE_QUERY_STATISTICS_DEFAULT
+   * CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE
    */
-  public static final String ENABLE_QUERY_STATISTICS_DEFAULT = "false";
+  public static final String CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE = "3";
   /**
    * TIME_STAT_UTIL_TYPE
    */
@@ -474,63 +483,12 @@ public final class CarbonCommonConstants {
    */
   public static final String ENABLE_DATA_LOADING_STATISTICS_DEFAULT = "false";
   /**
-   * CONSTANT_SIZE_TEN
-   */
-  public static final int CONSTANT_SIZE_TEN = 10;
-  /**
-   * LEVEL_METADATA_FILE
-   */
-  public static final String LEVEL_METADATA_FILE = "levelmetadata_";
-
-  /**
-   * COMMA
-   */
-  public static final String COMMA = ",";
-  /**
-   * UNDERSCORE
-   */
-  public static final String UNDERSCORE = "_";
-  /**
-   * DASH
-   */
-  public static final String DASH = "-";
-  /**
-   * POINT
-   */
-  public static final String POINT = ".";
-  /**
-   * Windows File separator
-   */
-  public static final String WINDOWS_FILE_SEPARATOR = "\\";
-  /**
-   * File separator
-   */
-  public static final String FILE_SEPARATOR = "/";
-  /**
-   * ARRAY separator
-   */
-  public static final String ARRAY_SEPARATOR = "\001";
-  /**
-   * MAX_QUERY_EXECUTION_TIME
-   */
-  @CarbonProperty
-  public static final String MAX_QUERY_EXECUTION_TIME = "max.query.execution.time";
-  /**
-   * CARBON_TIMESTAMP
-   */
-  public static final String CARBON_TIMESTAMP = "dd-MM-yyyy HH:mm:ss";
-
-  /**
-   * CARBON_TIMESTAMP
-   */
-  public static final String CARBON_TIMESTAMP_MILLIS = "dd-MM-yyyy HH:mm:ss:SSS";
-  /**
-   * NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK.
-   *
-   * Because we want concurrent loads to be completed even if they have to wait for the lock
-   * therefore taking the default as 100.
-   *
-   * Example: Concurrent loads will use this to wait to acquire the table status lock.
+   * NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK.
+   *
+   * Because we want concurrent loads to be completed even if they have to wait for the lock
+   * therefore taking the default as 100.
+   *
+   * Example: Concurrent loads will use this to wait to acquire the table status lock.
    */
   public static final int NUMBER_OF_TRIES_FOR_CONCURRENT_LOCK_DEFAULT = 100;
   /**
@@ -551,7 +509,6 @@ public final class CarbonCommonConstants {
   @CarbonProperty
   public static final String MAX_TIMEOUT_FOR_CONCURRENT_LOCK =
       "carbon.concurrent.lock.retry.timeout.sec";
-
   /**
    * NUMBER_OF_TRIES_FOR_CARBON_LOCK
    */
@@ -572,78 +529,6 @@ public final class CarbonCommonConstants {
   @CarbonProperty
   public static final String MAX_TIMEOUT_FOR_CARBON_LOCK =
       "carbon.lock.retry.timeout.sec";
-
-  /**
-   * compressor for writing/reading carbondata file
-   */
-  public static final String COMPRESSOR = "carbon.column.compressor";
-
-  /**
-   * default compressor is snappy
-   */
-  public static final String DEFAULT_COMPRESSOR = "snappy";
-
-  public static final String STRING = "String";
-
-  public static final String SHORT = "Short";
-  public static final String TIMESTAMP = "Timestamp";
-  public static final String ARRAY = "array";
-  public static final String STRUCT = "struct";
-  public static final String MAP = "map";
-  public static final String FROM = "from";
-  /**
-   * FACT_UPDATE_EXTENSION.
-   */
-  public static final String FACT_UPDATE_EXTENSION = ".carbondata_update";
-  public static final String FACT_DELETE_EXTENSION = "_delete";
-  /**
-   * MARKED_FOR_UPDATION
-   */
-  public static final String FACT_FILE_UPDATED = "update";
-  /**
-   * MAX_QUERY_EXECUTION_TIME
-   */
-  public static final int DEFAULT_MAX_QUERY_EXECUTION_TIME = 60;
-  /**
-   * TABLE UPDATE STATUS FILENAME
-   */
-  public static final String TABLEUPDATESTATUS_FILENAME = "tableupdatestatus";
-  /**
-   * The batch size of records which returns to client.
-   */
-  @CarbonProperty
-  public static final String DETAIL_QUERY_BATCH_SIZE = "carbon.detail.batch.size";
-
-  public static final int DETAIL_QUERY_BATCH_SIZE_DEFAULT = 100;
-  /**
-   * SPILL_OVER_DISK_PATH
-   */
-  public static final String SCHEMAS_MODIFIED_TIME_FILE = "modifiedTime.mdt";
-  public static final String DEFAULT_INVISIBLE_DUMMY_MEASURE = "default_dummy_measure";
-  public static final String CARBON_IMPLICIT_COLUMN_POSITIONID = "positionId";
-  public static final String CARBON_IMPLICIT_COLUMN_TUPLEID = "tupleId";
-  public static final String CARBON_IMPLICIT_COLUMN_SEGMENTID = "segId";
-  /**
-   * max driver lru cache size upto which lru cache will be loaded in memory
-   */
-  @CarbonProperty
-  public static final String CARBON_MAX_DRIVER_LRU_CACHE_SIZE = "carbon.max.driver.lru.cache.size";
-  public static final String POSITION_REFERENCE = "positionReference";
-  /**
-   * implicit column which will be added to each carbon table
-   */
-  public static final String POSITION_ID = "positionId";
-  /**
-   * max executor lru cache size upto which lru cache will be loaded in memory
-   */
-  @CarbonProperty
-  public static final String CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE =
-      "carbon.max.executor.lru.cache.size";
-  /**
-   * max lru cache size default value in MB
-   */
-  public static final String CARBON_MAX_LRU_CACHE_SIZE_DEFAULT = "-1";
-
   /**
    * CARBON_PREFETCH_BUFFERSIZE
    */
@@ -658,10 +543,6 @@ public final class CarbonCommonConstants {
    */
   public static final boolean CARBON_PREFETCH_IN_MERGE_VALUE = false;
   /**
-   * TEMPWRITEFILEEXTENSION
-   */
-  public static final String TEMPWRITEFILEEXTENSION = ".write";
-  /**
    * ENABLE_AUTO_LOAD_MERGE
    */
   @CarbonProperty
@@ -670,43 +551,15 @@ public final class CarbonCommonConstants {
    * DEFAULT_ENABLE_AUTO_LOAD_MERGE
    */
   public static final String DEFAULT_ENABLE_AUTO_LOAD_MERGE = "false";
-
-  /**
-   * DEFAULT_FLAT_FOLDER
-   */
-  public static final String DEFAULT_FLAT_FOLDER = "false";
-
-  /**
-   * ZOOKEEPER_ENABLE_LOCK if this is set to true then zookeeper will be used to handle locking
-   * mechanism of carbon
-   */
-  @CarbonProperty
-  public static final String LOCK_TYPE = "carbon.lock.type";
-
-  /**
-   * Specifies the path where the lock files have to be created.
-   * By default, lock files are created in table path.
-   */
-  @CarbonProperty
-  public static final String LOCK_PATH = "carbon.lock.path";
-
-  /**
-   * ZOOKEEPER_ENABLE_DEFAULT the default value for zookeeper will be true for carbon
-   */
-  public static final String LOCK_TYPE_DEFAULT = "LOCALLOCK";
-
-  /**
-   * ZOOKEEPER_LOCATION this is the location in zookeeper file system where locks are created.
-   * mechanism of carbon
-   */
-  public static final String ZOOKEEPER_LOCATION = "/CarbonLocks";
-
   /**
    * maximum dictionary chunk size that can be kept in memory while writing dictionary file
    */
   @CarbonProperty
   public static final String DICTIONARY_ONE_CHUNK_SIZE = "carbon.dictionary.chunk.size";
-
+  /**
+   * dictionary chunk default size
+   */
+  public static final String DICTIONARY_ONE_CHUNK_SIZE_DEFAULT = "10000";
   /**
    *  Dictionary Server Worker Threads
    */
@@ -717,45 +570,6 @@ public final class CarbonCommonConstants {
    *  Dictionary Server Worker Threads
    */
   public static final String DICTIONARY_WORKER_THREADS_DEFAULT = "1";
-
-  /**
-   * dictionary chunk default size
-   */
-  public static final String DICTIONARY_ONE_CHUNK_SIZE_DEFAULT = "10000";
-
-  /**
-   * xxhash algorithm property for hashmap
-   */
-  public static final String ENABLE_XXHASH = "carbon.enableXXHash";
-
-  /**
-   * xxhash algorithm property for hashmap. Default value false
-   */
-  public static final String ENABLE_XXHASH_DEFAULT = "true";
-
-  /**
-   * default charset class to be used for reading and writing
-   */
-  public static final Charset DEFAULT_CHARSET_CLASS = Charset.forName(DEFAULT_CHARSET);
-
-  /**
-   * surrogate key that will be sent whenever in the dictionary chunks
-   * a valid surrogate key is not found for a given dictionary value
-   */
-  public static final int INVALID_SURROGATE_KEY = -1;
-
-  /**
-   * surrogate key for MEMBER_DEFAULT_VAL
-   */
-  public static final int MEMBER_DEFAULT_VAL_SURROGATE_KEY = 1;
-
-  public static final String INVALID_SEGMENT_ID = "-1";
-
-  /**
-   * default load time of the segment
-   */
-  public static final long SEGMENT_LOAD_TIME_DEFAULT = -1;
-
   /**
    * Size of Major Compaction in MBs
    */
@@ -766,7 +580,6 @@ public final class CarbonCommonConstants {
    * By default size of major compaction in MBs.
    */
   public static final String DEFAULT_CARBON_MAJOR_COMPACTION_SIZE = "1024";
-
   /**
    * This property is used to tell how many segments to be preserved from merging.
    */
@@ -800,18 +613,6 @@ public final class CarbonCommonConstants {
    * default value for space reserved for writing block meta data in carbon data file
    */
   public static final String CARBON_BLOCK_META_RESERVED_SPACE_DEFAULT = "10";
-
-  /**
-   * property to enable min max during filter query
-   */
-  @CarbonProperty
-  public static final String CARBON_QUERY_MIN_MAX_ENABLED = "carbon.enableMinMax";
-
-  /**
-   * default value to enable min or max during filter query execution
-   */
-  public static final String MIN_MAX_DEFAULT_VALUE = "true";
-
   /**
    * this variable is to enable/disable prefetch of data during merge sort while
    * reading data from sort temp files
@@ -819,7 +620,6 @@ public final class CarbonCommonConstants {
   @CarbonProperty
   public static final String CARBON_MERGE_SORT_PREFETCH = "carbon.merge.sort.prefetch";
   public static final String CARBON_MERGE_SORT_PREFETCH_DEFAULT = "true";
-
   /**
    * If we are executing insert into query from source table using select statement
    * & loading the same source table concurrently, when select happens on source table
@@ -859,163 +659,33 @@ public final class CarbonCommonConstants {
    * http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence.
    */
   public static final String CARBON_INSERT_STORAGE_LEVEL_DEFAULT = "MEMORY_AND_DISK";
-
   /**
-   * default name of data base
+   * Number of unmerged segments to be merged.
    */
-  public static final String DATABASE_DEFAULT_NAME = "default";
-
-  public static final String DICTIONARY_EXCLUDE = "dictionary_exclude";
-  public static final String DICTIONARY_INCLUDE = "dictionary_include";
-  public static final String LONG_STRING_COLUMNS = "long_string_columns";
+  @CarbonProperty
+  public static final String COMPACTION_SEGMENT_LEVEL_THRESHOLD =
+      "carbon.compaction.level.threshold";
 
   /**
-   * Table property to enable or disable local dictionary generation
+   * Default count for Number of segments to be merged in levels is 4,3
    */
-  public static final String LOCAL_DICTIONARY_ENABLE = "local_dictionary_enable";
+  public static final String DEFAULT_SEGMENT_LEVEL_THRESHOLD = "4,3";
 
   /**
-   * default value for local dictionary generation
+   * Number of Update Delta files which is the Threshold for IUD compaction.
+   * Only accepted Range is 0 - 10000. Outside this range system will pick default value.
    */
-  public static final String LOCAL_DICTIONARY_ENABLE_DEFAULT = "false";
-
+  @CarbonProperty
+  public static final String UPDATE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION =
+      "carbon.horizontal.update.compaction.threshold";
   /**
-   * System property to enable or disable local dictionary generation
+   * Default count of segments which act as a threshold for IUD compaction merge.
    */
-  public static final String LOCAL_DICTIONARY_SYSTEM_ENABLE = "carbon.local.dictionary.enable";
+  public static final String DEFAULT_UPDATE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION = "1";
 
   /**
-   * System property to enable or disable decoder based local dictionary fallback
-   */
-  public static final String LOCAL_DICTIONARY_DECODER_BASED_FALLBACK =
-      "carbon.local.dictionary.decoder.fallback";
-
-  /**
-   * System property to enable or disable decoder based local dictionary fallback default value
-   */
-  public static final String LOCAL_DICTIONARY_DECODER_BASED_FALLBACK_DEFAULT = "true";
-  /**
-   * Threshold value for local dictionary
-   */
-  public static final String LOCAL_DICTIONARY_THRESHOLD = "local_dictionary_threshold";
-
-  /**
-   * default value for local dictionary
-   */
-  public static final String LOCAL_DICTIONARY_THRESHOLD_DEFAULT = "10000";
-
-  /**
-   * max dictionary threshold
-   */
-  public static final int LOCAL_DICTIONARY_MAX = 100000;
-
-  /**
-   * min dictionary threshold
-   */
-  public static final int LOCAL_DICTIONARY_MIN = 1000;
-
-  /**
-   * Table property to specify the columns for which local dictionary needs to be generated.
-   */
-  public static final String LOCAL_DICTIONARY_INCLUDE = "local_dictionary_include";
-
-  /**
-   * Table property to specify the columns for which local dictionary should not be to be generated.
-   */
-  public static final String LOCAL_DICTIONARY_EXCLUDE = "local_dictionary_exclude";
-
-  /**
-   * key for dictionary path
-   */
-  public static final String DICTIONARY_PATH = "dictionary_path";
-  public static final String SORT_COLUMNS = "sort_columns";
-  public static final String PARTITION_TYPE = "partition_type";
-  public static final String NUM_PARTITIONS = "num_partitions";
-  public static final String RANGE_INFO = "range_info";
-  public static final String LIST_INFO = "list_info";
-  public static final String COLUMN_PROPERTIES = "columnproperties";
-  // table block size in MB
-  public static final String TABLE_BLOCKSIZE = "table_blocksize";
-  // table blocklet size in MB
-  public static final String TABLE_BLOCKLET_SIZE = "table_blocklet_size";
-  // set in column level to disable inverted index
-  public static final String NO_INVERTED_INDEX = "no_inverted_index";
-  // table property name of major compaction size
-  public static final String TABLE_MAJOR_COMPACTION_SIZE = "major_compaction_size";
-  // table property name of auto load merge
-  public static final String TABLE_AUTO_LOAD_MERGE = "auto_load_merge";
-  // table property name of compaction level threshold
-  public static final String TABLE_COMPACTION_LEVEL_THRESHOLD = "compaction_level_threshold";
-  // table property name of preserve segments numbers while compaction
-  public static final String TABLE_COMPACTION_PRESERVE_SEGMENTS = "compaction_preserve_segments";
-  // table property name of allowed compaction days while compaction
-  public static final String TABLE_ALLOWED_COMPACTION_DAYS = "allowed_compaction_days";
-  // Flat folder support on table. when it is true all carbondata files store directly under table
-  // path instead of sub folders.
-  public static final String FLAT_FOLDER = "flat_folder";
-
-  /**
-   * 16 mb size
-   */
-  public static final long CARBON_16MB = 16 * 1024 * 1024;
-  /**
-   * 256 mb size
-   */
-  public static final long CARBON_256MB = 256 * 1024 * 1024;
-
-  /**
-   * ZOOKEEPERLOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_ZOOKEEPER = "ZOOKEEPERLOCK";
-
-  /**
-   * LOCALLOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_LOCAL = "LOCALLOCK";
-
-  /**
-   * HDFSLOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_HDFS = "HDFSLOCK";
-
-  /**
-   * S3LOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_S3 = "S3LOCK";
-
-  /**
-   * Invalid filter member log string
-   */
-  public static final String FILTER_INVALID_MEMBER =
-      " Invalid Record(s) are present while filter evaluation. ";
-
-  /**
-   * Number of unmerged segments to be merged.
-   */
-  @CarbonProperty
-  public static final String COMPACTION_SEGMENT_LEVEL_THRESHOLD =
-      "carbon.compaction.level.threshold";
-
-  /**
-   * Default count for Number of segments to be merged in levels is 4,3
-   */
-  public static final String DEFAULT_SEGMENT_LEVEL_THRESHOLD = "4,3";
-
-  /**
-   * Number of Update Delta files which is the Threshold for IUD compaction.
-   * Only accepted Range is 0 - 10000. Outside this range system will pick default value.
-   */
-  @CarbonProperty
-  public static final String UPDATE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION =
-      "carbon.horizontal.update.compaction.threshold";
-  /**
-   * Default count of segments which act as a threshold for IUD compaction merge.
-   */
-  public static final String DEFAULT_UPDATE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION = "1";
-
-  /**
-   * Number of Delete Delta files which is the Threshold for IUD compaction.
-   * Only accepted Range is 0 - 10000. Outside this range system will pick default value.
+   * Number of Delete Delta files which is the Threshold for IUD compaction.
+   * Only accepted Range is 0 - 10000. Outside this range system will pick default value.
    */
   @CarbonProperty
   public static final String DELETE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION =
@@ -1024,52 +694,6 @@ public final class CarbonCommonConstants {
    * Default count of segments which act as a threshold for IUD compaction merge.
    */
   public static final String DEFAULT_DELETE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION = "1";
-
-  /**
-   * default location of the carbon metastore db
-   */
-  public static final String METASTORE_LOCATION_DEFAULT_VAL = "../carbon.metastore";
-
-  /**
-   * hive connection url
-   */
-  public static final String HIVE_CONNECTION_URL = "javax.jdo.option.ConnectionURL";
-
-  /**
-   * If the level 2 compaction is done in minor then new compacted segment will end with .2
-   */
-  public static final String LEVEL2_COMPACTION_INDEX = ".2";
-
-  /**
-   * Indicates compaction
-   */
-  public static final String COMPACTION_KEY_WORD = "COMPACTION";
-
-  /**
-   * Indicates alter partition
-   */
-  public static final String ALTER_PARTITION_KEY_WORD = "ALTER_PARTITION";
-
-  /**
-   * hdfs temporary directory key
-   */
-  public static final String HDFS_TEMP_LOCATION = "hadoop.tmp.dir";
-
-  /**
-   * zookeeper url key
-   */
-  public static final String ZOOKEEPER_URL = "spark.deploy.zookeeper.url";
-
-  /**
-   * File created in case of minor compaction request
-   */
-  public static final String minorCompactionRequiredFile = "compactionRequired_minor";
-
-  /**
-   * File created in case of major compaction request
-   */
-  public static final String majorCompactionRequiredFile = "compactionRequired_major";
-
   /**
    * @Deprecated : This property has been deprecated.
    * Property for enabling system level compaction lock.1 compaction can run at once.
@@ -1082,12 +706,6 @@ public final class CarbonCommonConstants {
    * at once.
    */
   public static final String DEFAULT_ENABLE_CONCURRENT_COMPACTION = "true";
-
-  /**
-   * Compaction system level lock folder.
-   */
-  public static final String SYSTEM_LEVEL_COMPACTION_LOCK_FOLDER = "SystemCompactionLock";
-
   /**
    * This batch size is used to send rows from load step to another step in batches.
    */
@@ -1098,41 +716,6 @@ public final class CarbonCommonConstants {
    */
   public static final String DATA_LOAD_BATCH_SIZE_DEFAULT = "1000";
   /**
-   * carbon data file version property
-   */
-  public static final String CARBON_DATA_FILE_VERSION = "carbon.data.file.version";
-
-  /**
-   * Index file name will end with this extension when update.
-   */
-  public static final String UPDATE_INDEX_FILE_EXT = ".carbonindex";
-
-  /**
-   * Key word for true
-   */
-  public static final String KEYWORD_TRUE = "TRUE";
-
-  /**
-   * Key word for false
-   */
-  public static final String KEYWORD_FALSE = "FALSE";
-
-  /**
-   * hyphen
-   */
-  public static final String HYPHEN = "-";
-
-  /**
-   * columns which gets updated in update will have header ends with this extension.
-   */
-  public static final String UPDATED_COL_EXTENSION = "-updatedColumn";
-
-  /**
-   * appending the key to differentiate the update flow with insert flow.
-   */
-  public static final String RDDUTIL_UPDATE_KEY = "UPDATE_";
-
-  /**
    * to determine to use the rdd persist or not.
    */
   @CarbonProperty
@@ -1175,22 +758,6 @@ public final class CarbonCommonConstants {
    * http://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-persistence.
    */
   public static final String CARBON_UPDATE_STORAGE_LEVEL_DEFAULT = "MEMORY_AND_DISK";
-
-  /**
-   * current data file version
-   */
-  public static final String CARBON_DATA_FILE_DEFAULT_VERSION = "V3";
-
-  /**
-   * data file version header
-   */
-  public static final String CARBON_DATA_VERSION_HEADER = "CARBONDATAVERSION#";
-  /**
-   * Maximum no of column supported
-   */
-  public static final int DEFAULT_MAX_NUMBER_OF_COLUMNS = 20000;
-
-
   /**
    * to enable unsafe column page
    */
@@ -1229,9 +796,6 @@ public final class CarbonCommonConstants {
   public static final String OFFHEAP_SORT_CHUNK_SIZE_IN_MB = "offheap.sort.chunk.size.inmb";
 
   public static final String OFFHEAP_SORT_CHUNK_SIZE_IN_MB_DEFAULT = "64";
-  public static final String IN_MEMORY_FOR_SORT_DATA_IN_MB = "sort.inmemory.size.inmb";
-  public static final String IN_MEMORY_FOR_SORT_DATA_IN_MB_DEFAULT = "1024";
-
   @CarbonProperty
   public static final String UNSAFE_WORKING_MEMORY_IN_MB = "carbon.unsafe.working.memory.in.mb";
   public static final String UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT = "512";
@@ -1271,11 +835,6 @@ public final class CarbonCommonConstants {
   public static final String LOAD_GLOBAL_SORT_PARTITIONS = "carbon.load.global.sort.partitions";
 
   public static final String LOAD_GLOBAL_SORT_PARTITIONS_DEFAULT = "0";
-  @CarbonProperty
-  public static final String ENABLE_VECTOR_READER = "carbon.enable.vector.reader";
-
-  public static final String ENABLE_VECTOR_READER_DEFAULT = "true";
-
   /*
    * carbon dictionary server port
    */
@@ -1286,103 +845,29 @@ public final class CarbonCommonConstants {
    * Default carbon dictionary server port
    */
   public static final String DICTIONARY_SERVER_PORT_DEFAULT = "2030";
-
   /**
-   * property to set is IS_DRIVER_INSTANCE
+   * whether to prefetch data while loading.
    */
   @CarbonProperty
-  public static final String IS_DRIVER_INSTANCE = "is.driver.instance";
+  public static final String USE_PREFETCH_WHILE_LOADING = "carbon.loading.prefetch";
 
   /**
-   * property for enabling unsafe based query processing
+   * default value for prefetch data while loading.
+   */
+  public static final String USE_PREFETCH_WHILE_LOADING_DEFAULT = "false";
+  /**
+   * for loading, whether to use yarn's local dir; the main purpose is to avoid a single disk hot spot
    */
   @CarbonProperty
-  public static final String ENABLE_UNSAFE_IN_QUERY_EXECUTION = "enable.unsafe.in.query.processing";
+  public static final String CARBON_LOADING_USE_YARN_LOCAL_DIR = "carbon.use.local.dir";
 
   /**
-   * default property of unsafe processing
+   * default value for whether carbon uses yarn's local dir
    */
-  public static final String ENABLE_UNSAFE_IN_QUERY_EXECUTION_DEFAULTVALUE = "false";
+  public static final String CARBON_LOADING_USE_YARN_LOCAL_DIR_DEFAULT = "true";
 
   /**
-   * whether to prefetch data while loading.
-   */
-  @CarbonProperty
-  public static final String USE_PREFETCH_WHILE_LOADING = "carbon.loading.prefetch";
-
-  /**
-   * default value for prefetch data while loading.
-   */
-  public static final String USE_PREFETCH_WHILE_LOADING_DEFAULT = "false";
-
-  public static final String MINOR = "minor";
-
-  public static final String MAJOR = "major";
-
-  @CarbonProperty
-  public static final String CARBON_CUSTOM_BLOCK_DISTRIBUTION = "carbon.custom.block.distribution";
-
-  public static final int DICTIONARY_DEFAULT_CARDINALITY = 1;
-  @CarbonProperty
-  public static final String SPARK_SCHEMA_STRING_LENGTH_THRESHOLD =
-      "spark.sql.sources.schemaStringLengthThreshold";
-
-  public static final int SPARK_SCHEMA_STRING_LENGTH_THRESHOLD_DEFAULT = 4000;
-
-  @CarbonProperty
-  public static final String CARBON_BAD_RECORDS_ACTION = "carbon.bad.records.action";
-
-  /**
-   * FAIL action will fail the load in case of bad records in loading data
-   */
-  public static final String CARBON_BAD_RECORDS_ACTION_DEFAULT = "FAIL";
-
-  public static final String ENABLE_HIVE_SCHEMA_META_STORE = "spark.carbon.hive.schema.store";
-
-  public static final String ENABLE_HIVE_SCHEMA_META_STORE_DEFAULT = "false";
-
-  /**
-   * There is more often that in production uses different drivers for load and queries. So in case
-   * of load driver user should set this property to enable loader specific clean up.
-   */
-  public static final String DATA_MANAGEMENT_DRIVER = "spark.carbon.datamanagement.driver";
-
-  public static final String DATA_MANAGEMENT_DRIVER_DEFAULT = "true";
-
-  public static final String CARBON_SESSIONSTATE_CLASSNAME = "spark.carbon.sessionstate.classname";
-
-  /**
-   * This property will be used to configure the sqlastbuilder class.
-   */
-  public static final String CARBON_SQLASTBUILDER_CLASSNAME =
-      "spark.carbon.sqlastbuilder.classname";
-
-  @CarbonProperty
-  public static final String CARBON_LEASE_RECOVERY_RETRY_COUNT =
-      "carbon.lease.recovery.retry.count";
-  @CarbonProperty
-  public static final String CARBON_LEASE_RECOVERY_RETRY_INTERVAL =
-      "carbon.lease.recovery.retry.interval";
-
-  @CarbonProperty
-  public static final String CARBON_SECURE_DICTIONARY_SERVER =
-      "carbon.secure.dictionary.server";
-
-  public static final String CARBON_SECURE_DICTIONARY_SERVER_DEFAULT = "true";
-
-  /**
-   * for loading, whether to use yarn's local dir the main purpose is to avoid single disk hot spot
-   */
-  @CarbonProperty
-  public static final String CARBON_LOADING_USE_YARN_LOCAL_DIR = "carbon.use.local.dir";
-
-  /**
-   * default value for whether to enable carbon use yarn local dir
-   */
-  public static final String CARBON_LOADING_USE_YARN_LOCAL_DIR_DEFAULT = "true";
-
-  /**
-   * name of compressor to compress sort temp files
+   * name of compressor to compress sort temp files
    */
   @CarbonProperty
   public static final String CARBON_SORT_TEMP_COMPRESSOR = "carbon.sort.temp.compressor";
@@ -1422,43 +907,6 @@ public final class CarbonCommonConstants {
    * In default we will not optimize the update
    */
   public static final String CARBON_UPDATE_SEGMENT_PARALLELISM_DEFAULT = "1";
-
-  public static final String USE_DISTRIBUTED_DATAMAP = "carbon.enable.distributed.datamap";
-
-  public static final String USE_DISTRIBUTED_DATAMAP_DEFAULT = "false";
-
-  /**
-   * This property defines how the tasks are splitted/combined and launch spark tasks during query
-   */
-  @CarbonProperty
-  public static final String CARBON_TASK_DISTRIBUTION = "carbon.task.distribution";
-
-  /**
-   * It combines the available blocks as per the maximum available tasks in the cluster.
-   */
-  public static final String CARBON_TASK_DISTRIBUTION_CUSTOM = "custom";
-
-  /**
-   * It creates the splits as per the number of blocks/carbondata files available for query.
-   */
-  public static final String CARBON_TASK_DISTRIBUTION_BLOCK = "block";
-
-  /**
-   * It creates the splits as per the number of blocklets available for query.
-   */
-  public static final String CARBON_TASK_DISTRIBUTION_BLOCKLET = "blocklet";
-
-  /**
-   * It merges all the small files and create tasks as per the configurable partition size.
-   */
-  public static final String CARBON_TASK_DISTRIBUTION_MERGE_FILES = "merge_small_files";
-
-  /**
-   * Default task distribution.
-   */
-  public static final String CARBON_TASK_DISTRIBUTION_DEFAULT = CARBON_TASK_DISTRIBUTION_BLOCK;
-
-
   /**
    * The property to configure the mdt file folder path, earlier it was pointing to the
    * fixed carbon store path. This is needed in case of the federation setup when user removes
@@ -1468,120 +916,223 @@ public final class CarbonCommonConstants {
   public static final String CARBON_UPDATE_SYNC_FOLDER = "carbon.update.sync.folder";
 
   public static final String CARBON_UPDATE_SYNC_FOLDER_DEFAULT = "/tmp/carbondata";
+  /**
+   * Configures the parser/writer to limit the length of displayed contents being parsed/written
+   * in the exception message when an error occurs.
+   * Here {@code 0} means no exceptions will include the content being manipulated in their
+   * attributes.
+   */
+  public static final int CARBON_ERROR_CONTENT_LENGTH = 0;
 
   /**
-   * this will be used to pass bitset value in filter to another filter for
-   * faster execution of filter query
+   * if the byte size of the streaming segment reaches this value,
+   * the system will create a new stream segment
    */
-  public static final String BITSET_PIPE_LINE = "carbon.use.bitset.pipe.line";
+  @CarbonProperty
+  public static final String HANDOFF_SIZE = "carbon.streaming.segment.max.size";
 
   /**
-   * this will be used to provide comment for table
+   * enable auto handoff streaming segment
    */
-  public static final String TABLE_COMMENT = "comment";
+  @CarbonProperty
+  public static final String ENABLE_AUTO_HANDOFF = "carbon.streaming.auto.handoff.enabled";
 
+  public static final String ENABLE_AUTO_HANDOFF_DEFAULT = "true";
   /**
-   * this will be used to provide comment for table
+   * Enabling the page level reader for compaction reduces the memory usage while compacting a
+   * larger number of segments. It allows reading page by page instead of reading the whole
+   * blocklet into memory.
    */
-  public static final String COLUMN_COMMENT = "comment";
+  @CarbonProperty
+  @InterfaceStability.Evolving
+  public static final String CARBON_ENABLE_PAGE_LEVEL_READER_IN_COMPACTION =
+      "carbon.enable.page.level.reader.in.compaction";
 
-  public static final String BITSET_PIPE_LINE_DEFAULT = "true";
+  // Note: If this property is set to true it can impact compaction performance as IO will increase
+  public static final String CARBON_ENABLE_PAGE_LEVEL_READER_IN_COMPACTION_DEFAULT = "false";
 
+  @CarbonProperty
+  public static final String CARBON_SORT_STORAGE_INMEMORY_IN_MB =
+      "carbon.sort.storage.inmemory.size.inmb";
+  public static final int CARBON_SORT_STORAGE_INMEMORY_IN_MB_DEFAULT = 512;
   /*
-   * The total size of carbon data
+   * whether to enable prefetch for rowbatch to enhance row reconstruction during compaction
    */
-  public static final String CARBON_TOTAL_DATA_SIZE = "datasize";
+  @CarbonProperty
+  public static final String CARBON_COMPACTION_PREFETCH_ENABLE =
+      "carbon.compaction.prefetch.enable";
+  public static final String CARBON_COMPACTION_PREFETCH_ENABLE_DEFAULT = "false";
 
   /**
-   * The total size of carbon index
+   * compression mode used by lucene for index writing, this conf will be passed to lucene writer
+   * while writing index files.
    */
-  public static final String CARBON_TOTAL_INDEX_SIZE = "indexsize";
+  public static final String CARBON_LUCENE_COMPRESSION_MODE = "carbon.lucene.compression.mode";
 
   /**
-   * ENABLE_CALCULATE_DATA_INDEX_SIZE
+   * default lucene index compression mode. In this mode writing speed is given priority over
+   * index size; the other mode gives importance to a smaller index size rather than to index
+   * writing speed.
    */
-  @CarbonProperty public static final String ENABLE_CALCULATE_SIZE = "carbon.enable.calculate.size";
-
+  public static final String CARBON_LUCENE_COMPRESSION_MODE_DEFAULT = "speed";
   /**
-   * DEFAULT_ENABLE_CALCULATE_DATA_INDEX_SIZE
+   * the minimum amount of data (in MB) that a node loads during data loading
    */
-  public static final String DEFAULT_ENABLE_CALCULATE_SIZE = "true";
-
-  public static final String TABLE_DATA_SIZE = "Table Data Size";
-
-  public static final String TABLE_INDEX_SIZE = "Table Index Size";
-
-  public static final String LAST_UPDATE_TIME = "Last Update Time";
+  @CarbonProperty
+  public static final String CARBON_LOAD_MIN_SIZE_INMB = "load_min_size_inmb";
+  public static final String CARBON_LOAD_MIN_NODE_SIZE_INMB_DEFAULT = "256";
 
   /**
-   * this will be used to skip / ignore empty lines while loading
+   *  default value for the minimum amount of data a node loads
    */
-  @CarbonProperty public static final String CARBON_SKIP_EMPTY_LINE = "carbon.skip.empty.line";
+  public static final int CARBON_LOAD_MIN_SIZE_DEFAULT = 256;
+
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Query parameter start here
+  //////////////////////////////////////////////////////////////////////////////////////////
 
-  public static final String CARBON_SKIP_EMPTY_LINE_DEFAULT = "false";
   /**
-   * Configures the parser/writer to limit the length of displayed contents being parsed/written
-   * in the exception message when an error occurs.
-   * Here {@code 0} means no exceptions will include the content being manipulated in their
-   * attributes.
+   * set the segment ids to query from the table
    */
-  public static final int CARBON_ERROR_CONTENT_LENGTH = 0;
-
+  public static final String CARBON_INPUT_SEGMENTS = "carbon.input.segments.";
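
For illustration only (hypothetical database/table names; in SQL sessions the same key is normally supplied with a SET command, shown here via CarbonProperties only as a sketch of how the key is composed):

    // illustration only: read just segments 1 and 3 of the hypothetical table default.sales
    String inputSegmentsKey = CarbonCommonConstants.CARBON_INPUT_SEGMENTS + "default.sales";
    CarbonProperties.getInstance().addProperty(inputSegmentsKey, "1,3");
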
   /**
-   * if the byte size of streaming segment reach this value,
-   * the system will create a new stream segment
+   * ENABLE_QUERY_STATISTICS
    */
   @CarbonProperty
-  public static final String HANDOFF_SIZE = "carbon.streaming.segment.max.size";
-
+  public static final String ENABLE_QUERY_STATISTICS = "enable.query.statistics";
   /**
-   * enable auto handoff streaming segment
+   * ENABLE_QUERY_STATISTICS_DEFAULT
+   */
+  public static final String ENABLE_QUERY_STATISTICS_DEFAULT = "false";
+  /**
+   * MAX_QUERY_EXECUTION_TIME
    */
   @CarbonProperty
-  public static final String ENABLE_AUTO_HANDOFF = "carbon.streaming.auto.handoff.enabled";
-
-  public static final String ENABLE_AUTO_HANDOFF_DEFAULT = "true";
-
+  public static final String MAX_QUERY_EXECUTION_TIME = "max.query.execution.time";
   /**
-   * the min handoff size of streaming segment, the unit is byte
+   * default value of MAX_QUERY_EXECUTION_TIME
    */
-  public static final long HANDOFF_SIZE_MIN = 1024L * 1024 * 64;
-
+  public static final int DEFAULT_MAX_QUERY_EXECUTION_TIME = 60;
   /**
-   * the default handoff size of streaming segment, the unit is byte
+   * The batch size of records returned to the client.
    */
-  public static final long HANDOFF_SIZE_DEFAULT = 1024L * 1024 * 1024;
+  @CarbonProperty
+  public static final String DETAIL_QUERY_BATCH_SIZE = "carbon.detail.batch.size";
 
+  public static final int DETAIL_QUERY_BATCH_SIZE_DEFAULT = 100;
   /**
-   * minimum required registered resource for starting block distribution
+   * max driver lru cache size up to which the lru cache will be loaded in memory
    */
   @CarbonProperty
-  public static final String CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO =
-      "carbon.scheduler.minregisteredresourcesratio";
+  public static final String CARBON_MAX_DRIVER_LRU_CACHE_SIZE = "carbon.max.driver.lru.cache.size";
   /**
-   * default minimum required registered resource for starting block distribution
+   * max executor lru cache size up to which the lru cache will be loaded in memory
    */
-  public static final String CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_DEFAULT = "0.8d";
+  @CarbonProperty
+  public static final String CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE =
+      "carbon.max.executor.lru.cache.size";
   /**
-   * minimum required registered resource for starting block distribution
+   * max lru cache size default value in MB
    */
-  public static final double CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_MIN = 0.1d;
+  public static final String CARBON_MAX_LRU_CACHE_SIZE_DEFAULT = "-1";
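
A minimal sketch of capping both caches (example sizes in MB, assuming the CarbonProperties.addProperty(String, String) API):

    // illustration only: cap the driver LRU cache at 2 GB and the executor LRU cache at 4 GB
    CarbonProperties props = CarbonProperties.getInstance();
    props.addProperty(CarbonCommonConstants.CARBON_MAX_DRIVER_LRU_CACHE_SIZE, "2048");
    props.addProperty(CarbonCommonConstants.CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE, "4096");
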
   /**
-   * max minimum required registered resource for starting block distribution
+   * property to enable min max during filter query
    */
-  public static final double CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_MAX = 1.0d;
+  @CarbonProperty
+  public static final String CARBON_QUERY_MIN_MAX_ENABLED = "carbon.enableMinMax";
 
   /**
-   * To define how much time scheduler should wait for the
-   * resource in dynamic allocation.
+   * default value to enable min or max during filter query execution
    */
-  public static final String CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT =
-      "carbon.dynamicallocation.schedulertimeout";
+  public static final String MIN_MAX_DEFAULT_VALUE = "true";
+  @CarbonProperty
+  public static final String ENABLE_VECTOR_READER = "carbon.enable.vector.reader";
 
-  /**
-   * default scheduler wait time
-   */
-  public static final String CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT_DEFAULT = "5";
+  public static final String ENABLE_VECTOR_READER_DEFAULT = "true";
+
+  /**
+   * property to indicate whether the current instance is the driver (IS_DRIVER_INSTANCE)
+   */
+  @CarbonProperty
+  public static final String IS_DRIVER_INSTANCE = "is.driver.instance";
+
+  /**
+   * property for enabling unsafe based query processing
+   */
+  @CarbonProperty
+  public static final String ENABLE_UNSAFE_IN_QUERY_EXECUTION = "enable.unsafe.in.query.processing";
+
+  /**
+   * default property of unsafe processing
+   */
+  public static final String ENABLE_UNSAFE_IN_QUERY_EXECUTION_DEFAULTVALUE = "false";
+  @CarbonProperty
+  public static final String CARBON_CUSTOM_BLOCK_DISTRIBUTION = "carbon.custom.block.distribution";
+  /**
+   * This property defines how the tasks are split/combined and how spark tasks are launched during query
+   */
+  @CarbonProperty
+  public static final String CARBON_TASK_DISTRIBUTION = "carbon.task.distribution";
+
+  /**
+   * It combines the available blocks as per the maximum available tasks in the cluster.
+   */
+  public static final String CARBON_TASK_DISTRIBUTION_CUSTOM = "custom";
+
+  /**
+   * It creates the splits as per the number of blocks/carbondata files available for query.
+   */
+  public static final String CARBON_TASK_DISTRIBUTION_BLOCK = "block";
+
+  /**
+   * It creates the splits as per the number of blocklets available for query.
+   */
+  public static final String CARBON_TASK_DISTRIBUTION_BLOCKLET = "blocklet";
+
+  /**
+   * It merges all the small files and creates tasks as per the configurable partition size.
+   */
+  public static final String CARBON_TASK_DISTRIBUTION_MERGE_FILES = "merge_small_files";
+
+  /**
+   * Default task distribution.
+   */
+  public static final String CARBON_TASK_DISTRIBUTION_DEFAULT = CARBON_TASK_DISTRIBUTION_BLOCK;
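
A sketch of switching the strategy (illustrative only, assuming the CarbonProperties.addProperty(String, String) API):

    // illustration only: combine small files into tasks instead of one task per block
    CarbonProperties.getInstance().addProperty(
        CarbonCommonConstants.CARBON_TASK_DISTRIBUTION,
        CarbonCommonConstants.CARBON_TASK_DISTRIBUTION_MERGE_FILES);
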
+  /**
+   * this will be used to pass the bitset value from one filter to another filter for
+   * faster execution of the filter query
+   */
+  public static final String BITSET_PIPE_LINE = "carbon.use.bitset.pipe.line";
+  public static final String BITSET_PIPE_LINE_DEFAULT = "true";
+  /**
+   * minimum required registered resource for starting block distribution
+   */
+  @CarbonProperty
+  public static final String CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO =
+      "carbon.scheduler.minregisteredresourcesratio";
+  /**
+   * default minimum required registered resource for starting block distribution
+   */
+  public static final String CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_DEFAULT = "0.8d";
+  /**
+   * lower limit for the minimum required registered resources ratio
+   */
+  public static final double CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_MIN = 0.1d;
+  /**
+   * upper limit for the minimum required registered resources ratio
+   */
+  public static final double CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_MAX = 1.0d;
+  /**
+   * To define how much time scheduler should wait for the
+   * resource in dynamic allocation.
+   */
+  public static final String CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT =
+      "carbon.dynamicallocation.schedulertimeout";
+
+  /**
+   * default scheduler wait time
+   */
+  public static final String CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT_DEFAULT = "5";
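
For illustration only (arbitrary example values, with the ratio kept inside the 0.1 to 1.0 bounds noted above; the timeout unit is whatever the scheduler interprets for this property):

    // illustration only: wait until 90% of requested executors are registered,
    // and allow the scheduler a timeout of 10 instead of the default 5
    CarbonProperties props = CarbonProperties.getInstance();
    props.addProperty(CarbonCommonConstants.CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO, "0.9");
    props.addProperty(CarbonCommonConstants.CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT, "10");
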
 
   /**
    * default value for executor start up waiting time out
@@ -1599,7 +1150,6 @@ public final class CarbonCommonConstants {
    * have been started
    */
   public static final int CARBON_DYNAMIC_ALLOCATION_SCHEDULER_THREAD_SLEEP_TIME = 250;
-
   /**
    * It allows queries on hive metastore directly along with filter information, otherwise first
    * fetches all partitions from hive and apply filters on it.
@@ -1607,41 +1157,6 @@ public final class CarbonCommonConstants {
   public static final String CARBON_READ_PARTITION_HIVE_DIRECT =
       "carbon.read.partition.hive.direct";
   public static final String CARBON_READ_PARTITION_HIVE_DIRECT_DEFAULT = "true";
-
-  // As Short data type is used for storing the length of a column during data processing hence
-  // the maximum characters that can be supported should be less than Short max value
-  public static final int MAX_CHARS_PER_COLUMN_DEFAULT = 32000;
-  // todo: use infinity first, will switch later
-  public static final int MAX_CHARS_PER_COLUMN_INFINITY = -1;
-
-  /**
-   * Enabling page level reader for compaction reduces the memory usage while compacting more
-   * number of segments. It allows reading only page by page instead of reaing whole blocklet to
-   * memory.
-   */
-  @CarbonProperty
-  @InterfaceStability.Evolving
-  public static final String CARBON_ENABLE_PAGE_LEVEL_READER_IN_COMPACTION =
-      "carbon.enable.page.level.reader.in.compaction";
-
-  // Note: If this property is set to true it can impact compaction performance as IO will increase
-  public static final String CARBON_ENABLE_PAGE_LEVEL_READER_IN_COMPACTION_DEFAULT = "false";
-
-  @CarbonProperty
-  public static final String CARBON_SORT_STORAGE_INMEMORY_IN_MB =
-      "carbon.sort.storage.inmemory.size.inmb";
-  public static final int CARBON_SORT_STORAGE_INMEMORY_IN_MB_DEFAULT = 512;
-
-  @CarbonProperty
-  public static final String SUPPORT_DIRECT_QUERY_ON_DATAMAP =
-      "carbon.query.directQueryOnDataMap.enabled";
-  public static final String SUPPORT_DIRECT_QUERY_ON_DATAMAP_DEFAULTVALUE = "false";
-
-  @CarbonProperty
-  public static final String VALIDATE_DIRECT_QUERY_ON_DATAMAP =
-      "carbon.query.validate.directqueryondatamap";
-  public static final String VALIDATE_DIRECT_QUERY_ON_DATAMAP_DEFAULTVALUE = "true";
-
   /**
    * If the heap memory allocations of the given size is greater or equal than this value,
    * it should go through the pooling mechanism.
@@ -1653,44 +1168,6 @@ public final class CarbonCommonConstants {
   public static final String CARBON_HEAP_MEMORY_POOLING_THRESHOLD_BYTES =
       "carbon.heap.memory.pooling.threshold.bytes";
   public static final String CARBON_HEAP_MEMORY_POOLING_THRESHOLD_BYTES_DEFAULT = "1048576";
-
-  @CarbonProperty
-  public static final String CARBON_SHOW_DATAMAPS = "carbon.query.show.datamaps";
-
-  public static final String CARBON_SHOW_DATAMAPS_DEFAULT = "true";
-
-  /**
-   * Currently the segment lock files are not deleted immediately when unlock,
-   * this value indicates the number of hours the segment lock files will be preserved.
-   */
-  @CarbonProperty
-  public static final String CARBON_SEGMENT_LOCK_FILES_PRESERVE_HOURS =
-      "carbon.segment.lock.files.preserve.hours";
-
-  // default value is 2 days
-  public static final String CARBON_SEGMENT_LOCK_FILES_PRESERVE_HOURS_DEFAULT = "48";
-
-  /**
-   * The number of invisible segment info which will be preserved in tablestatus file,
-   * if it exceeds this value, they will be removed and write to tablestatus.history file.
-   */
-  @CarbonProperty
-  public static final String CARBON_INVISIBLE_SEGMENTS_PRESERVE_COUNT =
-      "carbon.invisible.segments.preserve.count";
-
-  /**
-   * default value is 200, it means that it will preserve 200 invisible segment info
-   * in tablestatus file.
-   * The size of one segment info is about 500 bytes, so the size of tablestatus file
-   * will remain at 100KB.
-   */
-  public static final String CARBON_INVISIBLE_SEGMENTS_PRESERVE_COUNT_DEFAULT = "200";
-
-  /**
-   * System older location to store system level data like datamap schema and status files.
-   */
-  public static final String CARBON_SYSTEM_FOLDER_LOCATION = "carbon.system.folder.location";
-
   /**
    * If set to true, will use CarbonReader to do distributed scan directly instead of using
    * compute framework like spark, thus avoiding limitation of compute framework like SQL
@@ -1756,27 +1233,43 @@ public final class CarbonCommonConstants {
   public static final String CARBON_SEARCH_MODE_WORKER_WORKLOAD_LIMIT =
       "carbon.search.worker.workload.limit";
 
-  /*
-   * whether to enable prefetch for rowbatch to enhance row reconstruction during compaction
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Datamap parameter start here
+  //////////////////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * key prefix for set command. 'carbon.datamap.visible.dbName.tableName.dmName = false' means
+   * that the query on 'dbName.table' will not use the datamap 'dmName'
    */
-  @CarbonProperty
-  public static final String CARBON_COMPACTION_PREFETCH_ENABLE =
-      "carbon.compaction.prefetch.enable";
-  public static final String CARBON_COMPACTION_PREFETCH_ENABLE_DEFAULT = "false";
+  @InterfaceStability.Unstable
+  public static final String CARBON_DATAMAP_VISIBLE = "carbon.datamap.visible.";
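
A sketch of how that key is composed per the comment above (hypothetical names; in SQL sessions the same key is normally supplied with a SET command):

    // illustration only: hide the datamap 'dm1' of table 'default.sales' from queries
    String visibleKey = CarbonCommonConstants.CARBON_DATAMAP_VISIBLE + "default.sales.dm1";
    CarbonProperties.getInstance().addProperty(visibleKey, "false");
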
 
   /**
-   * compression mode used by lucene for index writing, this conf will be passed to lucene writer
-   * while writing index files.
+   * Fetch and validate the segments.
+   * Used for aggregate table load as segment validation is not required.
    */
-  public static final String CARBON_LUCENE_COMPRESSION_MODE = "carbon.lucene.compression.mode";
+  public static final String VALIDATE_CARBON_INPUT_SEGMENTS = "validate.carbon.input.segments.";
 
   /**
-   * default lucene index compression mode, in this mode writing speed will be less and speed is
-   * given priority, another mode is compression mode, where the index size is given importance to
-   * make it less and not the index writing speed.
+   * Whether load/insert command is fired internally or by the user.
+   * Used to block load/insert on pre-aggregate if fired by user
    */
-  public static final String CARBON_LUCENE_COMPRESSION_MODE_DEFAULT = "speed";
+  public static final String IS_INTERNAL_LOAD_CALL = "is.internal.load.call";
+  public static final String USE_DISTRIBUTED_DATAMAP = "carbon.enable.distributed.datamap";
+  public static final String USE_DISTRIBUTED_DATAMAP_DEFAULT = "false";
+  @CarbonProperty
+  public static final String SUPPORT_DIRECT_QUERY_ON_DATAMAP =
+      "carbon.query.directQueryOnDataMap.enabled";
+  public static final String SUPPORT_DIRECT_QUERY_ON_DATAMAP_DEFAULTVALUE = "false";
+
+  @CarbonProperty
+  public static final String VALIDATE_DIRECT_QUERY_ON_DATAMAP =
+      "carbon.query.validate.directqueryondatamap";
+  public static final String VALIDATE_DIRECT_QUERY_ON_DATAMAP_DEFAULTVALUE = "true";
+  @CarbonProperty
+  public static final String CARBON_SHOW_DATAMAPS = "carbon.query.show.datamaps";
 
+  public static final String CARBON_SHOW_DATAMAPS_DEFAULT = "true";
   /**
    * DMPROPERTY for Index DataMap, like lucene, bloomfilter DataMap,
    * to indicate a list of column name to be indexed
@@ -1785,12 +1278,6 @@ public final class CarbonCommonConstants {
 
   // Property to enable parallel datamap loading for a table
   public static final String CARBON_LOAD_DATAMAPS_PARALLEL = "carbon.load.datamaps.parallel.";
-
-  /**
-   * default value in size for cache size of bloom filter datamap.
-   */
-  public static final String CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE_DEFAULT_VAL = "512";
-
   // by default lucene will not store or create index for stop words like "is","the", if this
   // property is set to true lucene will index for stop words also and gives result for the filter
   // with stop words(example: TEXT_MATCH('description':'the'))
@@ -1798,52 +1285,522 @@ public final class CarbonCommonConstants {
 
   public static final String CARBON_LUCENE_INDEX_STOP_WORDS_DEFAULT = "false";
 
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Constant value start here
+  //////////////////////////////////////////////////////////////////////////////////////////
+
   /**
-   * The node loads the smallest amount of data
+   * surrogate value of null
    */
-  @CarbonProperty
-  public static final String CARBON_LOAD_MIN_SIZE_INMB = "load_min_size_inmb";
-  public static final String CARBON_LOAD_MIN_NODE_SIZE_INMB_DEFAULT = "256";
-
+  public static final int DICT_VALUE_NULL = 1;
   /**
-   *  the node minimum load data default value
+   * surrogate value of null for direct dictionary
    */
-  public static final int CARBON_LOAD_MIN_SIZE_DEFAULT = 256;
+  public static final int DIRECT_DICT_VALUE_NULL = 1;
   /**
-   * property to be specified for caching min/max of required columns
+   * integer size in bytes
    */
-  public static final String COLUMN_META_CACHE = "column_meta_cache";
+  public static final int INT_SIZE_IN_BYTE = 4;
   /**
-   * property to be specified for caching level (Block/Blocket)
+   * short size in bytes
    */
-  public static final String CACHE_LEVEL = "cache_level";
+  public static final int SHORT_SIZE_IN_BYTE = 2;
   /**
-   * default value for cache level
+   * DOUBLE size in bytes
    */
-  public static final String CACHE_LEVEL_DEFAULT_VALUE = "BLOCK";
-
+  public static final int DOUBLE_SIZE_IN_BYTE = 8;
   /**
-   * It is internal configuration and used only for test purpose.
-   * It will merge the carbon index files with in the segment to single segment.
+   * LONG size in bytes
    */
-  @CarbonProperty
-  public static final String CARBON_MERGE_INDEX_IN_SEGMENT =
-      "carbon.merge.index.in.segment";
+  public static final int LONG_SIZE_IN_BYTE = 8;
+  /**
+   * byte to KB conversion factor
+   */
+  public static final int BYTE_TO_KB_CONVERSION_FACTOR = 1024;
+  /**
+   * CARDINALITY_INCREMENT_DEFAULT_VALUE
+   */
+  public static final int CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL = 10;
+  /**
+   * Load Folder Name
+   */
+  public static final String LOAD_FOLDER = "Segment_";
 
-  public static final String CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT = "true";
+  public static final String HDFSURL_PREFIX = "hdfs://";
 
-  public static final short LOCAL_DICT_ENCODED_BYTEARRAY_SIZE = 3;
+  public static final String LOCAL_FILE_PREFIX = "file://";
 
-  /**
-   * property to be used for specifying the max byte limit for string/varchar data type till
-   * where storing min/max in data file will be considered
-   */
-  public static final String CARBON_MINMAX_ALLOWED_BYTE_COUNT =
-      "carbon.minmax.allowed.byte.count";
-  public static final String CARBON_MINMAX_ALLOWED_BYTE_COUNT_DEFAULT = "200";
+  public static final String VIEWFSURL_PREFIX = "viewfs://";
+
+  public static final String ALLUXIOURL_PREFIX = "alluxio://";
+
+  public static final String S3_PREFIX = "s3://";
+
+  public static final String S3N_PREFIX = "s3n://";
+
+  public static final String S3A_PREFIX = "s3a://";
+  /**
+   * Access Key for s3n
+   */
+  public static final String S3N_ACCESS_KEY = "fs.s3n.awsAccessKeyId";
+  /**
+   * Secret Key for s3n
+   */
+  public static final String S3N_SECRET_KEY = "fs.s3n.awsSecretAccessKey";
+  /**
+   * Access Key for s3
+   */
+  public static final String S3_ACCESS_KEY = "fs.s3.awsAccessKeyId";
+  /**
+   * Secret Key for s3
+   */
+  public static final String S3_SECRET_KEY = "fs.s3.awsSecretAccessKey";
+
+  /**
+   * FS_DEFAULT_FS
+   */
+  @CarbonProperty
+  public static final String FS_DEFAULT_FS = "fs.defaultFS";
+  /**
+   * Average constant
+   */
+  public static final String AVERAGE = "avg";
+  /**
+   * Count constant
+   */
+  public static final String COUNT = "count";
+  /**
+   * SUM
+   */
+  public static final String SUM = "sum";
+  /**
+   * MEMBER_DEFAULT_VAL
+   */
+  public static final String MEMBER_DEFAULT_VAL = "@NU#LL$!";
+
+  /**
+   * default charset to be used for reading and writing
+   */
+  public static final String DEFAULT_CHARSET = "UTF-8";
+  /**
+   * MEMBER_DEFAULT_VAL_ARRAY
+   */
+  public static final byte[] MEMBER_DEFAULT_VAL_ARRAY =
+      MEMBER_DEFAULT_VAL.getBytes(Charset.forName(DEFAULT_CHARSET));
+  /**
+   * Empty byte array
+   */
+  public static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+  /**
+   * FILE STATUS IN-PROGRESS
+   */
+  public static final String FILE_INPROGRESS_STATUS = ".inprogress";
+  /**
+   * FACT_FILE_EXT
+   */
+  public static final String FACT_FILE_EXT = ".carbondata";
+
+  /**
+   * DELETE_DELTA_FILE_EXT
+   */
+  public static final String DELETE_DELTA_FILE_EXT = ".deletedelta";
+
+  /**
+   * UPDATE_DELTA_FILE_EXT
+   */
+  public static final String UPDATE_DELTA_FILE_EXT = FACT_FILE_EXT;
+  /**
+   * MERGERD_EXTENSION
+   */
+  public static final String MERGERD_EXTENSION = ".merge";
+  /**
+   * default number of cores
+   */
+  public static final String DEFAULT_NUMBER_CORES = "2";
+  /**
+   * CSV_FILE_EXTENSION
+   */
+  public static final String CSV_FILE_EXTENSION = ".csv";
+
+  /**
+   * LOG_FILE_EXTENSION
+   */
+  public static final String LOG_FILE_EXTENSION = ".log";
+
+  /**
+   * COLON_SPC_CHARACTER
+   */
+  public static final String COLON_SPC_CHARACTER = ":!@#COLON#@!:";
+  /**
+   * HASH_SPC_CHARACTER
+   */
+  public static final String HASH_SPC_CHARACTER = "#!@:HASH:@!#";
+  /**
+   * SEMICOLON_SPC_CHARACTER
+   */
+  public static final String SEMICOLON_SPC_CHARACTER = ";#!@:SEMIC:@!#;";
+  /**
+   * AMPERSAND_SPC_CHARACTER
+   */
+  public static final String AMPERSAND_SPC_CHARACTER = "&#!@:AMPER:@!#&";
+  /**
+   * COMA_SPC_CHARACTER
+   */
+  public static final String COMA_SPC_CHARACTER = ",#!:COMA:!#,";
+  /**
+   * HYPHEN_SPC_CHARACTER
+   */
+  public static final String HYPHEN_SPC_CHARACTER = "-#!:HYPHEN:!#-";
+  /**
+   * SORT_TEMP_FILE_EXT
+   */
+  public static final String SORT_TEMP_FILE_EXT = ".sorttemp";
+  /**
+   * DEFAULT_COLLECTION_SIZE
+   */
+  public static final int DEFAULT_COLLECTION_SIZE = 16;
+  /**
+   * DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE
+   */
+  public static final String DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE = "1";
+  /**
+   * IS_FULLY_FILLED_BITS_DEFAULT_VALUE
+   */
+  public static final String IS_FULLY_FILLED_BITS_DEFAULT_VALUE = "true";
+  /**
+   * CONSTANT_SIZE_TEN
+   */
+  public static final int CONSTANT_SIZE_TEN = 10;
+  /**
+   * COMMA
+   */
+  public static final String COMMA = ",";
+  /**
+   * UNDERSCORE
+   */
+  public static final String UNDERSCORE = "_";
+  /**
+   * POINT
+   */
+  public static final String POINT = ".";
+  /**
+   * Windows File separator
+   */
+  public static final String WINDOWS_FILE_SEPARATOR = "\\";
+  /**
+   * File separator
+   */
+  public static final String FILE_SEPARATOR = "/";
+  /**
+   * ARRAY separator
+   */
+  public static final String ARRAY_SEPARATOR = "\001";
+  public static final String STRING = "String";
+  public static final String SHORT = "Short";
+  public static final String TIMESTAMP = "Timestamp";
+  public static final String ARRAY = "array";
+  public static final String STRUCT = "struct";
+  public static final String MAP = "map";
+  public static final String FROM = "from";
+  /**
+   * TABLE UPDATE STATUS FILENAME
+   */
+  public static final String TABLEUPDATESTATUS_FILENAME = "tableupdatestatus";
+  /**
+   * SPILL_OVER_DISK_PATH
+   */
+  public static final String SCHEMAS_MODIFIED_TIME_FILE = "modifiedTime.mdt";
+  public static final String DEFAULT_INVISIBLE_DUMMY_MEASURE = "default_dummy_measure";
+  public static final String CARBON_IMPLICIT_COLUMN_POSITIONID = "positionId";
+  public static final String CARBON_IMPLICIT_COLUMN_TUPLEID = "tupleId";
+  public static final String CARBON_IMPLICIT_COLUMN_SEGMENTID = "segId";
+  public static final String POSITION_REFERENCE = "positionReference";
+  /**
+   * implicit column which will be added to each carbon table
+   */
+  public static final String POSITION_ID = "positionId";
+  /**
+   * TEMPWRITEFILEEXTENSION
+   */
+  public static final String TEMPWRITEFILEEXTENSION = ".write";
+  /**
+   * default charset class to be used for reading and writing
+   */
+  public static final Charset DEFAULT_CHARSET_CLASS = Charset.forName(DEFAULT_CHARSET);
+
+  /**
+   * surrogate key that will be sent whenever in the dictionary chunks
+   * a valid surrogate key is not found for a given dictionary value
+   */
+  public static final int INVALID_SURROGATE_KEY = -1;
+
+  /**
+   * surrogate key for MEMBER_DEFAULT_VAL
+   */
+  public static final int MEMBER_DEFAULT_VAL_SURROGATE_KEY = 1;
+
+  public static final String INVALID_SEGMENT_ID = "-1";
+
+  /**
+   * default load time of the segment
+   */
+  public static final long SEGMENT_LOAD_TIME_DEFAULT = -1;
+  /**
+   * default name of data base
+   */
+  public static final String DATABASE_DEFAULT_NAME = "default";
+  /**
+   * 16 mb size
+   */
+  public static final long CARBON_16MB = 16 * 1024 * 1024;
+  /**
+   * 256 mb size
+   */
+  public static final long CARBON_256MB = 256 * 1024 * 1024;
+
+  /**
+   * ZOOKEEPERLOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_ZOOKEEPER = "ZOOKEEPERLOCK";
+
+  /**
+   * LOCALLOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_LOCAL = "LOCALLOCK";
+
+  /**
+   * HDFSLOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_HDFS = "HDFSLOCK";
+
+  /**
+   * S3LOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_S3 = "S3LOCK";
+  /**
+   * Invalid filter member log string
+   */
+  public static final String FILTER_INVALID_MEMBER =
+      " Invalid Record(s) are present while filter evaluation. ";
+  /**
+   * default location of the carbon metastore db
+   */
+  public static final String METASTORE_LOCATION_DEFAULT_VAL = "../carbon.metastore";
+
+  /**
+   * hive connection url
+   */
+  public static final String HIVE_CONNECTION_URL = "javax.jdo.option.ConnectionURL";
+
+  /**
+   * If level 2 compaction is done as part of minor compaction, the new compacted segment name will end with .2
+   */
+  public static final String LEVEL2_COMPACTION_INDEX = ".2";
+
+  /**
+   * Indicates compaction
+   */
+  public static final String COMPACTION_KEY_WORD = "COMPACTION";
+
+  /**
+   * Indicates alter partition
+   */
+  public static final String ALTER_PARTITION_KEY_WORD = "ALTER_PARTITION";
+  /**
+   * hdfs temporary directory key
+   */
+  public static final String HDFS_TEMP_LOCATION = "hadoop.tmp.dir";
+  /**
+   * File created in case of minor compaction request
+   */
+  public static final String minorCompactionRequiredFile = "compactionRequired_minor";
+
+  /**
+   * File created in case of major compaction request
+   */
+  public static final String majorCompactionRequiredFile = "compactionRequired_major";
+  /**
+   * Compaction system level lock folder.
+   */
+  public static final String SYSTEM_LEVEL_COMPACTION_LOCK_FOLDER = "SystemCompactionLock";
+  /**
+   * Index file name will end with this extension when update.
+   */
+  public static final String UPDATE_INDEX_FILE_EXT = ".carbonindex";
+
+  /**
+   * Key word for true
+   */
+  public static final String KEYWORD_TRUE = "TRUE";
+
+  /**
+   * Key word for false
+   */
+  public static final String KEYWORD_FALSE = "FALSE";
+
+  /**
+   * hyphen
+   */
+  public static final String HYPHEN = "-";
+
+  /**
+   * columns which get updated in an update operation will have a header ending with this extension.
+   */
+  public static final String UPDATED_COL_EXTENSION = "-updatedColumn";
+
+  /**
+   * key appended to differentiate the update flow from the insert flow.
+   */
+  public static final String RDDUTIL_UPDATE_KEY = "UPDATE_";
+  /**
+   * current data file version
+   */
+  public static final String CARBON_DATA_FILE_DEFAULT_VERSION = "V3";
+
+  /**
+   * data file version header
+   */
+  public static final String CARBON_DATA_VERSION_HEADER = "CARBONDATAVERSION#";
+  /**
+   * Maximum number of columns supported
+   */
+  public static final int DEFAULT_MAX_NUMBER_OF_COLUMNS = 20000;
+  public static final String MINOR = "minor";
+
+  public static final String MAJOR = "major";
+  public static final int DICTIONARY_DEFAULT_CARDINALITY = 1;
+  /**
+   * this will be used to provide comment for table
+   */
+  public static final String TABLE_COMMENT = "comment";
+
+  /**
+   * this will be used to provide comment for column
+   */
+  public static final String COLUMN_COMMENT = "comment";
+  /*
+   * The total size of carbon data
+   */
+  public static final String CARBON_TOTAL_DATA_SIZE = "datasize";
+
+  /**
+   * The total size of carbon index
+   */
+  public static final String CARBON_TOTAL_INDEX_SIZE = "indexsize";
+  public static final String TABLE_DATA_SIZE = "Table Data Size";
+
+  public static final String TABLE_INDEX_SIZE = "Table Index Size";
+
+  public static final String LAST_UPDATE_TIME = "Last Update Time";
+  /**
+   * the min handoff size of streaming segment, the unit is byte
+   */
+  public static final long HANDOFF_SIZE_MIN = 1024L * 1024 * 64;
+
+  /**
+   * the default handoff size of streaming segment, the unit is byte
+   */
+  public static final long HANDOFF_SIZE_DEFAULT = 1024L * 1024 * 1024;
+  // Since the Short data type is used for storing the length of a column during data processing,
+  // the maximum number of characters supported must be less than the Short max value
+  public static final int MAX_CHARS_PER_COLUMN_DEFAULT = 32000;
+  // todo: use infinity first, will switch later
+  public static final int MAX_CHARS_PER_COLUMN_INFINITY = -1;
+  public static final short LOCAL_DICT_ENCODED_BYTEARRAY_SIZE = 3;
   public static final int CARBON_MINMAX_ALLOWED_BYTE_COUNT_MIN = 10;
   public static final int CARBON_MINMAX_ALLOWED_BYTE_COUNT_MAX = 1000;
 
-  private CarbonCommonConstants() {
-  }
+  //////////////////////////////////////////////////////////////////////////////////////////
+  // Unused constants and parameters start here
+  //////////////////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * BYTE_ENCODING
+   */
+  public static final String BYTE_ENCODING = "ISO-8859-1";
+  /**
+   * measure meta data file name
+   */
+  public 

<TRUNCATED>

[2/2] carbondata git commit: [CARBONDATA-3034] Carding parameters, Organized by parameter category.

Posted by ja...@apache.org.
[CARBONDATA-3034] Carding parameters, organized by parameter category.

This PR is mainly for combing the parameters in CarbonCommonConstants.java, organizing them by parameter category.

This closes #2843


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e4806b9a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e4806b9a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e4806b9a

Branch: refs/heads/master
Commit: e4806b9a0dba602eec526cfb01dbf6d9dc56278d
Parents: 278d171
Author: ndwangsen <lu...@huawei.com>
Authored: Tue Oct 23 11:35:17 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed Oct 24 22:21:19 2018 +0800

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   | 2033 +++++++++---------
 1 file changed, 995 insertions(+), 1038 deletions(-)
----------------------------------------------------------------------