Posted to commits@kylin.apache.org by li...@apache.org on 2016/11/23 09:08:59 UTC

[3/9] kylin git commit: KYLIN-2195 All code changes, ready for test

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/engine-mr/src/main/java/org/apache/kylin/engine/mr/DFSFileTableReader.java
----------------------------------------------------------------------
diff --git a/engine-mr/src/main/java/org/apache/kylin/engine/mr/DFSFileTableReader.java b/engine-mr/src/main/java/org/apache/kylin/engine/mr/DFSFileTableReader.java
index 173c908..67fedbd 100644
--- a/engine-mr/src/main/java/org/apache/kylin/engine/mr/DFSFileTableReader.java
+++ b/engine-mr/src/main/java/org/apache/kylin/engine/mr/DFSFileTableReader.java
@@ -1,252 +1,252 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *     http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-
-package org.apache.kylin.engine.mr;
-
-import java.io.BufferedReader;
-import java.io.Closeable;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.SequenceFile.Reader;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.kylin.common.util.Bytes;
-import org.apache.kylin.common.util.StringSplitter;
-import org.apache.kylin.source.ReadableTable.TableReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Tables are typically CSV or SEQ files.
- * 
- * @author yangli9
- */
-public class DFSFileTableReader implements TableReader {
-
-    private static final Logger logger = LoggerFactory.getLogger(DFSFileTableReader.class);
-    private static final char CSV_QUOTE = '"';
-    private static final String[] DETECT_DELIMS = new String[] { "\177", "|", "\t", "," };
-
-    private String filePath;
-    private String delim;
-    private List<RowReader> readerList;
-
-    private String curLine;
-    private String[] curColumns;
-    private int expectedColumnNumber = -1; // helps delimiter detection
-
-    public DFSFileTableReader(String filePath, int expectedColumnNumber) throws IOException {
-        this(filePath, DFSFileTable.DELIM_AUTO, expectedColumnNumber);
-    }
-
-    public DFSFileTableReader(String filePath, String delim, int expectedColumnNumber) throws IOException {
-        filePath = HadoopUtil.fixWindowsPath(filePath);
-        this.filePath = filePath;
-        this.delim = delim;
-        this.expectedColumnNumber = expectedColumnNumber;
-        this.readerList = new ArrayList<RowReader>();
-
-        FileSystem fs = HadoopUtil.getFileSystem(filePath);
-
-        ArrayList<FileStatus> allFiles = new ArrayList<>();
-        FileStatus status = fs.getFileStatus(new Path(filePath));
-        if (status.isFile()) {
-            allFiles.add(status);
-        } else {
-            FileStatus[] listStatus = fs.listStatus(new Path(filePath));
-            allFiles.addAll(Arrays.asList(listStatus));
-        }
-
-        try {
-            for (FileStatus f : allFiles) {
-                RowReader rowReader = new SeqRowReader(HadoopUtil.getCurrentConfiguration(), fs, f.getPath().toString());
-                this.readerList.add(rowReader);
-            }
-        } catch (IOException e) {
-            if (isExceptionSayingNotSeqFile(e) == false)
-                throw e;
-
-            this.readerList = new ArrayList<RowReader>();
-            for (FileStatus f : allFiles) {
-                RowReader rowReader = new CsvRowReader(fs, f.getPath().toString());
-                this.readerList.add(rowReader);
-            }
-        }
-    }
-
-    private boolean isExceptionSayingNotSeqFile(IOException e) {
-        if (e.getMessage() != null && e.getMessage().contains("not a SequenceFile"))
-            return true;
-
-        if (e instanceof EOFException) // in case the file is very very small
-            return true;
-
-        return false;
-    }
-
-    @Override
-    public boolean next() throws IOException {
-        int curReaderIndex = -1;
-        RowReader curReader;
-
-        while (++curReaderIndex < readerList.size()) {
-            curReader = readerList.get(curReaderIndex);
-            curLine = curReader.nextLine();
-            curColumns = null;
-
-            if (curLine != null) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    public String getLine() {
-        return curLine;
-    }
-
-    @Override
-    public String[] getRow() {
-        if (curColumns == null) {
-            if (DFSFileTable.DELIM_AUTO.equals(delim))
-                delim = autoDetectDelim(curLine);
-
-            if (delim == null)
-                curColumns = new String[] { curLine };
-            else
-                curColumns = split(curLine, delim);
-        }
-        return curColumns;
-    }
-
-    private String[] split(String line, String delim) {
-        // FIXME CSV line should be parsed considering escapes
-        String[] str = StringSplitter.split(line, delim);
-
-        // un-escape CSV
-        if (DFSFileTable.DELIM_COMMA.equals(delim)) {
-            for (int i = 0; i < str.length; i++) {
-                str[i] = unescapeCsv(str[i]);
-            }
-        }
-
-        return str;
-    }
-
-    private String unescapeCsv(String str) {
-        if (str == null || str.length() < 2)
-            return str;
-
-        str = StringEscapeUtils.unescapeCsv(str);
-
-        // unescapeCsv may not remove the outermost quotes
-        if (str.charAt(0) == CSV_QUOTE && str.charAt(str.length() - 1) == CSV_QUOTE)
-            str = str.substring(1, str.length() - 1);
-
-        return str;
-    }
-
-    @Override
-    public void close() {
-        for (RowReader reader : readerList) {
-            IOUtils.closeQuietly(reader);
-        }
-    }
-
-    private String autoDetectDelim(String line) {
-        if (expectedColumnNumber > 0) {
-            for (String delim : DETECT_DELIMS) {
-                if (StringSplitter.split(line, delim).length == expectedColumnNumber) {
-                    logger.info("Auto detect delim to be '" + delim + "', split line to " + expectedColumnNumber + " columns -- " + line);
-                    return delim;
-                }
-            }
-        }
-
-        logger.info("Auto detect delim to be null, will take THE-WHOLE-LINE as a single value, for " + filePath);
-        return null;
-    }
-
-    // ============================================================================
-
-    private interface RowReader extends Closeable {
-        String nextLine() throws IOException; // return null on EOF
-    }
-
-    private class SeqRowReader implements RowReader {
-        Reader reader;
-        Writable key;
-        Text value;
-
-        SeqRowReader(Configuration hconf, FileSystem fs, String path) throws IOException {
-            reader = new Reader(hconf, SequenceFile.Reader.file(new Path(path)));
-            key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), hconf);
-            value = new Text();
-        }
-
-        @Override
-        public String nextLine() throws IOException {
-            boolean hasNext = reader.next(key, value);
-            if (hasNext)
-                return Bytes.toString(value.getBytes(), 0, value.getLength());
-            else
-                return null;
-        }
-
-        @Override
-        public void close() throws IOException {
-            reader.close();
-        }
-    }
-
-    private class CsvRowReader implements RowReader {
-        BufferedReader reader;
-
-        CsvRowReader(FileSystem fs, String path) throws IOException {
-            FSDataInputStream in = fs.open(new Path(path));
-            reader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
-        }
-
-        @Override
-        public String nextLine() throws IOException {
-            return reader.readLine();
-        }
-
-        @Override
-        public void close() throws IOException {
-            reader.close();
-        }
-
-    }
-
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+package org.apache.kylin.engine.mr;
+
+import java.io.BufferedReader;
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.Reader;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.kylin.common.util.Bytes;
+import org.apache.kylin.common.util.StringSplitter;
+import org.apache.kylin.source.ReadableTable.TableReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tables are typically CSV or SEQ files.
+ * 
+ * @author yangli9
+ */
+public class DFSFileTableReader implements TableReader {
+
+    private static final Logger logger = LoggerFactory.getLogger(DFSFileTableReader.class);
+    private static final char CSV_QUOTE = '"';
+    private static final String[] DETECT_DELIMS = new String[] { "\177", "|", "\t", "," };
+
+    private String filePath;
+    private String delim;
+    private List<RowReader> readerList;
+
+    private String curLine;
+    private String[] curColumns;
+    private int expectedColumnNumber = -1; // helps delimiter detection
+
+    public DFSFileTableReader(String filePath, int expectedColumnNumber) throws IOException {
+        this(filePath, DFSFileTable.DELIM_AUTO, expectedColumnNumber);
+    }
+
+    public DFSFileTableReader(String filePath, String delim, int expectedColumnNumber) throws IOException {
+        filePath = HadoopUtil.fixWindowsPath(filePath);
+        this.filePath = filePath;
+        this.delim = delim;
+        this.expectedColumnNumber = expectedColumnNumber;
+        this.readerList = new ArrayList<RowReader>();
+
+        FileSystem fs = HadoopUtil.getFileSystem(filePath);
+
+        ArrayList<FileStatus> allFiles = new ArrayList<>();
+        FileStatus status = fs.getFileStatus(new Path(filePath));
+        if (status.isFile()) {
+            allFiles.add(status);
+        } else {
+            FileStatus[] listStatus = fs.listStatus(new Path(filePath));
+            allFiles.addAll(Arrays.asList(listStatus));
+        }
+
+        try {
+            for (FileStatus f : allFiles) {
+                RowReader rowReader = new SeqRowReader(HadoopUtil.getCurrentConfiguration(), fs, f.getPath().toString());
+                this.readerList.add(rowReader);
+            }
+        } catch (IOException e) {
+            if (isExceptionSayingNotSeqFile(e) == false)
+                throw e;
+
+            this.readerList = new ArrayList<RowReader>();
+            for (FileStatus f : allFiles) {
+                RowReader rowReader = new CsvRowReader(fs, f.getPath().toString());
+                this.readerList.add(rowReader);
+            }
+        }
+    }
+
+    private boolean isExceptionSayingNotSeqFile(IOException e) {
+        if (e.getMessage() != null && e.getMessage().contains("not a SequenceFile"))
+            return true;
+
+        if (e instanceof EOFException) // in case the file is very very small
+            return true;
+
+        return false;
+    }
+
+    @Override
+    public boolean next() throws IOException {
+        int curReaderIndex = -1;
+        RowReader curReader;
+
+        while (++curReaderIndex < readerList.size()) {
+            curReader = readerList.get(curReaderIndex);
+            curLine = curReader.nextLine();
+            curColumns = null;
+
+            if (curLine != null) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    public String getLine() {
+        return curLine;
+    }
+
+    @Override
+    public String[] getRow() {
+        if (curColumns == null) {
+            if (DFSFileTable.DELIM_AUTO.equals(delim))
+                delim = autoDetectDelim(curLine);
+
+            if (delim == null)
+                curColumns = new String[] { curLine };
+            else
+                curColumns = split(curLine, delim);
+        }
+        return curColumns;
+    }
+
+    private String[] split(String line, String delim) {
+        // FIXME CSV line should be parsed considering escapes
+        String[] str = StringSplitter.split(line, delim);
+
+        // un-escape CSV
+        if (DFSFileTable.DELIM_COMMA.equals(delim)) {
+            for (int i = 0; i < str.length; i++) {
+                str[i] = unescapeCsv(str[i]);
+            }
+        }
+
+        return str;
+    }
+
+    private String unescapeCsv(String str) {
+        if (str == null || str.length() < 2)
+            return str;
+
+        str = StringEscapeUtils.unescapeCsv(str);
+
+        // unescapeCsv may not remove the outermost quotes
+        if (str.charAt(0) == CSV_QUOTE && str.charAt(str.length() - 1) == CSV_QUOTE)
+            str = str.substring(1, str.length() - 1);
+
+        return str;
+    }
+
+    @Override
+    public void close() {
+        for (RowReader reader : readerList) {
+            IOUtils.closeQuietly(reader);
+        }
+    }
+
+    private String autoDetectDelim(String line) {
+        if (expectedColumnNumber > 0) {
+            for (String delim : DETECT_DELIMS) {
+                if (StringSplitter.split(line, delim).length == expectedColumnNumber) {
+                    logger.info("Auto detect delim to be '" + delim + "', split line to " + expectedColumnNumber + " columns -- " + line);
+                    return delim;
+                }
+            }
+        }
+
+        logger.info("Auto detect delim to be null, will take THE-WHOLE-LINE as a single value, for " + filePath);
+        return null;
+    }
+
+    // ============================================================================
+
+    private interface RowReader extends Closeable {
+        String nextLine() throws IOException; // return null on EOF
+    }
+
+    private class SeqRowReader implements RowReader {
+        Reader reader;
+        Writable key;
+        Text value;
+
+        SeqRowReader(Configuration hconf, FileSystem fs, String path) throws IOException {
+            reader = new Reader(hconf, SequenceFile.Reader.file(new Path(path)));
+            key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), hconf);
+            value = new Text();
+        }
+
+        @Override
+        public String nextLine() throws IOException {
+            boolean hasNext = reader.next(key, value);
+            if (hasNext)
+                return Bytes.toString(value.getBytes(), 0, value.getLength());
+            else
+                return null;
+        }
+
+        @Override
+        public void close() throws IOException {
+            reader.close();
+        }
+    }
+
+    private class CsvRowReader implements RowReader {
+        BufferedReader reader;
+
+        CsvRowReader(FileSystem fs, String path) throws IOException {
+            FSDataInputStream in = fs.open(new Path(path));
+            reader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
+        }
+
+        @Override
+        public String nextLine() throws IOException {
+            return reader.readLine();
+        }
+
+        @Override
+        public void close() throws IOException {
+            reader.close();
+        }
+
+    }
+
+}
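
The reader above tries each input file as a Hadoop SequenceFile first and falls
back to plain CSV when the open fails with a "not a SequenceFile" IOException
(or an EOFException on a near-empty file). A minimal usage sketch, modeled on
the unit test later in this commit; the path and column count are illustrative:

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.kylin.engine.mr.DFSFileTable;
    import org.apache.kylin.engine.mr.DFSFileTableReader;

    public class ReaderSketch {
        public static void main(String[] args) throws IOException {
            // DELIM_AUTO makes the reader probe \177, |, \t and , against the
            // expected column count (10 here) on the first line it reads
            DFSFileTableReader reader =
                    new DFSFileTableReader("file:///tmp/DW_SITES", DFSFileTable.DELIM_AUTO, 10);
            try {
                while (reader.next()) {
                    System.out.println(Arrays.toString(reader.getRow()));
                }
            } finally {
                reader.close(); // quietly closes every underlying SEQ/CSV row reader
            }
        }
    }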

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/engine-mr/src/main/java/org/apache/kylin/engine/mr/steps/FactDistinctColumnsJob.java
----------------------------------------------------------------------
diff --git a/engine-mr/src/main/java/org/apache/kylin/engine/mr/steps/FactDistinctColumnsJob.java b/engine-mr/src/main/java/org/apache/kylin/engine/mr/steps/FactDistinctColumnsJob.java
index 863ab8f..2eb694e 100644
--- a/engine-mr/src/main/java/org/apache/kylin/engine/mr/steps/FactDistinctColumnsJob.java
+++ b/engine-mr/src/main/java/org/apache/kylin/engine/mr/steps/FactDistinctColumnsJob.java
@@ -1,160 +1,160 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *     http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-
-package org.apache.kylin.engine.mr.steps;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.cli.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.kylin.common.KylinConfig;
-import org.apache.kylin.cube.CubeInstance;
-import org.apache.kylin.cube.CubeManager;
-import org.apache.kylin.cube.CubeSegment;
-import org.apache.kylin.engine.mr.IMRInput.IMRTableInputFormat;
-import org.apache.kylin.engine.mr.MRUtil;
-import org.apache.kylin.engine.mr.common.AbstractHadoopJob;
-import org.apache.kylin.engine.mr.common.BatchConstants;
-import org.apache.kylin.metadata.model.TblColRef;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- */
-public class FactDistinctColumnsJob extends AbstractHadoopJob {
-    protected static final Logger logger = LoggerFactory.getLogger(FactDistinctColumnsJob.class);
-
-    @Override
-    public int run(String[] args) throws Exception {
-        Options options = new Options();
-
-        try {
-            options.addOption(OPTION_JOB_NAME);
-            options.addOption(OPTION_CUBE_NAME);
-            options.addOption(OPTION_CUBING_JOB_ID);
-            options.addOption(OPTION_OUTPUT_PATH);
-            options.addOption(OPTION_SEGMENT_ID);
-            options.addOption(OPTION_STATISTICS_ENABLED);
-            options.addOption(OPTION_STATISTICS_OUTPUT);
-            options.addOption(OPTION_STATISTICS_SAMPLING_PERCENT);
-            parseOptions(options, args);
-
-            job = Job.getInstance(getConf(), getOptionValue(OPTION_JOB_NAME));
-            String job_id = getOptionValue(OPTION_CUBING_JOB_ID);
-            job.getConfiguration().set(BatchConstants.ARG_CUBING_JOB_ID, job_id);
-            String cubeName = getOptionValue(OPTION_CUBE_NAME);
-            Path output = new Path(getOptionValue(OPTION_OUTPUT_PATH));
-
-            String segmentID = getOptionValue(OPTION_SEGMENT_ID);
-            String statistics_enabled = getOptionValue(OPTION_STATISTICS_ENABLED);
-            String statistics_output = getOptionValue(OPTION_STATISTICS_OUTPUT);
-            String statistics_sampling_percent = getOptionValue(OPTION_STATISTICS_SAMPLING_PERCENT);
-
-            // ----------------------------------------------------------------------------
-            // add metadata to distributed cache
-            CubeManager cubeMgr = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
-            CubeInstance cube = cubeMgr.getCube(cubeName);
-            List<TblColRef> columnsNeedDict = cubeMgr.getAllDictColumnsOnFact(cube.getDescriptor());
-
-            int reducerCount = columnsNeedDict.size();
-            int uhcReducerCount = cube.getConfig().getUHCReducerCount();
-
-            int[] uhcIndex = cubeMgr.getUHCIndex(cube.getDescriptor());
-            for(int index : uhcIndex) {
-                if(index == 1) {
-                    reducerCount += uhcReducerCount - 1;
-                }
-            }
-
-            if (reducerCount > 255) {
-                throw new IllegalArgumentException("The max reducer number for FactDistinctColumnsJob is 255, but now it is " + reducerCount + ", decrease 'kylin.job.uhc.reducer.count'");
-            }
-
-
-            job.getConfiguration().set(BatchConstants.CFG_CUBE_NAME, cubeName);
-            job.getConfiguration().set(BatchConstants.CFG_CUBE_SEGMENT_ID, segmentID);
-            job.getConfiguration().set(BatchConstants.CFG_STATISTICS_ENABLED, statistics_enabled);
-            job.getConfiguration().set(BatchConstants.CFG_STATISTICS_OUTPUT, statistics_output);
-            job.getConfiguration().set(BatchConstants.CFG_STATISTICS_SAMPLING_PERCENT, statistics_sampling_percent);
-
-            logger.info("Starting: " + job.getJobName());
-
-            setJobClasspath(job, cube.getConfig());
-
-            CubeSegment segment = cube.getSegmentById(segmentID);
-            if (segment == null) {
-                logger.error("Failed to find {} in cube {}", segmentID, cube);
-                for (CubeSegment s : cube.getSegments()) {
-                    logger.error(s.getName() + " with status " + s.getStatus());
-                }
-                throw new IllegalStateException();
-            } else {
-                logger.info("Found segment: " + segment);
-            }
-            setupMapper(cube.getSegmentById(segmentID));
-            setupReducer(output, "true".equalsIgnoreCase(statistics_enabled) ? reducerCount + 2 : reducerCount);
-
-            attachKylinPropsAndMetadata(cube, job.getConfiguration());
-
-            return waitForCompletion(job);
-
-        } finally {
-            if (job != null)
-                cleanupTempConfFile(job.getConfiguration());
-        }
-
-    }
-
-    private void setupMapper(CubeSegment cubeSeg) throws IOException {
-        IMRTableInputFormat flatTableInputFormat = MRUtil.getBatchCubingInputSide(cubeSeg).getFlatTableInputFormat();
-        flatTableInputFormat.configureJob(job);
-
-        job.setMapperClass(FactDistinctHiveColumnsMapper.class);
-        job.setCombinerClass(FactDistinctColumnsCombiner.class);
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+package org.apache.kylin.engine.mr.steps;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.kylin.common.KylinConfig;
+import org.apache.kylin.cube.CubeInstance;
+import org.apache.kylin.cube.CubeManager;
+import org.apache.kylin.cube.CubeSegment;
+import org.apache.kylin.engine.mr.IMRInput.IMRTableInputFormat;
+import org.apache.kylin.engine.mr.MRUtil;
+import org.apache.kylin.engine.mr.common.AbstractHadoopJob;
+import org.apache.kylin.engine.mr.common.BatchConstants;
+import org.apache.kylin.metadata.model.TblColRef;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ */
+public class FactDistinctColumnsJob extends AbstractHadoopJob {
+    protected static final Logger logger = LoggerFactory.getLogger(FactDistinctColumnsJob.class);
+
+    @Override
+    public int run(String[] args) throws Exception {
+        Options options = new Options();
+
+        try {
+            options.addOption(OPTION_JOB_NAME);
+            options.addOption(OPTION_CUBE_NAME);
+            options.addOption(OPTION_CUBING_JOB_ID);
+            options.addOption(OPTION_OUTPUT_PATH);
+            options.addOption(OPTION_SEGMENT_ID);
+            options.addOption(OPTION_STATISTICS_ENABLED);
+            options.addOption(OPTION_STATISTICS_OUTPUT);
+            options.addOption(OPTION_STATISTICS_SAMPLING_PERCENT);
+            parseOptions(options, args);
+
+            job = Job.getInstance(getConf(), getOptionValue(OPTION_JOB_NAME));
+            String job_id = getOptionValue(OPTION_CUBING_JOB_ID);
+            job.getConfiguration().set(BatchConstants.ARG_CUBING_JOB_ID, job_id);
+            String cubeName = getOptionValue(OPTION_CUBE_NAME);
+            Path output = new Path(getOptionValue(OPTION_OUTPUT_PATH));
+
+            String segmentID = getOptionValue(OPTION_SEGMENT_ID);
+            String statistics_enabled = getOptionValue(OPTION_STATISTICS_ENABLED);
+            String statistics_output = getOptionValue(OPTION_STATISTICS_OUTPUT);
+            String statistics_sampling_percent = getOptionValue(OPTION_STATISTICS_SAMPLING_PERCENT);
+
+            // ----------------------------------------------------------------------------
+            // add metadata to distributed cache
+            CubeManager cubeMgr = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
+            CubeInstance cube = cubeMgr.getCube(cubeName);
+            List<TblColRef> columnsNeedDict = cubeMgr.getAllDictColumnsOnFact(cube.getDescriptor());
+
+            int reducerCount = columnsNeedDict.size();
+            int uhcReducerCount = cube.getConfig().getUHCReducerCount();
+
+            int[] uhcIndex = cubeMgr.getUHCIndex(cube.getDescriptor());
+            for(int index : uhcIndex) {
+                if(index == 1) {
+                    reducerCount += uhcReducerCount - 1;
+                }
+            }
+
+            if (reducerCount > 255) {
+                throw new IllegalArgumentException("The max reducer number for FactDistinctColumnsJob is 255, but now it is " + reducerCount + ", decrease 'kylin.engine.mr.uhc-reducer-count'");
+            }
+
+
+            job.getConfiguration().set(BatchConstants.CFG_CUBE_NAME, cubeName);
+            job.getConfiguration().set(BatchConstants.CFG_CUBE_SEGMENT_ID, segmentID);
+            job.getConfiguration().set(BatchConstants.CFG_STATISTICS_ENABLED, statistics_enabled);
+            job.getConfiguration().set(BatchConstants.CFG_STATISTICS_OUTPUT, statistics_output);
+            job.getConfiguration().set(BatchConstants.CFG_STATISTICS_SAMPLING_PERCENT, statistics_sampling_percent);
+
+            logger.info("Starting: " + job.getJobName());
+
+            setJobClasspath(job, cube.getConfig());
+
+            CubeSegment segment = cube.getSegmentById(segmentID);
+            if (segment == null) {
+                logger.error("Failed to find {} in cube {}", segmentID, cube);
+                for (CubeSegment s : cube.getSegments()) {
+                    logger.error(s.getName() + " with status " + s.getStatus());
+                }
+                throw new IllegalStateException();
+            } else {
+                logger.info("Found segment: " + segment);
+            }
+            setupMapper(cube.getSegmentById(segmentID));
+            setupReducer(output, "true".equalsIgnoreCase(statistics_enabled) ? reducerCount + 2 : reducerCount);
+
+            attachKylinPropsAndMetadata(cube, job.getConfiguration());
+
+            return waitForCompletion(job);
+
+        } finally {
+            if (job != null)
+                cleanupTempConfFile(job.getConfiguration());
+        }
+
+    }
+
+    private void setupMapper(CubeSegment cubeSeg) throws IOException {
+        IMRTableInputFormat flatTableInputFormat = MRUtil.getBatchCubingInputSide(cubeSeg).getFlatTableInputFormat();
+        flatTableInputFormat.configureJob(job);
+
+        job.setMapperClass(FactDistinctHiveColumnsMapper.class);
+        job.setCombinerClass(FactDistinctColumnsCombiner.class);
         job.setMapOutputKeyClass(SelfDefineSortableKey.class);
-        job.setMapOutputValueClass(Text.class);
-    }
-
-    private void setupReducer(Path output, int numberOfReducers) throws IOException {
-        job.setReducerClass(FactDistinctColumnsReducer.class);
-        job.setOutputFormatClass(SequenceFileOutputFormat.class);
-        job.setOutputKeyClass(NullWritable.class);
-        job.setOutputValueClass(Text.class);
-        job.setPartitionerClass(FactDistinctColumnPartitioner.class);
-        job.setNumReduceTasks(numberOfReducers);
-
-        FileOutputFormat.setOutputPath(job, output);
-        job.getConfiguration().set(BatchConstants.CFG_OUTPUT_PATH, output.toString());
-
-        deletePath(job.getConfiguration(), output);
-    }
-
-    public static void main(String[] args) throws Exception {
-        FactDistinctColumnsJob job = new FactDistinctColumnsJob();
-        int exitCode = ToolRunner.run(job, args);
-        System.exit(exitCode);
-    }
-
-}
+        job.setMapOutputValueClass(Text.class);
+    }
+
+    private void setupReducer(Path output, int numberOfReducers) throws IOException {
+        job.setReducerClass(FactDistinctColumnsReducer.class);
+        job.setOutputFormatClass(SequenceFileOutputFormat.class);
+        job.setOutputKeyClass(NullWritable.class);
+        job.setOutputValueClass(Text.class);
+        job.setPartitionerClass(FactDistinctColumnPartitioner.class);
+        job.setNumReduceTasks(numberOfReducers);
+
+        FileOutputFormat.setOutputPath(job, output);
+        job.getConfiguration().set(BatchConstants.CFG_OUTPUT_PATH, output.toString());
+
+        deletePath(job.getConfiguration(), output);
+    }
+
+    public static void main(String[] args) throws Exception {
+        FactDistinctColumnsJob job = new FactDistinctColumnsJob();
+        int exitCode = ToolRunner.run(job, args);
+        System.exit(exitCode);
+    }
+
+}
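
The reducer sizing in run() above starts with one reducer per dictionary
column, widens every ultra-high-cardinality column (flagged 1 in uhcIndex) to
kylin.engine.mr.uhc-reducer-count reducers, caps the total at 255, and adds two
extra reducers when statistics are enabled. A self-contained sketch of that
arithmetic, with made-up sample values:

    public class ReducerCountSketch {
        static int reducerCount(int dictColumnCount, int[] uhcIndex, int uhcReducerCount) {
            int reducerCount = dictColumnCount; // one reducer per dictionary column
            for (int index : uhcIndex) {
                if (index == 1) {
                    // a UHC column gets uhcReducerCount reducers instead of one
                    reducerCount += uhcReducerCount - 1;
                }
            }
            if (reducerCount > 255) {
                throw new IllegalArgumentException("max is 255, got " + reducerCount
                        + ", decrease 'kylin.engine.mr.uhc-reducer-count'");
            }
            return reducerCount;
        }

        public static void main(String[] args) {
            // 5 dict columns, the 2nd and 4th are UHC, 3 reducers per UHC column
            System.out.println(reducerCount(5, new int[] { 0, 1, 0, 1, 0 }, 3)); // 9
        }
    }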

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/engine-mr/src/test/java/org/apache/kylin/engine/mr/TableReaderTest.java
----------------------------------------------------------------------
diff --git a/engine-mr/src/test/java/org/apache/kylin/engine/mr/TableReaderTest.java b/engine-mr/src/test/java/org/apache/kylin/engine/mr/TableReaderTest.java
index 8790152..4c43dbc 100644
--- a/engine-mr/src/test/java/org/apache/kylin/engine/mr/TableReaderTest.java
+++ b/engine-mr/src/test/java/org/apache/kylin/engine/mr/TableReaderTest.java
@@ -1,46 +1,46 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *     http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-
-package org.apache.kylin.engine.mr;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.junit.Test;
-
-/**
- * @author yangli9
- * 
- */
-public class TableReaderTest {
-
-    @Test
-    public void testBasicReader() throws IOException {
-        File f = new File("src/test/resources/dict/DW_SITES");
-        DFSFileTableReader reader = new DFSFileTableReader("file://" + f.getAbsolutePath(), DFSFileTable.DELIM_AUTO, 10);
-        while (reader.next()) {
-            assertEquals("[-1, Korea Auction.co.kr, S, 48, 0, 111, 2009-02-11, , DW_OFFPLAT, ]", Arrays.toString(reader.getRow()));
-            break;
-        }
-        reader.close();
-
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+package org.apache.kylin.engine.mr;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.junit.Test;
+
+/**
+ * @author yangli9
+ * 
+ */
+public class TableReaderTest {
+
+    @Test
+    public void testBasicReader() throws IOException {
+        File f = new File("src/test/resources/dict/DW_SITES");
+        DFSFileTableReader reader = new DFSFileTableReader("file://" + f.getAbsolutePath(), DFSFileTable.DELIM_AUTO, 10);
+        while (reader.next()) {
+            assertEquals("[-1, Korea Auction.co.kr, S, 48, 0, 111, 2009-02-11, , DW_OFFPLAT, ]", Arrays.toString(reader.getRow()));
+            break;
+        }
+        reader.close();
+
+    }
+}

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/examples/test_case_data/localmeta/kylin.properties
----------------------------------------------------------------------
diff --git a/examples/test_case_data/localmeta/kylin.properties b/examples/test_case_data/localmeta/kylin.properties
index c46442e..8854ac1 100644
--- a/examples/test_case_data/localmeta/kylin.properties
+++ b/examples/test_case_data/localmeta/kylin.properties
@@ -19,10 +19,10 @@
 
 # Optional information for the owner of kylin platform, it can be your team's email
 # Currently it will be attached to each kylin's htable attribute
-kylin.owner=whoami@kylin.apache.org
+kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
 
 # List of web servers in use, this enables one web server instance to sync up with other servers.
-#kylin.rest.servers=localhost:7070
+#kylin.server.cluster-servers=localhost:7070
 
 ### SOURCE ###
 
@@ -36,10 +36,10 @@ kylin.metadata.url=
 kylin.storage.url=hbase
 
 # Working folder in HDFS, make sure user has the right access to the hdfs directory
-kylin.hdfs.working.dir=/kylin
+kylin.env.hdfs-working-dir=/kylin
 
 # Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
-kylin.hbase.default.compression.codec=snappy
+kylin.storage.hbase.compression-codec=snappy
 
 ### JOB ###
 
@@ -47,33 +47,33 @@ kylin.hbase.default.compression.codec=snappy
 kylin.job.retry=0
 
 # If true, job engine will not assume that the hadoop CLI resides on the same server as itself
-# you will have to specify kylin.job.remote.cli.hostname, kylin.job.remote.cli.username and kylin.job.remote.cli.password
+# you will have to specify kylin.job.remote-cli-hostname, kylin.job.remote-cli-username and kylin.job.remote-cli-password
 # It should not be set to "true" unless you're NOT running Kylin.sh on a hadoop client machine 
 # (Thus kylin instance has to ssh to another real hadoop client machine to execute hbase,hive,hadoop commands)
-kylin.job.run.as.remote.cmd=false
+kylin.job.use-remote-cli=false
 
-# Only necessary when kylin.job.run.as.remote.cmd=true
-kylin.job.remote.cli.hostname=
+# Only necessary when kylin.job.use-remote-cli=true
+kylin.job.remote-cli-hostname=
 
-kylin.job.remote.cli.port=22
+kylin.job.remote-cli-port=22
 
-# Only necessary when kylin.job.run.as.remote.cmd=true
-kylin.job.remote.cli.username=
+# Only necessary when kylin.job.use-remote-cli=true
+kylin.job.remote-cli-username=
 
-# Only necessary when kylin.job.run.as.remote.cmd=true
-kylin.job.remote.cli.password=
+# Only necessary when kylin.job.use-remote-cli=true
+kylin.job.remote-cli-password=
 
 # Used by test cases to prepare synthetic data for sample cube
-kylin.job.remote.cli.working.dir=/tmp/kylin
+kylin.job.remote-cli-working-dir=/tmp/kylin
 
 # Max count of concurrent jobs running
-kylin.job.concurrent.max.limit=10
+kylin.job.max-concurrent-jobs=10
 
 # Time interval to check hadoop job status
-kylin.job.yarn.app.rest.check.interval.seconds=10
+kylin.engine.mr.yarn-check-interval-seconds=10
 
 # for test
-kylin.job.uhc.reducer.count=1
+kylin.engine.mr.uhc-reducer-count=1
 
 ### CUBE ###
 
@@ -118,16 +118,16 @@ saml.context.contextPath=/kylin
 
 ### MAIL ###
 # If true, will send email notification;
-mail.enabled=false
-mail.host=mail.com
-mail.username=
-mail.password=need_reset
-mail.sender=
+kylin.job.notification-enabled=false
+kylin.job.notification-mail-host=mail.com
+kylin.job.notification-mail-username=
+kylin.job.notification-mail-password=need_reset
+kylin.job.notification-mail-sender=
 
 ### OTHER ###
 
 # for tests
-kylin.test.bcc.old.key=some-value
-kylin.job.mr.config.override.test1=test1
-kylin.job.mr.config.override.test2=test2
-kylin.job.controller.lock=org.apache.kylin.job.lock.MockJobLock
+kylin.test.bcc.new.key=some-value
+kylin.engine.mr.config-override.test1=test1
+kylin.engine.mr.config-override.test2=test2
+kylin.job.lock=org.apache.kylin.job.lock.MockJobLock
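
KYLIN-2195 renames virtually every configuration key (note the test pair
kylin.test.bcc.old.key -> kylin.test.bcc.new.key above, where "bcc" suggests a
backwards-compatibility check). Below is a hypothetical sketch of remapping a
legacy kylin.properties onto the new key names; the RENAMES table lists only a
few pairs from this diff, and the helper is an illustration, not Kylin's actual
migration code:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Properties;

    public class LegacyKeyRemapSketch {
        // a few renames taken from this diff; a real table would cover them all
        private static final Map<String, String> RENAMES = new LinkedHashMap<>();
        static {
            RENAMES.put("kylin.hdfs.working.dir", "kylin.env.hdfs-working-dir");
            RENAMES.put("kylin.job.run.as.remote.cmd", "kylin.job.use-remote-cli");
            RENAMES.put("kylin.job.uhc.reducer.count", "kylin.engine.mr.uhc-reducer-count");
            RENAMES.put("kylin.hbase.default.compression.codec", "kylin.storage.hbase.compression-codec");
        }

        static Properties remap(Properties legacy) {
            Properties current = new Properties();
            for (String key : legacy.stringPropertyNames()) {
                // keep the original key when no rename is registered
                current.setProperty(RENAMES.getOrDefault(key, key), legacy.getProperty(key));
            }
            return current;
        }
    }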

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/examples/test_case_data/localmeta/kylin_job_conf.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/localmeta/kylin_job_conf.xml b/examples/test_case_data/localmeta/kylin_job_conf.xml
index 7755f0c..8a4406a 100644
--- a/examples/test_case_data/localmeta/kylin_job_conf.xml
+++ b/examples/test_case_data/localmeta/kylin_job_conf.xml
@@ -1,73 +1,73 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-
-    <property>
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+
+    <property>
         <name>mapreduce.map.output.compress</name>
-        <value>true</value>
-        <description>Compress map outputs</description>
-    </property>
-
-    <property>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <property>
         <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for map outputs
-        </description>
-    </property>
-
-    <property>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+
+    <property>
         <name>mapreduce.output.fileoutputformat.compress</name>
-        <value>true</value>
-        <description>Compress the output of a MapReduce job</description>
-    </property>
-
-    <property>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+
+    <property>
         <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for job outputs
-        </description>
-    </property>
-
-    <property>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+
+    <property>
         <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-
-    <property>
-        <name>mapreduce.job.max.split.locations</name>
-        <value>2000</value>
-        <description>No description</description>
-    </property>
-
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
 </configuration>
\ No newline at end of file
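
The kylin_job_conf.xml hunk above is line-ending churn only; no property value
changes. For orientation, a minimal sketch of loading such a file into a Hadoop
Configuration and reading back one of its compression switches (the local path
is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class JobConfSketch {
        public static void main(String[] args) {
            // start from an empty Configuration so only this file's settings apply
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("examples/test_case_data/localmeta/kylin_job_conf.xml"));

            // mapreduce.map.output.compress is set to true in the file above
            System.out.println("map output compression: "
                    + conf.getBoolean("mapreduce.map.output.compress", false));
        }
    }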

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/examples/test_case_data/sandbox/hive-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/hive-site.xml b/examples/test_case_data/sandbox/hive-site.xml
index 1e78107..d4b7745 100644
--- a/examples/test_case_data/sandbox/hive-site.xml
+++ b/examples/test_case_data/sandbox/hive-site.xml
@@ -118,7 +118,7 @@
 
     <property>
         <name>hive.conf.restricted.list</name>
-        <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+        <value>hive.security.authenticator.manager,hive.security.authorization.manager,kylin.source.hive.users.in.admin.role</value>
     </property>
 
     <property>
@@ -722,12 +722,12 @@
     </property>
 
     <property>
-        <name>hive.user.install.directory</name>
+        <name>kylin.source.hive.user.install.directory</name>
         <value>/user/</value>
     </property>
 
     <property>
-        <name>hive.users.in.admin.role</name>
+        <name>kylin.source.hive.users.in.admin.role</name>
         <value>hue,hive</value>
     </property>
 

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/examples/test_case_data/sandbox/kylin.properties
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/kylin.properties b/examples/test_case_data/sandbox/kylin.properties
index de1250f..3c066a6 100644
--- a/examples/test_case_data/sandbox/kylin.properties
+++ b/examples/test_case_data/sandbox/kylin.properties
@@ -22,19 +22,19 @@ kyin.server.mode=all
 
 # Optional information for the owner of kylin platform, it can be your team's email
 # Currently it will be attached to each kylin's htable attribute
-kylin.owner=whoami@kylin.apache.org
+kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
 
 # List of web servers in use, this enables one web server instance to sync up with other servers.
-kylin.rest.servers=localhost:7070
+kylin.server.cluster-servers=localhost:7070
 
 # Display timezone on UI,format like[GMT+N or GMT-N]
-kylin.rest.timezone=GMT+8
+kylin.web.timezone=GMT+8
 
 ### SOURCE ###
 
 # Hive client, valid value [cli, beeline]
-kylin.hive.client=cli
-#kylin.hive.beeline.params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u 'jdbc:hive2://localhost:10000'
+kylin.source.hive.client=cli
+#kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u 'jdbc:hive2://localhost:10000'
 
 ### STORAGE ###
 
@@ -45,14 +45,14 @@ kylin.metadata.url=kylin_metadata@hbase
 kylin.storage.url=hbase
 
 # Working folder in HDFS, make sure user has the right access to the hdfs directory
-kylin.hdfs.working.dir=/kylin
+kylin.env.hdfs-working-dir=/kylin
 
 # HBase cluster FileSystem, which serves hbase; format: hdfs://hbase-cluster:8020
 # Leave empty if hbase running on same cluster with hive and mapreduce
-#kylin.hbase.cluster.fs=
+#kylin.storage.hbase.cluster-fs=
 
 
-kylin.job.mapreduce.default.reduce.input.mb=500
+kylin.engine.mr.reduce-input-mb=500
 
 ### JOB ###
 
@@ -60,66 +60,66 @@ kylin.job.mapreduce.default.reduce.input.mb=500
 kylin.job.retry=0
 
 # If true, job engine will not assume that the hadoop CLI resides on the same server as itself
-# you will have to specify kylin.job.remote.cli.hostname, kylin.job.remote.cli.username and kylin.job.remote.cli.password
+# you will have to specify kylin.job.remote-cli-hostname, kylin.job.remote-cli-username and kylin.job.remote-cli-password
 # It should not be set to "true" unless you're NOT running Kylin.sh on a hadoop client machine
 # (Thus kylin instance has to ssh to another real hadoop client machine to execute hbase,hive,hadoop commands)
-kylin.job.run.as.remote.cmd=false
+kylin.job.use-remote-cli=false
 
-# Only necessary when kylin.job.run.as.remote.cmd=true
-kylin.job.remote.cli.hostname=sandbox
+# Only necessary when kylin.job.use-remote-cli=true
+kylin.job.remote-cli-hostname=sandbox
 
-kylin.job.remote.cli.username=root
+kylin.job.remote-cli-username=root
 
-# Only necessary when kylin.job.run.as.remote.cmd=true
-kylin.job.remote.cli.password=hadoop
+# Only necessary when kylin.job.use-remote-cli=true
+kylin.job.remote-cli-password=hadoop
 
 # Used by test cases to prepare synthetic data for sample cube
-kylin.job.remote.cli.working.dir=/tmp/kylin
+kylin.job.remote-cli-working-dir=/tmp/kylin
 
 # Max count of concurrent jobs running
-kylin.job.concurrent.max.limit=10
+kylin.job.max-concurrent-jobs=10
 
 # Time interval to check hadoop job status
-kylin.job.yarn.app.rest.check.interval.seconds=10
+kylin.engine.mr.yarn-check-interval-seconds=10
 
 # Hive database name for putting the intermediate flat tables
-kylin.job.hive.database.for.intermediatetable=default
+kylin.source.hive.database-for-flat-table=default
 
 #default compression codec for htable,snappy,lzo,gzip,lz4
-kylin.hbase.default.compression.codec=gzip
+kylin.storage.hbase.compression-codec=gzip
 
 # Max reducer number
-kylin.job.mapreduce.max.reducer.number=5
+kylin.engine.mr.max-reducer-number=5
 
 # The percentage of the sampling, default 100%
-kylin.job.cubing.inmem.sampling.percent=100
+kylin.job.sampling-percentage=100
 
 # The cut size for an hbase region, in GB.
 # E.g., for a cube whose capacity is marked as "SMALL", split a region per 10GB by default
-kylin.hbase.region.cut=0.1
-kylin.hbase.region.count.max=5
+kylin.storage.hbase.region-cut-gb=0.1
+kylin.storage.hbase.max-region-count=5
 
 # The hfile size in GB; a smaller hfile gives the hfile-converting MR job more reducers and runs faster
 # set 0 to disable this optimization
-kylin.hbase.hfile.size.gb=2
+kylin.storage.hbase.hfile-size-gb=2
 
 kylin.query.udf.massin=org.apache.kylin.query.udf.MassInUDF
 kylin.query.udf.version=org.apache.kylin.query.udf.VersionUDF
 
 # for test
-kylin.job.controller.lock=org.apache.kylin.job.lock.MockJobLock
-kylin.job.uhc.reducer.count=1
+kylin.job.lock=org.apache.kylin.job.lock.MockJobLock
+kylin.engine.mr.uhc-reducer-count=1
 
 ### CUBE ###
 
 # dictionary forest cut
-kylin.dictionary.forest.trie.size.max_mb=500
+kylin.dictionary.forest-trie-max-mb=500
 
 # 'auto', 'inmem', 'layer' or 'random' for testing
 kylin.cube.algorithm=random
 
 # Enable/disable ACL check for cube query
-kylin.query.security.enabled=true
+kylin.query.security-enabled=true
 
 ### SECURITY ###
 
@@ -135,7 +135,7 @@ acl.adminRole=ROLE_ADMIN
 ### MAIL ###
 
 # If true, will send email notification;
-mail.enabled=false
+kylin.job.notification-enabled=false
 
 ### WEB ###
 
@@ -159,7 +159,7 @@ kylin.web.contact_mail=
 ### OTHER ###
 
 # kylin query metrics percentiles intervals default=60, 300, 3600
-kylin.query.metrics.percentiles.intervals=60, 360, 3600
+kylin.server.query-metrics-percentiles-intervals=60, 360, 3600
 
 # Env DEV|QA|PROD
-deploy.env=DEV
+kylin.env=DEV
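
Among the storage keys renamed above, kylin.storage.hbase.region-cut-gb and
kylin.storage.hbase.max-region-count bound how many regions a cube's HTable is
pre-split into. A rough sketch of the computation those two knobs imply; the
formula is an assumption for illustration, not taken from Kylin's source:

    public class RegionCountSketch {
        // assumed logic: one region per cutGb of estimated cube size,
        // clamped to [1, maxRegionCount]
        static int regionCount(double estimatedCubeSizeGb, double cutGb, int maxRegionCount) {
            int regions = (int) Math.ceil(estimatedCubeSizeGb / cutGb);
            return Math.max(1, Math.min(regions, maxRegionCount));
        }

        public static void main(String[] args) {
            // sandbox settings above: region-cut-gb=0.1, max-region-count=5
            System.out.println(regionCount(0.35, 0.1, 5)); // 4
            System.out.println(regionCount(2.0, 0.1, 5));  // clamped to 5
        }
    }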

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/examples/test_case_data/sandbox/kylin_job_conf.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/kylin_job_conf.xml b/examples/test_case_data/sandbox/kylin_job_conf.xml
index c2046e6..8f5817e 100644
--- a/examples/test_case_data/sandbox/kylin_job_conf.xml
+++ b/examples/test_case_data/sandbox/kylin_job_conf.xml
@@ -1,79 +1,79 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.maxattempts</name>
-        <value>2</value>
-    </property>
-
-    <property>
-        <name>mapreduce.map.output.compress</name>
-        <value>true</value>
-        <description>Compress map outputs</description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for map outputs
-        </description>
-    </property>
-
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress</name>
-        <value>true</value>
-        <description>Compress the output of a MapReduce job</description>
-    </property>
-
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for job outputs
-        </description>
-    </property>
-
-    <!--
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.job.max.split.locations</name>
-        <value>2000</value>
-        <description>No description</description>
-    </property>
-
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
-
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.maxattempts</name>
+        <value>2</value>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
 </configuration>
\ No newline at end of file
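[Note on the file above: it enables Snappy compression for both map outputs and final job outputs. As a quick sanity check, the effective values can be read back through the standard Hadoop Configuration API; a minimal sketch, assuming the file is resolvable as a classpath resource (that resource name is the only assumption here):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.addResource("kylin_job_conf.xml"); // resolved from the classpath
    boolean mapCompress = conf.getBoolean("mapreduce.map.output.compress", false);
    String mapCodec = conf.get("mapreduce.map.output.compress.codec");
    System.out.println(mapCompress + " via " + mapCodec); // expect: true via ...SnappyCodec

Keep in mind that SnappyCodec needs the native Snappy libraries installed on every node; without them, tasks fail at runtime rather than at configuration time.]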

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/examples/test_case_data/sandbox/kylin_job_conf_inmem.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/kylin_job_conf_inmem.xml b/examples/test_case_data/sandbox/kylin_job_conf_inmem.xml
index 42f1cc4..9e8fc84 100644
--- a/examples/test_case_data/sandbox/kylin_job_conf_inmem.xml
+++ b/examples/test_case_data/sandbox/kylin_job_conf_inmem.xml
@@ -1,97 +1,97 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.maxattempts</name>
-        <value>2</value>
-    </property>
-
-    <property>
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.maxattempts</name>
+        <value>2</value>
+    </property>
+
+    <property>
         <name>mapreduce.map.output.compress</name>
-        <value>true</value>
-        <description>Compress map outputs</description>
-    </property>
-
-    <property>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <property>
         <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for map outputs
-        </description>
-    </property>
-
-    <property>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+
+    <property>
         <name>mapreduce.output.fileoutputformat.compress</name>
-        <value>true</value>
-        <description>Compress the output of a MapReduce job</description>
-    </property>
-
-    <property>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+
+    <property>
         <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for job outputs
-        </description>
-    </property>
-
-    <property>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+
+    <property>
         <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-
-    <property>
-        <name>mapreduce.job.max.split.locations</name>
-        <value>2000</value>
-        <description>No description</description>
-    </property>
-
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
-
-    <!--Additional config for in-mem cubing, giving mapper more memory -->
-    <property>
-        <name>mapreduce.map.memory.mb</name>
-        <value>512</value>
-        <description></description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.java.opts</name>
-        <value>-Xmx384m</value>
-        <description></description>
-    </property>
-
-    <property>
-        <name>mapreduce.task.io.sort.mb</name>
-        <value>100</value>
-        <description></description>
-    </property>
-
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <!--Additional config for in-mem cubing, giving mapper more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>512</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx384m</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>100</value>
+        <description></description>
+    </property>
+
 </configuration>
\ No newline at end of file
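[Note: the in-mem variant ends with three related sizing knobs: a 512 MB mapper container, a 384 MB JVM heap, and a 100 MB sort buffer. They must nest, since the heap lives inside the container and the sort buffer is allocated on the heap; a minimal sketch of the invariant, with the values taken from the file above:

    // sizing invariant for the in-mem cubing mapper
    int containerMb = 512; // mapreduce.map.memory.mb
    int heapMb = 384;      // -Xmx in mapreduce.map.java.opts
    int sortMb = 100;      // mapreduce.task.io.sort.mb
    assert heapMb < containerMb; // headroom for JVM metaspace/native overhead
    assert sortMb < heapMb;      // the sort buffer lives on the heap

If any one of these is raised, the other two should be revisited together, or YARN will kill the container for exceeding its memory limit.]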

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/jdbc/src/main/resources/org-apache-kylin-jdbc.properties
----------------------------------------------------------------------
diff --git a/jdbc/src/main/resources/org-apache-kylin-jdbc.properties b/jdbc/src/main/resources/org-apache-kylin-jdbc.properties
index cab76a1..68918b1 100644
--- a/jdbc/src/main/resources/org-apache-kylin-jdbc.properties
+++ b/jdbc/src/main/resources/org-apache-kylin-jdbc.properties
@@ -1,28 +1,28 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-driver.name=Kylin JDBC Driver
-driver.version=0.1
-product.name=Kylin
-product.version=0.1
-jdbc.compliant=true
-driver.version.major=0
-driver.version.minor=8
-database.version.major=0
-database.version.minor=8
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+driver.name=Kylin JDBC Driver
+driver.version=0.1
+product.name=Kylin
+product.version=0.1
+jdbc.compliant=true
+driver.version.major=0
+driver.version.minor=8
+database.version.major=0
+database.version.minor=8
 build.timestamp=20140918-2017
\ No newline at end of file
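[Note: this file is a plain java.util.Properties resource that the driver reads from its own jar; a minimal sketch of such a lookup (the anchor class name and resource location are assumptions, not the driver's actual code):

    import java.io.InputStream;
    import java.util.Properties;

    Properties p = new Properties();
    try (InputStream in = SomeDriverClass.class // hypothetical anchor class
            .getResourceAsStream("/org-apache-kylin-jdbc.properties")) {
        p.load(in);
    }
    String name = p.getProperty("driver.name"); // "Kylin JDBC Driver"

Also worth flagging: driver.version (0.1) and driver.version.major/minor (0/8) disagree within the file itself; one of the two is presumably stale.]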

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/kylin-it/src/test/java/org/apache/kylin/job/BaseTestDistributedScheduler.java
----------------------------------------------------------------------
diff --git a/kylin-it/src/test/java/org/apache/kylin/job/BaseTestDistributedScheduler.java b/kylin-it/src/test/java/org/apache/kylin/job/BaseTestDistributedScheduler.java
index 910db49..1a0a39d 100644
--- a/kylin-it/src/test/java/org/apache/kylin/job/BaseTestDistributedScheduler.java
+++ b/kylin-it/src/test/java/org/apache/kylin/job/BaseTestDistributedScheduler.java
@@ -72,7 +72,7 @@ public class BaseTestDistributedScheduler extends HBaseMetadataTestCase {
     @BeforeClass
     public static void setup() throws Exception {
         staticCreateTestMetadata();
-        System.setProperty("kylin.job.controller.lock", "org.apache.kylin.storage.hbase.util.ZookeeperDistributedJobLock");
+        System.setProperty("kylin.job.lock", "org.apache.kylin.storage.hbase.util.ZookeeperDistributedJobLock");
 
         new File(confDstPath1).getParentFile().mkdirs();
         new File(confDstPath2).getParentFile().mkdirs();
@@ -131,7 +131,7 @@ public class BaseTestDistributedScheduler extends HBaseMetadataTestCase {
             zkClient = null;
         }
         
-        System.clearProperty("kylin.job.controller.lock");
+        System.clearProperty("kylin.job.lock");
         staticCleanupTestMetadata();
     }
 

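[Note: the rename from kylin.job.controller.lock to kylin.job.lock keeps the mechanism the same: the test selects the ZooKeeper-backed job lock by class name through a JVM system property. A minimal sketch of the safer try/finally form of this toggle (the body comment is a placeholder):

    System.setProperty("kylin.job.lock",
            "org.apache.kylin.storage.hbase.util.ZookeeperDistributedJobLock");
    try {
        // ... exercise the distributed scheduler ...
    } finally {
        System.clearProperty("kylin.job.lock"); // never leak the toggle into other tests
    }
]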
http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java
----------------------------------------------------------------------
diff --git a/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java b/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java
index 3b88dab..67b62d5 100644
--- a/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java
+++ b/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java
@@ -123,7 +123,7 @@ public class BuildCubeWithEngine {
                 throw new IOException("mkdir fails");
             }
         } catch (IOException e) {
-            throw new RuntimeException("failed to create kylin.hdfs.working.dir, Please make sure the user has right to access " + KylinConfig.getInstanceFromEnv().getHdfsWorkingDirectory(), e);
+            throw new RuntimeException("failed to create kylin.env.hdfs-working-dir, Please make sure the user has right to access " + KylinConfig.getInstanceFromEnv().getHdfsWorkingDirectory(), e);
         }
     }
 

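[Note: the message now cites the renamed kylin.env.hdfs-working-dir setting. For context, the guard it belongs to boils down to the following sketch (the FileSystem lookup is an assumption; getHdfsWorkingDirectory appears in the hunk above):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.kylin.common.KylinConfig;

    Path dir = new Path(KylinConfig.getInstanceFromEnv().getHdfsWorkingDirectory());
    FileSystem fs = FileSystem.get(new Configuration());
    if (!fs.exists(dir) && !fs.mkdirs(dir)) {
        throw new IOException("mkdir fails");
    }
]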
http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/kylin-it/src/test/java/org/apache/kylin/query/ITKylinQueryTest.java
----------------------------------------------------------------------
diff --git a/kylin-it/src/test/java/org/apache/kylin/query/ITKylinQueryTest.java b/kylin-it/src/test/java/org/apache/kylin/query/ITKylinQueryTest.java
index 9c1b640..90324b5 100644
--- a/kylin-it/src/test/java/org/apache/kylin/query/ITKylinQueryTest.java
+++ b/kylin-it/src/test/java/org/apache/kylin/query/ITKylinQueryTest.java
@@ -105,7 +105,7 @@ public class ITKylinQueryTest extends KylinTestBase {
             toggles.put(BackdoorToggles.DEBUG_TOGGLE_COPROCESSOR_BEHAVIOR, StorageSideBehavior.SCAN_FILTER_AGGR_CHECKMEM_WITHDELAY.toString());//delay 10ms for every scan
             BackdoorToggles.setToggles(toggles);
 
-            KylinConfig.getInstanceFromEnv().setProperty("kylin.query.coprocessor.timeout.seconds", "3");
+            KylinConfig.getInstanceFromEnv().setProperty("kylin.storage.hbase.coprocessor-timeout-seconds", "3");
 
             //these two cubes have RAW measures, which will disturb limit push down
             RemoveBlackoutRealizationsRule.blackList.add("CUBE[name=test_kylin_cube_without_slr_left_join_empty]");
@@ -118,7 +118,7 @@ public class ITKylinQueryTest extends KylinTestBase {
             RemoveBlackoutRealizationsRule.blackList.remove("CUBE[name=test_kylin_cube_without_slr_left_join_empty]");
             RemoveBlackoutRealizationsRule.blackList.remove("CUBE[name=test_kylin_cube_without_slr_inner_join_empty]");
 
-            KylinConfig.getInstanceFromEnv().setProperty("kylin.query.coprocessor.timeout.seconds", "0"); // set timeout to default
+            KylinConfig.getInstanceFromEnv().setProperty("kylin.storage.hbase.coprocessor-timeout-seconds", "0"); // set timeout to default
             BackdoorToggles.cleanToggles();
         }
     }

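[Note: both hunks here follow the same override-and-restore discipline as the scheduler test above, but on the shared KylinConfig instance rather than on JVM system properties; per the trailing comment, "0" restores the coprocessor timeout to its default rather than disabling it. A condensed sketch:

    KylinConfig cfg = KylinConfig.getInstanceFromEnv();
    cfg.setProperty("kylin.storage.hbase.coprocessor-timeout-seconds", "3"); // tight timeout for the test
    try {
        // ... run queries expected to trip the timeout ...
    } finally {
        cfg.setProperty("kylin.storage.hbase.coprocessor-timeout-seconds", "0"); // back to default
    }
]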
http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/kylin-it/src/test/resources/logging.properties
----------------------------------------------------------------------
diff --git a/kylin-it/src/test/resources/logging.properties b/kylin-it/src/test/resources/logging.properties
index 8f2713d..0a2b1ca 100644
--- a/kylin-it/src/test/resources/logging.properties
+++ b/kylin-it/src/test/resources/logging.properties
@@ -1,23 +1,23 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-handlers=java.util.logging.ConsoleHandler
-.level=INFO
-#org.apache.calcite.plan.RelOptPlanner.level=FINE
-java.util.logging.ConsoleHandler.level=ALL
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+handlers=java.util.logging.ConsoleHandler
+.level=INFO
+#org.apache.calcite.plan.RelOptPlanner.level=FINE
+java.util.logging.ConsoleHandler.level=ALL
 java.util.logging.ConsoleHandler.formatter=org.apache.kylin.common.util.MyLogFormatter
\ No newline at end of file
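[Note: this is a standard java.util.logging configuration; it only takes effect if the JVM is pointed at it, either via -Djava.util.logging.config.file=... or programmatically. A minimal sketch of the programmatic route (the relative path is an assumption; exception handling elided):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.util.logging.LogManager;

    try (InputStream in = new FileInputStream("src/test/resources/logging.properties")) {
        LogManager.getLogManager().readConfiguration(in);
    }

The custom formatter on the last line, org.apache.kylin.common.util.MyLogFormatter, must be on the classpath, or the ConsoleHandler falls back to its default output format.]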

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/query/src/test/java/org/apache/kylin/query/aggregate/DimCountDistinctAggFuncTest.java
----------------------------------------------------------------------
diff --git a/query/src/test/java/org/apache/kylin/query/aggregate/DimCountDistinctAggFuncTest.java b/query/src/test/java/org/apache/kylin/query/aggregate/DimCountDistinctAggFuncTest.java
index 9e6134a..794f1e5 100644
--- a/query/src/test/java/org/apache/kylin/query/aggregate/DimCountDistinctAggFuncTest.java
+++ b/query/src/test/java/org/apache/kylin/query/aggregate/DimCountDistinctAggFuncTest.java
@@ -66,7 +66,7 @@ public class DimCountDistinctAggFuncTest extends LocalFileMetadataTestCase {
 
     @Test
     public void testThreshold() {
-        System.setProperty("kylin.query.dim.distinct.max", "100");
+        System.setProperty("kylin.query.max-dimension-count-distinct", "100");
 
         DimCountDistinctAggFunc.DimDistinctCounter counter = DimCountDistinctAggFunc.init();
 
@@ -77,6 +77,6 @@ public class DimCountDistinctAggFuncTest extends LocalFileMetadataTestCase {
             counter = DimCountDistinctAggFunc.add(counter, i);
         }
 
-        System.clearProperty("kylin.query.dim.distinct.max");
+        System.clearProperty("kylin.query.max-dimension-count-distinct");
     }
 }

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/server/src/main/webapp/WEB-INF/kylin-servlet.xml
----------------------------------------------------------------------
diff --git a/server/src/main/webapp/WEB-INF/kylin-servlet.xml b/server/src/main/webapp/WEB-INF/kylin-servlet.xml
index b86505a..c1e76ab 100644
--- a/server/src/main/webapp/WEB-INF/kylin-servlet.xml
+++ b/server/src/main/webapp/WEB-INF/kylin-servlet.xml
@@ -12,17 +12,17 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:context="http://www.springframework.org/schema/context"
-       xmlns:mvc="http://www.springframework.org/schema/mvc"
-       xmlns:task="http://www.springframework.org/schema/task"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-    http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
-    http://www.springframework.org/schema/context
-    http://www.springframework.org/schema/context/spring-context-3.1.xsd
-    http://www.springframework.org/schema/task
-    http://www.springframework.org/schema/task/spring-task-3.1.xsd
-    http://www.springframework.org/schema/mvc
-    http://www.springframework.org/schema/mvc/spring-mvc-3.1.xsd">
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:mvc="http://www.springframework.org/schema/mvc"
+       xmlns:task="http://www.springframework.org/schema/task"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+    http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
+    http://www.springframework.org/schema/context
+    http://www.springframework.org/schema/context/spring-context-3.1.xsd
+    http://www.springframework.org/schema/task
+    http://www.springframework.org/schema/task/spring-task-3.1.xsd
+    http://www.springframework.org/schema/mvc
+    http://www.springframework.org/schema/mvc/spring-mvc-3.1.xsd">
 </beans>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/server/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/server/src/main/webapp/WEB-INF/web.xml b/server/src/main/webapp/WEB-INF/web.xml
index 7a4420c..8a43b33 100644
--- a/server/src/main/webapp/WEB-INF/web.xml
+++ b/server/src/main/webapp/WEB-INF/web.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0" encoding="UTF-8"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@@ -13,92 +13,92 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-<web-app xmlns="http://java.sun.com/xml/ns/javaee"
-           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-           xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
-		  http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
-           version="2.5">
-
-    <display-name>Kylin REST Service</display-name>
-
-    <welcome-file-list>
-        <welcome-file>/index.html</welcome-file>
-    </welcome-file-list>
-
-    <error-page>
-        <error-code>404</error-code>
-        <location>/index.html</location>
-    </error-page>
-
-    <context-param>
-        <param-name>log4jConfigLocation</param-name>
-        <param-value>classpath:kylin-server-log4j.properties</param-value>
-    </context-param>
-    <context-param>
-        <param-name>contextConfigLocation</param-name>
-        <param-value>
-        	classpath:applicationContext.xml
-			classpath:kylinSecurity.xml
-			classpath*:kylin-*-plugin.xml
-		</param-value>
-    </context-param>
-
-    <listener>
-        <listener-class>org.apache.kylin.rest.util.Log4jConfigListener</listener-class>
-    </listener>
-
-    <listener>
-        <listener-class>org.springframework.web.context.ContextLoaderListener</listener-class>
-    </listener>
-	<listener>
-		<listener-class>
-			org.springframework.security.web.session.HttpSessionEventPublisher
-		</listener-class>
-	</listener>
-	
-<filter>
-   <filter-name>CORS</filter-name>
-   <filter-class>com.thetransactioncompany.cors.CORSFilter</filter-class>       
-   <init-param>
-      <param-name>cors.supportedHeaders</param-name>
-      <param-value>Authorization,Origin, No-Cache, X-Requested-With, If-Modified-Since, Pragma, Last-Modified, Cache-Control, Expires, Content-Type, X-E4M-With, Accept</param-value>
-   </init-param>   
-  <init-param>
-      <param-name>cors.supportedMethods</param-name>
-      <param-value>GET, POST, PUT, DELETE, OPTIONS</param-value>
-   </init-param>     
-  <init-param>
-      <param-name>cors.supportsCredentials </param-name>
-      <param-value>true</param-value>
-   </init-param>    
-</filter>
-
-<filter-mapping>
-   <filter-name>CORS</filter-name>
-   <url-pattern>/*</url-pattern>
-</filter-mapping>
-	
-	<!--
-		Apply Spring Security Filter to all Requests 
-	 -->
-	<filter>
-        <filter-name>springSecurityFilterChain</filter-name>
-        <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class>
-    </filter>
-    <filter-mapping>
-        <filter-name>springSecurityFilterChain</filter-name>
-        <url-pattern>/*</url-pattern>
-    </filter-mapping>
-
-    <servlet>
-        <servlet-name>kylin</servlet-name>
-        <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class>
-        <load-on-startup>1</load-on-startup>
-    </servlet>
-    <servlet-mapping>
-        <servlet-name>kylin</servlet-name>
-        <url-pattern>/api/*</url-pattern>
-    </servlet-mapping>
-
-    <distributable />
-</web-app>
+<web-app xmlns="http://java.sun.com/xml/ns/javaee"
+           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
+		  http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+           version="2.5">
+
+    <display-name>Kylin REST Service</display-name>
+
+    <welcome-file-list>
+        <welcome-file>/index.html</welcome-file>
+    </welcome-file-list>
+
+    <error-page>
+        <error-code>404</error-code>
+        <location>/index.html</location>
+    </error-page>
+
+    <context-param>
+        <param-name>log4jConfigLocation</param-name>
+        <param-value>classpath:kylin-server-log4j.properties</param-value>
+    </context-param>
+    <context-param>
+        <param-name>contextConfigLocation</param-name>
+        <param-value>
+        	classpath:applicationContext.xml
+			classpath:kylinSecurity.xml
+			classpath*:kylin-*-plugin.xml
+		</param-value>
+    </context-param>
+
+    <listener>
+        <listener-class>org.apache.kylin.rest.util.Log4jConfigListener</listener-class>
+    </listener>
+
+    <listener>
+        <listener-class>org.springframework.web.context.ContextLoaderListener</listener-class>
+    </listener>
+	<listener>
+		<listener-class>
+			org.springframework.security.web.session.HttpSessionEventPublisher
+		</listener-class>
+	</listener>
+	
+<filter>
+   <filter-name>CORS</filter-name>
+   <filter-class>com.thetransactioncompany.cors.CORSFilter</filter-class>       
+   <init-param>
+      <param-name>cors.supportedHeaders</param-name>
+      <param-value>Authorization,Origin, No-Cache, X-Requested-With, If-Modified-Since, Pragma, Last-Modified, Cache-Control, Expires, Content-Type, X-E4M-With, Accept</param-value>
+   </init-param>   
+  <init-param>
+      <param-name>cors.supportedMethods</param-name>
+      <param-value>GET, POST, PUT, DELETE, OPTIONS</param-value>
+   </init-param>     
+  <init-param>
+      <param-name>cors.supportsCredentials </param-name>
+      <param-value>true</param-value>
+   </init-param>    
+</filter>
+
+<filter-mapping>
+   <filter-name>CORS</filter-name>
+   <url-pattern>/*</url-pattern>
+</filter-mapping>
+	
+	<!--
+		Apply Spring Security Filter to all Requests 
+	 -->
+	<filter>
+        <filter-name>springSecurityFilterChain</filter-name>
+        <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class>
+    </filter>
+    <filter-mapping>
+        <filter-name>springSecurityFilterChain</filter-name>
+        <url-pattern>/*</url-pattern>
+    </filter-mapping>
+
+    <servlet>
+        <servlet-name>kylin</servlet-name>
+        <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class>
+        <load-on-startup>1</load-on-startup>
+    </servlet>
+    <servlet-mapping>
+        <servlet-name>kylin</servlet-name>
+        <url-pattern>/api/*</url-pattern>
+    </servlet-mapping>
+
+    <distributable />
+</web-app>

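[Note: the filter chain above answers CORS preflights for every path, while the DispatcherServlet only serves /api/*. A minimal sketch of a preflight probe against that mapping (host, port, and endpoint are assumptions; 7070 is Kylin's usual default; exception handling elided):

    import java.net.HttpURLConnection;
    import java.net.URL;

    URL url = new URL("http://localhost:7070/kylin/api/projects");
    HttpURLConnection c = (HttpURLConnection) url.openConnection();
    c.setRequestMethod("OPTIONS");
    c.setRequestProperty("Origin", "http://example.com");
    c.setRequestProperty("Access-Control-Request-Method", "GET");
    System.out.println(c.getResponseCode() + " "
            + c.getHeaderField("Access-Control-Allow-Origin"));

A 200 response carrying an Access-Control-Allow-Origin header indicates the CORSFilter is handling the request; its filter-mapping precedes springSecurityFilterChain's, so it runs first.]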
http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/server/src/test/java/org/apache/kylin/rest/metrics/QueryMetricsTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/kylin/rest/metrics/QueryMetricsTest.java b/server/src/test/java/org/apache/kylin/rest/metrics/QueryMetricsTest.java
index 47d3f7b..bd1da59 100644
--- a/server/src/test/java/org/apache/kylin/rest/metrics/QueryMetricsTest.java
+++ b/server/src/test/java/org/apache/kylin/rest/metrics/QueryMetricsTest.java
@@ -47,7 +47,7 @@ public class QueryMetricsTest extends ServiceTestBase {
 
     @Test
     public void testQueryMetrics() throws Exception {
-        System.setProperty("kylin.query.metrics.enabled", "true");
+        System.setProperty("kylin.server.query-metrics-enabled", "true");
         QueryMetricsFacade.init();
 
         SQLRequest sqlRequest = new SQLRequest();
@@ -108,7 +108,7 @@ public class QueryMetricsTest extends ServiceTestBase {
         Assert.assertEquals(1L, mBeanServer.getAttribute(objectName, "QuerySuccessCount"));
         Assert.assertEquals(1L, mBeanServer.getAttribute(objectName, "QueryFailCount"));
 
-        System.clearProperty("kylin.query.metrics.enabled");
+        System.clearProperty("kylin.server.query-metrics-enabled");
     }
 
 }

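[Note: the renamed kylin.server.query-metrics-enabled switch appears to gate JMX registration, and the test reads the counters straight off the platform MBean server. The same counters can be inspected from any JVM tool; a minimal sketch (the ObjectName string is an assumption; exception handling elided):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("Hadoop:service=Kylin,sub=Server_Total"); // assumed name
    long ok = (Long) mbs.getAttribute(name, "QuerySuccessCount");
    long fail = (Long) mbs.getAttribute(name, "QueryFailCount");
]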
http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/server/src/test/java/org/apache/kylin/rest/service/CacheServiceTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/kylin/rest/service/CacheServiceTest.java b/server/src/test/java/org/apache/kylin/rest/service/CacheServiceTest.java
index 19c9c6e..8ea9cd2 100644
--- a/server/src/test/java/org/apache/kylin/rest/service/CacheServiceTest.java
+++ b/server/src/test/java/org/apache/kylin/rest/service/CacheServiceTest.java
@@ -73,9 +73,9 @@ public class CacheServiceTest extends LocalFileMetadataTestCase {
     public static void beforeClass() throws Exception {
         staticCreateTestMetadata();
         configA = KylinConfig.getInstanceFromEnv();
-        configA.setProperty("kylin.rest.servers", "localhost:7777");
+        configA.setProperty("kylin.server.cluster-servers", "localhost:7777");
         configB = KylinConfig.createKylinConfig(configA);
-        configB.setProperty("kylin.rest.servers", "localhost:7777");
+        configB.setProperty("kylin.server.cluster-servers", "localhost:7777");
         configB.setMetadataUrl("../examples/test_metadata");
 
         server = new Server(7777);

http://git-wip-us.apache.org/repos/asf/kylin/blob/826f23f1/source-hive/src/main/java/org/apache/kylin/source/hive/HiveMRInput.java
----------------------------------------------------------------------
diff --git a/source-hive/src/main/java/org/apache/kylin/source/hive/HiveMRInput.java b/source-hive/src/main/java/org/apache/kylin/source/hive/HiveMRInput.java
index 3b66287..6eb1a28 100644
--- a/source-hive/src/main/java/org/apache/kylin/source/hive/HiveMRInput.java
+++ b/source-hive/src/main/java/org/apache/kylin/source/hive/HiveMRInput.java
@@ -284,7 +284,7 @@ public class HiveMRInput implements IMRInput {
                 logger.debug("Row count of table '" + intermediateTable + "' is " + rowCount);
                 if (rowCount == 0) {
                     if (!config.isEmptySegmentAllowed()) {
-                        stepLogger.log("Detect upstream hive table is empty, " + "fail the job because \"kylin.job.allow.empty.segment\" = \"false\"");
+                        stepLogger.log("Detect upstream hive table is empty, " + "fail the job because \"kylin.job.allow-empty-segment\" = \"false\"");
                         return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog());
                     } else {
                         return new ExecuteResult(ExecuteResult.State.SUCCEED, "Row count is 0, no need to redistribute");
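[Note: the renamed flag in this message is an operator-facing setting: builds fail on an empty intermediate table unless it is switched on. Assuming the key is read from kylin.properties like other kylin.job.* settings, the opt-out is a single line:

    kylin.job.allow-empty-segment=true

With that set, the step above returns SUCCEED with "Row count is 0, no need to redistribute" instead of erroring out.]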