Posted to dev@kylin.apache.org by GitBox <gi...@apache.org> on 2018/09/27 13:15:01 UTC

[GitHub] yiming187 closed pull request #261: KYLIN-3597 Fix sonar reported static code issues

URL: https://github.com/apache/kylin/pull/261
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the original
diff once a foreign pull request (from a fork) is merged, the diff is reproduced
below for the sake of provenance:

diff --git a/core-metadata/src/main/java/org/apache/kylin/metadata/model/TableDesc.java b/core-metadata/src/main/java/org/apache/kylin/metadata/model/TableDesc.java
index d8e3b028aa..63a78f89e6 100644
--- a/core-metadata/src/main/java/org/apache/kylin/metadata/model/TableDesc.java
+++ b/core-metadata/src/main/java/org/apache/kylin/metadata/model/TableDesc.java
@@ -74,7 +74,8 @@ public static String concatResourcePath(String tableIdentity, String prj) {
         if (cut >= 0)
             path = path.substring(cut + 1);
 
-        String table, prj;
+        String table;
+        String prj;
         int dash = path.indexOf("--");
         if (dash >= 0) {
             table = path.substring(0, dash);
@@ -153,9 +154,10 @@ public TableDesc appendColumns(ColumnDesc[] computedColumns, boolean makeCopy) {
                 if (existingColumns[i].getName().equalsIgnoreCase(computedColumns[j].getName())) {
                     // if we're adding a computed column twice, it should be allowed without producing duplicates
                     if (!existingColumns[i].isComputedColumn()) {
-                        throw new IllegalArgumentException(String.format(Locale.ROOT,
+                        String errorMsg = String.format(Locale.ROOT,
                                 "There is already a column named %s on table %s, please change your computed column name",
-                                new Object[] { computedColumns[j].getName(), this.getIdentity() }));
+                                computedColumns[j].getName(), this.getIdentity());
+                        throw new IllegalArgumentException(errorMsg);
                     } else {
                         isFreshCC = false;
                     }
@@ -178,7 +180,7 @@ public TableDesc appendColumns(ColumnDesc[] computedColumns, boolean makeCopy) {
 
     public ColumnDesc findColumnByName(String name) {
         //ignore the db name and table name if exists
-        int lastIndexOfDot = name.lastIndexOf(".");
+        int lastIndexOfDot = name.lastIndexOf('.');
         if (lastIndexOfDot >= 0) {
             name = name.substring(lastIndexOfDot + 1);
         }
@@ -204,6 +206,7 @@ public String getResourcePath() {
      * @deprecated this is for compatible with data model v1;
      * @return
      */
+    @Deprecated
     public String getResourcePathV1() {
         return concatResourcePath(name, null);
     }
diff --git a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingMerge.java b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingMerge.java
index 991c31e8ed..0b03f7077d 100644
--- a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingMerge.java
+++ b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingMerge.java
@@ -143,7 +143,7 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
         };
 
         final PairFunction convertTextFunction = new PairFunction<Tuple2<Text, Object[]>, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>() {
-            private volatile transient boolean initialized = false;
+            private transient volatile boolean initialized = false;
             BufferedMeasureCodec codec;
 
             @Override
@@ -231,12 +231,11 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
         }
         // output the data size to console, job engine will parse and save the metric
         // please note: this mechanism won't work when spark.submit.deployMode=cluster
-        logger.info("HDFS: Number of bytes written=" + jobListener.metrics.getBytesWritten());
-        //        HadoopUtil.deleteHDFSMeta(metaUrl);
+        logger.info("HDFS: Number of bytes written={}", jobListener.metrics.getBytesWritten());
     }
 
     static class ReEncodCuboidFunction implements PairFunction<Tuple2<Text, Text>, Text, Object[]> {
-        private volatile transient boolean initialized = false;
+        private transient volatile boolean initialized = false;
         private String cubeName;
         private String sourceSegmentId;
         private String mergedSegmentId;
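[Editor's note] Two fixes that recur in this and the following Spark classes are the canonical Java modifier order (transient before volatile, per the JLS) and SLF4J parameterized logging instead of string concatenation, which skips message construction when the log level is disabled. An illustrative sketch, assuming slf4j-api is on the classpath (not Kylin code):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LazyInitSketch {

        private static final Logger logger = LoggerFactory.getLogger(LazyInitSketch.class);

        // JLS canonical modifier order: transient comes before volatile
        private transient volatile boolean initialized = false;

        public void recordBytesWritten(long bytesWritten) {
            // lazy one-time init guard, same shape as the Spark functions above
            if (!initialized) {
                initialized = true;
            }
            // {} placeholders defer string building until the level is actually enabled
            logger.info("HDFS: Number of bytes written={}", bytesWritten);
        }
    }
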
diff --git a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkFactDistinct.java b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkFactDistinct.java
index 5a591673d0..043f479e7d 100644
--- a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkFactDistinct.java
+++ b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkFactDistinct.java
@@ -234,19 +234,19 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
     }
 
     static class FlatOutputFucntion implements PairFlatMapFunction<Iterator<String[]>, SelfDefineSortableKey, Text> {
-        private volatile transient boolean initialized = false;
+        private transient volatile boolean initialized = false;
         private String cubeName;
         private String segmentId;
         private String metaUrl;
         private SerializableConfiguration conf;
         private int samplingPercent;
-        private CuboidStatCalculator cuboidStatCalculator;
-        private FactDistinctColumnsReducerMapping reducerMapping;
+        private transient CuboidStatCalculator cuboidStatCalculator;
+        private transient FactDistinctColumnsReducerMapping reducerMapping;
         private List<TblColRef> allCols;
         private int[] columnIndex;
-        private DictColDeduper dictColDeduper;
+        private transient DictColDeduper dictColDeduper;
         private Map<Integer, DimensionRangeInfo> dimensionRangeInfoMap;
-        private ByteBuffer tmpbuf;
+        private transient ByteBuffer tmpbuf;
         private LongAccumulator bytesWritten;
 
         public FlatOutputFucntion(String cubeName, String segmentId, String metaurl, SerializableConfiguration conf,
@@ -374,8 +374,9 @@ private void init() {
                 result.add(new Tuple2<SelfDefineSortableKey, Text>(sortableKey, outputValue));
             }
 
-            for (Integer colIndex : dimensionRangeInfoMap.keySet()) {
-                DimensionRangeInfo rangeInfo = dimensionRangeInfoMap.get(colIndex);
+            for (Map.Entry<Integer, DimensionRangeInfo> entry : dimensionRangeInfoMap.entrySet()) {
+                int colIndex = entry.getKey();
+                DimensionRangeInfo rangeInfo = entry.getValue();
                 DataType dataType = allCols.get(colIndex).getType();
                 addFieldValue(dataType, colIndex, rangeInfo.getMin(), result);
                 addFieldValue(dataType, colIndex, rangeInfo.getMax(), result);
@@ -458,7 +459,7 @@ private void addFieldValue(DataType type, Integer colIndex, String value,
 
             // log a few rows for troubleshooting
             if (result.size() < 10) {
-                logger.info("Sample output: " + allCols.get(colIndex) + " '" + value + "' => reducer " + reducerIndex);
+                logger.info("Sample output: {} '{}' => reducer {}", allCols.get(colIndex), value, reducerIndex);
             }
         }
 
@@ -573,12 +574,12 @@ private void putRowKeyToHLLNew(String[] row) {
     }
 
     static class FactDistinctPartitioner extends Partitioner {
-        private volatile transient boolean initialized = false;
+        private transient volatile boolean initialized = false;
         private String cubeName;
         private String metaUrl;
         private SerializableConfiguration conf;
         private int totalReducerNum;
-        private FactDistinctColumnsReducerMapping reducerMapping;
+        private transient FactDistinctColumnsReducerMapping reducerMapping;
 
         public FactDistinctPartitioner(String cubeName, String metaUrl, SerializableConfiguration conf,
                 int totalReducerNum) {
@@ -626,14 +627,14 @@ public int getPartition(Object o) {
 
     static class MultiOutputFunction implements
             PairFlatMapFunction<Iterator<Tuple2<SelfDefineSortableKey, Iterable<Text>>>, String, Tuple3<Writable, Writable, String>> {
-        private volatile transient boolean initialized = false;
+        private transient volatile boolean initialized = false;
         private String DICT_FILE_POSTFIX = ".rldict";
         private String DIMENSION_COL_INFO_FILE_POSTFIX = ".dci";
         private String cubeName;
         private String metaUrl;
         private SerializableConfiguration conf;
         private int samplingPercent;
-        private FactDistinctColumnsReducerMapping reducerMapping;
+        private transient FactDistinctColumnsReducerMapping reducerMapping;
         private int taskId;
         private boolean isStatistics = false;
         private long baseCuboidId;
@@ -641,7 +642,7 @@ public int getPartition(Object o) {
         private Map<Long, HLLCounter> cuboidHLLMap;
         private TblColRef col;
         private boolean buildDictInReducer;
-        private IDictionaryBuilder builder;
+        private transient IDictionaryBuilder builder;
         private int rowCount = 0;
         private long totalRowsBeforeMerge = 0;
         private KylinConfig cubeConfig;
@@ -677,7 +678,7 @@ private void init() throws IOException {
                     baseCuboidRowCountInMappers = Lists.newArrayList();
                     cuboidHLLMap = Maps.newHashMap();
 
-                    logger.info("Partition " + taskId + " handling stats");
+                    logger.info("Partition {} handling stats", taskId);
                 } else {
                     // normal col
                     col = reducerMapping.getColForReducer(taskId);
@@ -697,8 +698,7 @@ private void init() throws IOException {
                         builder = DictionaryGenerator.newDictionaryBuilder(col.getType());
                         builder.init(null, 0, null);
                     }
-                    logger.info("Partition " + taskId + " handling column " + col + ", buildDictInReducer="
-                            + buildDictInReducer);
+                    logger.info("Partition {} handling column {}, buildDictInReducer={}", taskId, col, buildDictInReducer);
                 }
 
                 initialized = true;
@@ -707,7 +707,7 @@ private void init() throws IOException {
 
         private void logAFewRows(String value) {
             if (rowCount < 10) {
-                logger.info("Received value: " + value);
+                logger.info("Received value: {}", value);
             }
         }
 
@@ -781,7 +781,7 @@ private void logAFewRows(String value) {
             }
 
             if (isStatistics) {
-                //output the hll info;
+                //output the hll info
                 List<Long> allCuboids = Lists.newArrayList();
                 allCuboids.addAll(cuboidHLLMap.keySet());
                 Collections.sort(allCuboids);
@@ -803,26 +803,26 @@ private void logAFewRows(String value) {
             return result.iterator();
         }
 
-        private void logMapperAndCuboidStatistics(List<Long> allCuboids) throws IOException {
-            logger.info("Cuboid number for task: " + taskId + "\t" + allCuboids.size());
-            logger.info("Samping percentage: \t" + samplingPercent);
+        private void logMapperAndCuboidStatistics(List<Long> allCuboids) {
+            logger.info("Cuboid number for task: {}\t{}", taskId, allCuboids.size());
+            logger.info("Samping percentage: \t{}", samplingPercent);
             logger.info("The following statistics are collected based on sampling data. ");
-            logger.info("Number of Mappers: " + baseCuboidRowCountInMappers.size());
+            logger.info("Number of Mappers: {}", baseCuboidRowCountInMappers.size());
 
             for (int i = 0; i < baseCuboidRowCountInMappers.size(); i++) {
                 if (baseCuboidRowCountInMappers.get(i) > 0) {
-                    logger.info("Base Cuboid in Mapper " + i + " row count: \t " + baseCuboidRowCountInMappers.get(i));
+                    logger.info("Base Cuboid in Mapper {} row count: \t {}", i, baseCuboidRowCountInMappers.get(i));
                 }
             }
 
             long grantTotal = 0;
             for (long i : allCuboids) {
                 grantTotal += cuboidHLLMap.get(i).getCountEstimate();
-                logger.info("Cuboid " + i + " row count is: \t " + cuboidHLLMap.get(i).getCountEstimate());
+                logger.info("Cuboid {} row count is: \t {}", i, cuboidHLLMap.get(i).getCountEstimate());
             }
 
-            logger.info("Sum of row counts (before merge) is: \t " + totalRowsBeforeMerge);
-            logger.info("After merge, the row count: \t " + grantTotal);
+            logger.info("Sum of row counts (before merge) is: \t {}", totalRowsBeforeMerge);
+            logger.info("After merge, the row count: \t {}", grantTotal);
         }
 
         private void outputDimRangeInfo(List<Tuple2<String, Tuple3<Writable, Writable, String>>> result) {
@@ -836,14 +836,13 @@ private void outputDimRangeInfo(List<Tuple2<String, Tuple3<Writable, Writable, S
                 result.add(new Tuple2<String, Tuple3<Writable, Writable, String>>(BatchConstants.CFG_OUTPUT_PARTITION,
                         new Tuple3<Writable, Writable, String>(NullWritable.get(),
                                 new Text(maxValue.getBytes(StandardCharsets.UTF_8)), dimRangeFileName)));
-                logger.info("write dimension range info for col : " + col.getName() + "  minValue:" + minValue
-                        + " maxValue:" + maxValue);
+                logger.info("write dimension range info for col : {}  minValue:{} maxValue:{}", col.getName(), minValue, maxValue);
             }
         }
 
         private void outputDict(TblColRef col, Dictionary<String> dict,
                 List<Tuple2<String, Tuple3<Writable, Writable, String>>> result)
-                throws IOException, InterruptedException {
+                throws IOException {
             // output written to baseDir/colName/colName.rldict-r-00000 (etc)
             String dictFileName = col.getIdentity() + "/" + col.getName() + DICT_FILE_POSTFIX;
 
@@ -860,7 +859,7 @@ private void outputDict(TblColRef col, Dictionary<String> dict,
 
         private void outputStatistics(List<Long> allCuboids,
                 List<Tuple2<String, Tuple3<Writable, Writable, String>>> result)
-                throws IOException, InterruptedException {
+                throws IOException {
             // output written to baseDir/statistics/statistics-r-00000 (etc)
             String statisticsFileName = BatchConstants.CFG_OUTPUT_STATISTICS + "/"
                     + BatchConstants.CFG_OUTPUT_STATISTICS;
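[Editor's note] Besides the logging changes, the SparkFactDistinct hunks mark fields that are rebuilt in init() as transient, so Spark does not attempt to serialize them with the closure, and they iterate over entrySet() instead of looking each key up again through the map. A minimal sketch of the entrySet pattern (types simplified, not Kylin code):

    import java.util.HashMap;
    import java.util.Map;

    public class RangeScanSketch {

        public static void printRanges(Map<Integer, int[]> rangeByColumn) {
            // entrySet() yields key and value in one pass; keySet() + get() costs an extra lookup per key
            for (Map.Entry<Integer, int[]> entry : rangeByColumn.entrySet()) {
                int colIndex = entry.getKey();
                int[] range = entry.getValue();
                System.out.println("col " + colIndex + " min=" + range[0] + " max=" + range[1]);
            }
        }

        public static void main(String[] args) {
            Map<Integer, int[]> ranges = new HashMap<>();
            ranges.put(0, new int[] { 1, 9 });
            ranges.put(3, new int[] { -5, 42 });
            printRanges(ranges);
        }
    }
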
diff --git a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkMergingDictionary.java b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkMergingDictionary.java
index 4d4346fc0f..37f957fe31 100644
--- a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkMergingDictionary.java
+++ b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkMergingDictionary.java
@@ -156,8 +156,8 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
         colToDictPathRDD.coalesce(1, false).saveAsNewAPIHadoopFile(dictOutputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
     }
 
-    static public class MergeDictAndStatsFunction implements PairFunction<Integer, Text, Text> {
-        private volatile transient boolean initialized = false;
+    public static class MergeDictAndStatsFunction implements PairFunction<Integer, Text, Text> {
+        private transient volatile boolean initialized = false;
         private String cubeName;
         private String metaUrl;
         private String segmentId;
@@ -165,7 +165,7 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
         private String statOutputPath;
         private TblColRef[] tblColRefs;
         private SerializableConfiguration conf;
-        private DictionaryManager dictMgr;
+        private transient DictionaryManager dictMgr;
         private KylinConfig kylinConfig;
         private List<CubeSegment> mergingSegments;
 
@@ -236,32 +236,27 @@ private void init() {
 
                     for (CubeSegment cubeSegment : mergingSegments) {
                         String filePath = cubeSegment.getStatisticsResourcePath();
-                        InputStream is = rs.getResource(filePath).inputStream;
-                        File tempFile;
-                        FileOutputStream tempFileStream = null;
 
-                        try {
-                            tempFile = File.createTempFile(segmentId, ".seq");
-                            tempFileStream = new FileOutputStream(tempFile);
+                        File tempFile = File.createTempFile(segmentId, ".seq");
+
+                        try(InputStream is = rs.getResource(filePath).inputStream;
+                            FileOutputStream tempFileStream = new FileOutputStream(tempFile)) {
+
                             org.apache.commons.io.IOUtils.copy(is, tempFileStream);
-                        } finally {
-                            IOUtils.closeStream(is);
-                            IOUtils.closeStream(tempFileStream);
                         }
 
                         FileSystem fs = HadoopUtil.getFileSystem("file:///" + tempFile.getAbsolutePath());
-                        SequenceFile.Reader reader = null;
 
-                        try {
-                            conf = HadoopUtil.getCurrentConfiguration();
+                        conf = HadoopUtil.getCurrentConfiguration();
+
+                        try(SequenceFile.Reader reader = new SequenceFile.Reader(fs, new Path(tempFile.getAbsolutePath()), conf)) {
                             //noinspection deprecation
-                            reader = new SequenceFile.Reader(fs, new Path(tempFile.getAbsolutePath()), conf);
                             LongWritable key = (LongWritable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
                             BytesWritable value = (BytesWritable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
 
                             while (reader.next(key, value)) {
                                 if (key.get() == 0L) {
-                                    // sampling percentage;
+                                    // sampling percentage
                                     averageSamplingPercentage += Bytes.toInt(value.getBytes());
                                 } else if (key.get() > 0) {
                                     HLLCounter hll = new HLLCounter(kylinConfig.getCubeStatsHLLPrecision());
@@ -275,8 +270,6 @@ private void init() {
                                     }
                                 }
                             }
-                        } finally {
-                            IOUtils.closeStream(reader);
                         }
                     }
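[Editor's note] The largest change in SparkMergingDictionary replaces manual finally blocks built around IOUtils.closeStream with try-with-resources, which closes the streams in reverse declaration order even when the copy throws. A self-contained sketch of the same pattern using only java.io (names are illustrative):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    public class CopyToTempSketch {

        public static File copyToTempFile(File source, String prefix) throws IOException {
            File tempFile = File.createTempFile(prefix, ".seq");
            // Both streams are closed automatically, in reverse declaration order,
            // whether the copy succeeds or throws.
            try (InputStream in = new FileInputStream(source);
                    OutputStream out = new FileOutputStream(tempFile)) {
                byte[] buffer = new byte[8192];
                int read;
                while ((read = in.read(buffer)) != -1) {
                    out.write(buffer, 0, read);
                }
            }
            return tempFile;
        }
    }
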
 
diff --git a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java
index 539f03b23e..e2d43ba1c2 100644
--- a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java
+++ b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java
@@ -171,21 +171,21 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
             Writable value = NullWritable.get();
             while (reader.next(key, value)) {
                 keys.add(key);
-                logger.info(" ------- split key: " + key);
+                logger.info(" ------- split key: {}", key);
                 key = new RowKeyWritable(); // important, new an object!
             }
         }
 
-        logger.info("There are " + keys.size() + " split keys, totally " + (keys.size() + 1) + " hfiles");
+        logger.info("There are {} split keys, totally {} hfiles", keys.size(), (keys.size() + 1));
 
         //HBase conf
-        logger.info("Loading HBase configuration from:" + hbaseConfFile);
+        logger.info("Loading HBase configuration from:{}", hbaseConfFile);
         FSDataInputStream confInput = fs.open(new Path(hbaseConfFile));
 
         Configuration hbaseJobConf = new Configuration();
         hbaseJobConf.addResource(confInput);
         hbaseJobConf.set("spark.hadoop.dfs.replication", "3"); // HFile, replication=3
-        Job job = new Job(hbaseJobConf, cubeSegment.getStorageLocationIdentifier());
+        Job job = Job.getInstance(hbaseJobConf, cubeSegment.getStorageLocationIdentifier());
 
         FileOutputFormat.setOutputPath(job, new Path(outputPath));
 
@@ -233,15 +233,13 @@ protected void execute(OptionsHelper optionsHelper) throws Exception {
                     }
                 }).saveAsNewAPIHadoopDataset(job.getConfiguration());
 
-        logger.info("HDFS: Number of bytes written=" + jobListener.metrics.getBytesWritten());
+        logger.info("HDFS: Number of bytes written={}", jobListener.metrics.getBytesWritten());
 
         Map<String, String> counterMap = Maps.newHashMap();
         counterMap.put(ExecutableConstants.HDFS_BYTES_WRITTEN, String.valueOf(jobListener.metrics.getBytesWritten()));
 
         // save counter to hdfs
         HadoopUtil.writeToSequenceFile(sc.hadoopConfiguration(), counterPath, counterMap);
-
-        //HadoopUtil.deleteHDFSMeta(metaUrl);
     }
 
     static class HFilePartitioner extends Partitioner {
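[Editor's note] The constructor new Job(conf, name) is deprecated in Hadoop 2 in favour of the Job.getInstance(conf, name) factory method, which is what the SparkCubeHFile hunk above switches to. A minimal sketch, assuming hadoop-mapreduce-client-core is on the classpath (configuration key copied from the diff for illustration only):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobFactorySketch {

        public static Job newJob(String name) throws IOException {
            Configuration conf = new Configuration();
            conf.set("spark.hadoop.dfs.replication", "3");
            // Preferred over the deprecated "new Job(conf, name)" constructor
            return Job.getInstance(conf, name);
        }
    }
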
diff --git a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/util/UpdateHTableHostCLI.java b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/util/UpdateHTableHostCLI.java
index 3f290ac5bf..bf5c4e84e0 100644
--- a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/util/UpdateHTableHostCLI.java
+++ b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/util/UpdateHTableHostCLI.java
@@ -82,7 +82,7 @@ public static void main(String[] args) throws Exception {
         } else if (!filterType.equals("-all")) {
             printUsageAndExit();
         }
-        logger.info("These htables are needed to be updated: " + StringUtils.join(tableNames, ","));
+        logger.info("These htables are needed to be updated: {}", StringUtils.join(tableNames, ","));
 
         UpdateHTableHostCLI updateHTableHostCLI = new UpdateHTableHostCLI(tableNames, oldHostValue);
         updateHTableHostCLI.execute();
@@ -119,13 +119,13 @@ private static void printUsageAndExit() {
     private static List<String> getHTableNames(KylinConfig config) {
         CubeManager cubeMgr = CubeManager.getInstance(config);
 
-        ArrayList<String> result = new ArrayList<String>();
+        ArrayList<String> result = new ArrayList<>();
         for (CubeInstance cube : cubeMgr.listAllCubes()) {
             for (CubeSegment seg : cube.getSegments(SegmentStatusEnum.READY)) {
                 String tableName = seg.getStorageLocationIdentifier();
                 if (!StringUtils.isBlank(tableName)) {
                     result.add(tableName);
-                    System.out.println("added new table: " + tableName);
+                    logger.info("added new table: {}", tableName);
                 }
             }
         }


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services