Posted to commits@paimon.apache.org by zj...@apache.org on 2023/03/27 09:52:58 UTC

[incubator-paimon] branch master updated: [javadoc] optimize javadoc (#721)

This is an automated email from the ASF dual-hosted git repository.

zjureel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new af8517d48 [javadoc] optimize javadoc (#721)
af8517d48 is described below

commit af8517d4867f38c2e609d54a10e8c12f1b130150
Author: legendtkl <ta...@gmail.com>
AuthorDate: Mon Mar 27 17:52:52 2023 +0800

    [javadoc] optimize javadoc (#721)
---
 .../paimon/benchmark/metric/BenchmarkMetric.java   |  2 +-
 .../main/java/org/apache/paimon/sort/HeapSort.java |  7 ++--
 .../paimon/table/ColumnTypeFileMetaTestBase.java   | 42 +++++++++++-----------
 .../flink/sink/CommitterOperatorTestBase.java      |  4 +--
 .../parquet/reader/ParquetDataColumnReader.java    |  2 +-
 .../format/parquet/reader/RunLengthDecoder.java    |  6 ++--
 6 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/paimon-benchmark/paimon-cluster-benchmark/src/main/java/org/apache/paimon/benchmark/metric/BenchmarkMetric.java b/paimon-benchmark/paimon-cluster-benchmark/src/main/java/org/apache/paimon/benchmark/metric/BenchmarkMetric.java
index 376fbe25b..50c3ea3cf 100644
--- a/paimon-benchmark/paimon-cluster-benchmark/src/main/java/org/apache/paimon/benchmark/metric/BenchmarkMetric.java
+++ b/paimon-benchmark/paimon-cluster-benchmark/src/main/java/org/apache/paimon/benchmark/metric/BenchmarkMetric.java
@@ -23,7 +23,7 @@ import org.apache.paimon.benchmark.utils.BenchmarkUtils;
 import javax.annotation.Nullable;
 
 /**
- * Metric collected per {@link org.apache.paimon.benchmark.BenchmarkOptions#METRIC_MONITOR_DURATION}
+ * Metric collected per {@link org.apache.paimon.benchmark.BenchmarkOptions#METRIC_MONITOR_INTERVAL}
  * for a single query.
  */
 public class BenchmarkMetric {
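
The hunk above retargets a {@link} tag from METRIC_MONITOR_DURATION to METRIC_MONITOR_INTERVAL, presumably because only the latter is the actual option name in BenchmarkOptions. As a general illustration (hypothetical names, not code from this commit), a {@link Class#MEMBER} tag has to name a member that really exists, otherwise the javadoc tool reports a broken reference when reference checks are enabled:

    // Minimal sketch with made-up names (not from the Paimon codebase):
    // a {@link Class#MEMBER} tag must point at an existing member, otherwise
    // the javadoc tool reports a broken reference (e.g. under -Xdoclint:reference).
    public final class MonitorOptions {
        /** How often metrics are sampled, in milliseconds. */
        public static final long METRIC_INTERVAL_MS = 1_000L;

        private MonitorOptions() {}
    }

    /** Metric collected once per {@link MonitorOptions#METRIC_INTERVAL_MS} for a single query. */
    class QueryMetric {
        double rowsPerSecond;
    }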
diff --git a/paimon-core/src/main/java/org/apache/paimon/sort/HeapSort.java b/paimon-core/src/main/java/org/apache/paimon/sort/HeapSort.java
index 7af0ea0b6..88c5c94df 100644
--- a/paimon-core/src/main/java/org/apache/paimon/sort/HeapSort.java
+++ b/paimon-core/src/main/java/org/apache/paimon/sort/HeapSort.java
@@ -19,9 +19,10 @@
 package org.apache.paimon.sort;
 
 /**
- * This file is based on source code from the Hadoop Project (http://hadoop.apache.org/), licensed
- * by the Apache Software Foundation (ASF) under the Apache License, Version 2.0. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership.
+ * This file is based on source code from the Hadoop Project (<a
+ * href="http://hadoop.apache.org/">Apache Hadoop</a>), licensed by the Apache Software Foundation
+ * (ASF) under the Apache License, Version 2.0. See the NOTICE file distributed with this work for
+ * additional information regarding copyright ownership.
  */
 public final class HeapSort implements IndexedSorter {
     public HeapSort() {}
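
The HeapSort change keeps the wording of the license notice and only wraps the bare URL in an HTML anchor, so the generated HTML shows a clickable link instead of raw text. A minimal sketch of the same pattern, using a hypothetical class and URL that are not part of this commit:

    import java.util.Arrays;

    /**
     * Sorts an int array in place. See <a
     * href="https://en.wikipedia.org/wiki/Heapsort">Heapsort</a> for background on the
     * underlying algorithm.
     */
    final class ExampleSorter {
        static void sort(int[] values) {
            // Delegates to the JDK sort; the point of the sketch is the anchor tag above.
            Arrays.sort(values);
        }
    }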
diff --git a/paimon-core/src/test/java/org/apache/paimon/table/ColumnTypeFileMetaTestBase.java b/paimon-core/src/test/java/org/apache/paimon/table/ColumnTypeFileMetaTestBase.java
index 72eaef77a..f580ec9dd 100644
--- a/paimon-core/src/test/java/org/apache/paimon/table/ColumnTypeFileMetaTestBase.java
+++ b/paimon-core/src/test/java/org/apache/paimon/table/ColumnTypeFileMetaTestBase.java
@@ -107,17 +107,17 @@ public abstract class ColumnTypeFileMetaTestBase extends SchemaEvolutionTableTes
         writeAndCheckFileResultForColumnType(
                 schemas -> {
                     FileStoreTable table = createFileStoreTable(schemas);
-                    /**
-                     * Filter field "g" in [200, 500] in SCHEMA_FIELDS which is bigint and will get
-                     * one file with two data as followed:
-                     *
-                     * <ul>
-                     *   <li>2,"200","201",toDecimal(202),(short)203,204,205L,206F,207D,208,toTimestamp(209
-                     *       * millsPerDay),toBytes("210")
-                     *   <li>2,"300","301",toDecimal(302),(short)303,304,305L,306F,307D,308,toTimestamp(309
-                     *       * millsPerDay),toBytes("310")
-                     * </ul>
-                     */
+                    /*
+                     Filter field "g" in [200, 500] in SCHEMA_FIELDS which is bigint and will get
+                     one file with two data as followed:
+
+                     <ul>
+                       <li>2,"200","201",toDecimal(202),(short)203,204,205L,206F,207D,208,toTimestamp(209
+                           * millsPerDay),toBytes("210")
+                       <li>2,"300","301",toDecimal(302),(short)303,304,305L,306F,307D,308,toTimestamp(309
+                           * millsPerDay),toBytes("310")
+                     </ul>
+                    */
                     Predicate predicate =
                             new PredicateBuilder(table.schema().logicalRowType())
                                     .between(6, 200L, 500L);
@@ -131,16 +131,16 @@ public abstract class ColumnTypeFileMetaTestBase extends SchemaEvolutionTableTes
                 (files, schemas) -> {
                     FileStoreTable table = createFileStoreTable(schemas);
 
-                    /**
-                     * Filter field "g" in [200, 500] in SCHEMA_FIELDS which is updated from bigint
-                     * to float and will get another file with one data as followed:
-                     *
-                     * <ul>
-                     *   <li>2,"400","401",402D,403,toDecimal(404),405F,406D,toDecimal(407),408,409,toBytes("410")
-                     * </ul>
-                     *
-                     * <p>Then we can check the results of the two result files.
-                     */
+                    /*
+                     Filter field "g" in [200, 500] in SCHEMA_FIELDS which is updated from bigint
+                     to float and will get another file with one data as followed:
+
+                     <ul>
+                       <li>2,"400","401",402D,403,toDecimal(404),405F,406D,toDecimal(407),408,409,toBytes("410")
+                     </ul>
+
+                     <p>Then we can check the results of the two result files.
+                    */
                     List<DataSplit> splits =
                             table.newSnapshotSplitReader()
                                     .withFilter(
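
Both hunks in this test class make the same change: comments sitting inside a method body are switched from the Javadoc form (/** ... */) to a plain block comment (/* ... */). The javadoc tool only processes doc comments attached to declarations, so inside a method the extra asterisk adds nothing and can confuse style checkers. A small illustration with made-up names:

    class CommentPlacementExample {
        /** Javadoc here documents a declaration and is picked up by the javadoc tool. */
        int selectedRows;

        void filterRows() {
            /*
             Inside a method body, a plain block comment is the appropriate form;
             doc comments placed on statements are never rendered into the API docs.
            */
            selectedRows = 2;
        }
    }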
diff --git a/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/sink/CommitterOperatorTestBase.java b/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/sink/CommitterOperatorTestBase.java
index 421df487f..8950cc377 100644
--- a/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/sink/CommitterOperatorTestBase.java
+++ b/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/sink/CommitterOperatorTestBase.java
@@ -45,9 +45,7 @@ import java.util.List;
 
 import static org.assertj.core.api.Assertions.assertThat;
 
-/**
- * Base test class for {@link AtMostOnceCommitterOperator} and {@link ExactlyOnceCommitOperator}.
- */
+/** Base test class for {@link CommitterOperatorTest}. */
 public abstract class CommitterOperatorTestBase {
 
     private static final RowType ROW_TYPE =
diff --git a/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/ParquetDataColumnReader.java b/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/ParquetDataColumnReader.java
index aabeb5b94..f6dfed253 100644
--- a/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/ParquetDataColumnReader.java
+++ b/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/ParquetDataColumnReader.java
@@ -34,7 +34,7 @@ public interface ParquetDataColumnReader {
      *
      * @param valueCount value count
      * @param in page data
-     * @throws IOException
+     * @throws IOException the io exception
      */
     void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException;
 
diff --git a/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/RunLengthDecoder.java b/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/RunLengthDecoder.java
index 5c23d24a6..58c925ba5 100644
--- a/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/RunLengthDecoder.java
+++ b/paimon-format/src/main/java/org/apache/paimon/format/parquet/reader/RunLengthDecoder.java
@@ -32,9 +32,9 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 /**
- * Run length decoder for data and dictionary ids. See
- * https://github.com/apache/parquet-format/blob/master/Encodings.md See {@link
- * RunLengthBitPackingHybridDecoder}.
+ * Run length decoder for data and dictionary ids. See <a
+ * href="https://github.com/apache/parquet-format/blob/master/Encodings.md">Parquet Format
+ * Encodings</a> See {@link RunLengthBitPackingHybridDecoder}.
  */
 final class RunLengthDecoder {