Posted to commits@parquet.apache.org by bl...@apache.org on 2018/04/05 19:53:34 UTC
[parquet-mr] branch master updated: PARQUET-1264: Fix javadoc warnings for Java 8.
This is an automated email from the ASF dual-hosted git repository.
blue pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/parquet-mr.git
The following commit(s) were added to refs/heads/master by this push:
new 0d55abd PARQUET-1264: Fix javadoc warnings for Java 8.
0d55abd is described below
commit 0d55abd05b0e5027c18e60d1ac3b22998dd00951
Author: Ryan Blue <bl...@apache.org>
AuthorDate: Thu Apr 5 12:38:24 2018 -0700
PARQUET-1264: Fix javadoc warnings for Java 8.
---
.../parquet/arrow/schema/SchemaConverter.java | 14 +--
.../apache/parquet/arrow/schema/SchemaMapping.java | 4 +-
.../parquet/avro/AvroParquetInputFormat.java | 14 +--
.../parquet/avro/AvroParquetOutputFormat.java | 4 +-
.../org/apache/parquet/avro/AvroParquetReader.java | 25 +++++-
.../org/apache/parquet/avro/AvroParquetWriter.java | 18 ++--
.../org/apache/parquet/avro/AvroReadSupport.java | 6 ++
.../org/apache/parquet/avro/AvroWriteSupport.java | 6 +-
.../java/org/apache/parquet/avro/package-info.java | 3 +
.../parquet/cascading/TupleWriteSupport.java | 5 --
.../parquet/cascading/ParquetTupleScheme.java | 2 -
.../parquet/cascading/ParquetTupleScheme.java | 2 -
.../java/org/apache/parquet/cli/BaseCommand.java | 23 ++---
.../main/java/org/apache/parquet/cli/Command.java | 2 +-
.../org/apache/parquet/cli/util/Expressions.java | 4 +
.../apache/parquet/cli/util/RecordException.java | 2 +
.../java/org/apache/parquet/cli/util/Schemas.java | 3 +
.../apache/parquet/column/ColumnDescriptor.java | 8 +-
.../org/apache/parquet/column/ColumnReadStore.java | 2 -
.../org/apache/parquet/column/ColumnReader.java | 9 +-
.../apache/parquet/column/ColumnWriteStore.java | 2 -
.../org/apache/parquet/column/ColumnWriter.java | 43 +++++-----
.../java/org/apache/parquet/column/Dictionary.java | 3 -
.../java/org/apache/parquet/column/Encoding.java | 12 +--
.../apache/parquet/column/ParquetProperties.java | 3 -
.../parquet/column/UnknownColumnException.java | 2 -
.../parquet/column/UnknownColumnTypeException.java | 2 -
.../java/org/apache/parquet/column/ValuesType.java | 3 -
.../parquet/column/impl/ColumnReadStoreImpl.java | 4 +-
.../parquet/column/impl/ColumnReaderImpl.java | 8 +-
.../apache/parquet/column/impl/ColumnWriterV1.java | 3 -
.../apache/parquet/column/impl/ColumnWriterV2.java | 3 -
.../org/apache/parquet/column/page/DataPage.java | 3 -
.../apache/parquet/column/page/DictionaryPage.java | 3 -
.../java/org/apache/parquet/column/page/Page.java | 3 -
.../apache/parquet/column/page/PageReadStore.java | 3 -
.../org/apache/parquet/column/page/PageReader.java | 3 -
.../apache/parquet/column/page/PageWriteStore.java | 3 -
.../org/apache/parquet/column/page/PageWriter.java | 3 -
.../parquet/column/statistics/Statistics.java | 18 +++-
.../statistics/StatisticsClassException.java | 2 -
.../parquet/column/values/RequiresFallback.java | 5 +-
.../apache/parquet/column/values/ValuesReader.java | 2 -
.../apache/parquet/column/values/ValuesWriter.java | 3 -
.../values/bitpacking/BitPackingValuesReader.java | 3 -
.../values/bitpacking/BitPackingValuesWriter.java | 3 -
.../values/delta/DeltaBinaryPackingConfig.java | 2 -
.../delta/DeltaBinaryPackingValuesReader.java | 2 -
.../delta/DeltaBinaryPackingValuesWriter.java | 5 +-
.../DeltaBinaryPackingValuesWriterForInteger.java | 7 +-
.../DeltaBinaryPackingValuesWriterForLong.java | 6 +-
.../DeltaLengthByteArrayValuesReader.java | 3 -
.../DeltaLengthByteArrayValuesWriter.java | 1 -
.../values/deltastrings/DeltaByteArrayReader.java | 3 -
.../values/deltastrings/DeltaByteArrayWriter.java | 2 -
.../values/dictionary/DictionaryValuesReader.java | 3 -
.../values/dictionary/DictionaryValuesWriter.java | 42 ---------
.../parquet/column/values/dictionary/IntList.java | 7 --
.../values/dictionary/PlainValuesDictionary.java | 22 ++---
.../column/values/factory/ValuesWriterFactory.java | 7 +-
.../values/plain/BooleanPlainValuesReader.java | 3 -
.../values/plain/BooleanPlainValuesWriter.java | 3 -
.../plain/FixedLenByteArrayPlainValuesReader.java | 2 -
.../plain/FixedLenByteArrayPlainValuesWriter.java | 2 -
.../column/values/plain/PlainValuesReader.java | 3 -
.../column/values/plain/PlainValuesWriter.java | 3 -
.../rle/RunLengthBitPackingHybridDecoder.java | 2 -
.../rle/RunLengthBitPackingHybridEncoder.java | 4 +-
.../rle/RunLengthBitPackingHybridValuesReader.java | 2 -
.../rle/RunLengthBitPackingHybridValuesWriter.java | 3 -
.../parquet/example/DummyRecordConverter.java | 3 -
.../java/org/apache/parquet/example/Paper.java | 3 -
.../org/apache/parquet/filter/AndRecordFilter.java | 2 -
.../org/apache/parquet/filter/NotRecordFilter.java | 2 -
.../org/apache/parquet/filter/OrRecordFilter.java | 2 -
.../org/apache/parquet/filter/RecordFilter.java | 2 -
.../apache/parquet/filter/UnboundRecordFilter.java | 2 -
.../parquet/filter2/compat/FilterCompat.java | 18 +++-
.../parquet/filter2/predicate/FilterApi.java | 81 ++++++++++++++++--
.../parquet/filter2/predicate/FilterPredicate.java | 4 +
.../filter2/predicate/LogicalInverseRewriter.java | 2 +-
.../parquet/filter2/predicate/Statistics.java | 16 ++++
.../filter2/predicate/UserDefinedPredicate.java | 33 +++++---
.../parquet/filter2/predicate/ValidTypeMap.java | 3 +-
.../IncrementallyUpdatedFilterPredicate.java | 9 ++
...ementallyUpdatedFilterPredicateBuilderBase.java | 2 +-
.../main/java/org/apache/parquet/io/ColumnIO.java | 3 -
.../org/apache/parquet/io/ColumnIOFactory.java | 3 -
.../apache/parquet/io/CompilationException.java | 3 -
.../org/apache/parquet/io/EmptyRecordReader.java | 2 -
.../apache/parquet/io/FilteredRecordReader.java | 2 -
.../java/org/apache/parquet/io/GroupColumnIO.java | 4 -
.../apache/parquet/io/InvalidRecordException.java | 3 -
.../org/apache/parquet/io/MessageColumnIO.java | 3 -
.../parquet/io/ParquetDecodingException.java | 3 -
.../parquet/io/ParquetEncodingException.java | 3 -
.../org/apache/parquet/io/PrimitiveColumnIO.java | 4 -
.../parquet/io/RecordConsumerLoggingWrapper.java | 3 -
.../java/org/apache/parquet/io/RecordReader.java | 1 -
.../parquet/io/RecordReaderImplementation.java | 1 -
.../parquet/io/ValidatingRecordConsumer.java | 3 -
.../java/org/apache/parquet/io/api/Converter.java | 3 -
.../org/apache/parquet/io/api/GroupConverter.java | 3 -
.../apache/parquet/io/api/PrimitiveConverter.java | 5 +-
.../org/apache/parquet/io/api/RecordConsumer.java | 2 -
.../apache/parquet/io/api/RecordMaterializer.java | 2 -
.../java/org/apache/parquet/schema/GroupType.java | 3 -
.../IncompatibleSchemaModificationException.java | 3 -
.../org/apache/parquet/schema/MessageType.java | 3 -
.../apache/parquet/schema/MessageTypeParser.java | 2 -
.../org/apache/parquet/schema/PrimitiveType.java | 6 --
.../main/java/org/apache/parquet/schema/Type.java | 5 --
.../org/apache/parquet/schema/TypeConverter.java | 2 -
.../org/apache/parquet/schema/TypeVisitor.java | 3 -
.../apache/parquet/column/values/RandomStr.java | 5 --
.../org/apache/parquet/column/values/Utils.java | 3 -
.../values/bitpacking/BitPackingPerfTest.java | 2 -
.../RunLengthBitPackingHybridIntegrationTest.java | 3 -
.../rle/TestRunLengthBitPackingHybridEncoder.java | 3 -
.../test/java/org/apache/parquet/io/PerfTest.java | 4 -
.../src/main/java/org/apache/parquet/Ints.java | 2 -
.../src/main/java/org/apache/parquet/Log.java | 3 -
.../apache/parquet/ParquetRuntimeException.java | 3 -
.../java/org/apache/parquet/Preconditions.java | 3 -
.../java/org/apache/parquet/bytes/BytesInput.java | 3 -
.../java/org/apache/parquet/bytes/BytesUtils.java | 3 -
.../bytes/CapacityByteArrayOutputStream.java | 3 -
.../parquet/bytes/LittleEndianDataInputStream.java | 3 -
.../bytes/LittleEndianDataOutputStream.java | 3 -
.../parquet/hadoop/metadata/Canonicalizer.java | 1 -
.../column/values/bitpacking/BitPacking.java | 9 --
.../bitpacking/ByteBasedBitPackingEncoder.java | 3 -
.../column/values/bitpacking/BytePacker.java | 3 -
.../values/bitpacking/BytePackerForLong.java | 3 -
.../column/values/bitpacking/IntPacker.java | 3 -
.../parquet/column/values/bitpacking/Packer.java | 3 -
.../org/apache/parquet/encoding/Generator.java | 3 -
.../bitpacking/ByteBasedBitPackingGenerator.java | 5 +-
.../bitpacking/IntBasedBitPackingGenerator.java | 5 +-
...crementallyUpdatedFilterPredicateGenerator.java | 2 +-
.../parquet/filter2/compat/RowGroupFilter.java | 4 +
.../format/converter/ParquetMetadataConverter.java | 20 +++--
.../parquet/hadoop/BadConfigurationException.java | 3 -
.../org/apache/parquet/hadoop/CodecFactory.java | 4 +-
.../java/org/apache/parquet/hadoop/Footer.java | 4 -
.../apache/parquet/hadoop/ParquetFileReader.java | 99 ++++++++++++++--------
.../apache/parquet/hadoop/ParquetFileWriter.java | 66 ++++++++++-----
.../apache/parquet/hadoop/ParquetInputFormat.java | 18 ++--
.../apache/parquet/hadoop/ParquetInputSplit.java | 32 +++----
.../apache/parquet/hadoop/ParquetOutputFormat.java | 6 +-
.../org/apache/parquet/hadoop/ParquetReader.java | 10 +--
.../apache/parquet/hadoop/ParquetRecordReader.java | 4 +-
.../apache/parquet/hadoop/ParquetRecordWriter.java | 7 +-
.../org/apache/parquet/hadoop/ParquetWriter.java | 28 +++---
.../org/apache/parquet/hadoop/PrintFooter.java | 2 -
.../parquet/hadoop/api/DelegatingReadSupport.java | 4 +-
.../parquet/hadoop/api/DelegatingWriteSupport.java | 4 +-
.../org/apache/parquet/hadoop/api/InitContext.java | 3 -
.../org/apache/parquet/hadoop/api/ReadSupport.java | 5 --
.../apache/parquet/hadoop/api/WriteSupport.java | 8 --
.../apache/parquet/hadoop/codec/CodecConfig.java | 2 -
.../parquet/hadoop/example/ExampleInputFormat.java | 3 -
.../hadoop/example/ExampleOutputFormat.java | 8 +-
.../apache/parquet/hadoop/mapred/Container.java | 4 +-
.../mapred/MapredParquetOutputCommitter.java | 3 -
.../parquet/hadoop/metadata/BlockMetaData.java | 3 -
.../hadoop/metadata/ColumnChunkMetaData.java | 1 -
.../parquet/hadoop/metadata/FileMetaData.java | 3 -
.../parquet/hadoop/metadata/GlobalMetaData.java | 3 -
.../parquet/hadoop/metadata/ParquetMetadata.java | 3 -
.../apache/parquet/hadoop/util/ContextUtil.java | 21 ++++-
.../parquet/hadoop/util/SerializationUtil.java | 10 +--
.../hadoop/util/counters/BenchmarkCounter.java | 8 +-
.../hadoop/util/counters/CounterLoader.java | 1 -
.../parquet/hadoop/util/counters/ICounter.java | 1 -
.../util/counters/mapred/MapRedCounterAdapter.java | 1 -
.../util/counters/mapred/MapRedCounterLoader.java | 1 -
.../mapreduce/MapReduceCounterAdapter.java | 1 -
.../counters/mapreduce/MapReduceCounterLoader.java | 1 -
.../parquet/hadoop/DeprecatedInputFormatTest.java | 1 -
.../parquet/hadoop/DeprecatedOutputFormatTest.java | 1 -
.../java/org/apache/parquet/hive/HiveBinding.java | 6 +-
.../io/parquet/read/DataWritableReadSupport.java | 4 +-
.../serde/ArrayWritableObjectInspector.java | 5 +-
.../parquet/serde/DeepParquetHiveMapInspector.java | 9 +-
.../parquet/serde/ParquetHiveArrayInspector.java | 4 +-
.../serde/StandardParquetHiveMapInspector.java | 6 +-
.../java/org/apache/parquet/pig/ParquetLoader.java | 7 +-
.../java/org/apache/parquet/pig/ParquetStorer.java | 3 -
.../java/org/apache/parquet/pig/PigMetaData.java | 3 -
.../org/apache/parquet/pig/PigSchemaConverter.java | 4 -
.../parquet/pig/SchemaConversionException.java | 3 -
.../org/apache/parquet/pig/TupleReadSupport.java | 3 -
.../apache/parquet/pig/convert/MapConverter.java | 11 ---
.../parquet/pig/convert/ParentValueContainer.java | 3 -
.../apache/parquet/pig/convert/TupleConverter.java | 23 -----
.../apache/parquet/pig/summary/BagSummaryData.java | 8 --
.../parquet/pig/summary/FieldSummaryData.java | 6 --
.../apache/parquet/pig/summary/MapSummaryData.java | 7 --
.../parquet/pig/summary/NumberSummaryData.java | 3 -
.../parquet/pig/summary/StringSummaryData.java | 3 -
.../org/apache/parquet/pig/summary/Summary.java | 3 -
.../apache/parquet/pig/summary/SummaryData.java | 16 ----
.../parquet/pig/summary/TupleSummaryData.java | 8 --
.../test/java/org/apache/parquet/pig/PerfTest.java | 4 -
.../java/org/apache/parquet/pig/PerfTest2.java | 4 -
.../apache/parquet/pig/PerfTestReadAllCols.java | 4 -
.../apache/parquet/pig/TupleConsumerPerfTest.java | 6 --
.../parquet/proto/ProtoMessageConverter.java | 3 -
.../parquet/proto/ProtoParquetOutputFormat.java | 7 +-
.../apache/parquet/proto/ProtoParquetReader.java | 5 ++
.../apache/parquet/proto/ProtoParquetWriter.java | 17 ++--
.../org/apache/parquet/proto/ProtoReadSupport.java | 8 +-
.../apache/parquet/proto/ProtoRecordConverter.java | 3 +-
.../apache/parquet/proto/ProtoSchemaConverter.java | 3 -
.../apache/parquet/proto/ProtoWriteSupport.java | 1 -
.../apache/parquet/scrooge/ScroogeReadSupport.java | 1 -
.../parquet/scrooge/ScroogeRecordConverter.java | 3 +
.../parquet/scrooge/ScroogeStructConverter.java | 8 +-
.../parquet/scrooge/ParquetScroogeSchemeTest.java | 2 -
.../hadoop/thrift/AbstractThriftWriteSupport.java | 1 -
.../thrift/ParquetThriftBytesOutputFormat.java | 4 -
.../hadoop/thrift/ParquetThriftInputFormat.java | 12 +--
.../hadoop/thrift/ParquetThriftOutputFormat.java | 3 -
.../parquet/hadoop/thrift/ThriftReadSupport.java | 5 ++
.../hadoop/thrift/ThriftToParquetFileWriter.java | 10 +--
.../thrift/BufferedProtocolReadToWrite.java | 5 +-
.../org/apache/parquet/thrift/ConvertedField.java | 2 +
.../apache/parquet/thrift/FieldIgnoredHandler.java | 3 +-
.../org/apache/parquet/thrift/ParquetProtocol.java | 3 -
.../org/apache/parquet/thrift/ProtocolPipe.java | 3 -
.../apache/parquet/thrift/ProtocolReadToWrite.java | 5 +-
.../apache/parquet/thrift/SkippableException.java | 3 -
.../parquet/thrift/TBaseRecordConverter.java | 3 +
.../org/apache/parquet/thrift/ThriftMetaData.java | 4 -
.../apache/parquet/thrift/ThriftParquetReader.java | 12 +--
.../apache/parquet/thrift/ThriftParquetWriter.java | 2 -
.../parquet/thrift/ThriftRecordConverter.java | 39 ++-------
.../parquet/thrift/ThriftSchemaConvertVisitor.java | 2 -
.../parquet/thrift/ThriftSchemaConverter.java | 6 ++
.../parquet/thrift/pig/ParquetThriftStorer.java | 3 -
.../thrift/pig/TupleToThriftWriteSupport.java | 3 -
.../parquet/thrift/projection/FieldsPath.java | 2 -
.../projection/StrictFieldProjectionFilter.java | 8 +-
.../projection/ThriftProjectionException.java | 3 -
.../projection/amend/ProtocolEventsAmender.java | 6 +-
.../DeprecatedFieldProjectionFilter.java | 1 -
.../projection/deprecated/PathGlobPattern.java | 2 -
.../thrift/struct/CompatibilityChecker.java | 2 -
.../parquet/thrift/struct/CompatibilityRunner.java | 2 -
.../apache/parquet/thrift/struct/ThriftType.java | 3 -
.../apache/parquet/thrift/struct/ThriftTypeID.java | 3 -
252 files changed, 621 insertions(+), 975 deletions(-)
diff --git a/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaConverter.java b/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaConverter.java
index 773f7c8..cf4ec0d 100644
--- a/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaConverter.java
+++ b/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaConverter.java
@@ -211,9 +211,9 @@ public class SchemaConverter {
}
/**
- * @see https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#decimal
- * @param type
- * @return
+ * See https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#decimal
+ * @param type an arrow decimal type
+ * @return a mapping from the arrow decimal to the Parquet type
*/
@Override
public TypeMapping visit(Decimal type) {
@@ -245,7 +245,7 @@ public class SchemaConverter {
}
/**
- * @see https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#interval
+ * See https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#interval
*/
@Override
public TypeMapping visit(Interval type) {
@@ -311,7 +311,7 @@ public class SchemaConverter {
* @param type parquet type
* @param name overrides parquet.getName()
* @param repetition overrides parquet.getRepetition()
- * @return
+ * @return a type mapping from the Parquet type to an Arrow type
*/
private TypeMapping fromParquet(Type type, String name, Repetition repetition) {
if (repetition == REPEATED) {
@@ -511,8 +511,8 @@ public class SchemaConverter {
/**
* Maps a Parquet and Arrow Schema
* For now does not validate primitive type compatibility
- * @param arrowSchema
- * @param parquetSchema
+ * @param arrowSchema an Arrow schema
+ * @param parquetSchema a Parquet message type
* @return the mapping between the 2
*/
public SchemaMapping map(Schema arrowSchema, MessageType parquetSchema) {
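For reference, mapping a pair of schemas through this API might look like the following minimal sketch (the Arrow schema here is assumed to exist already; only the Parquet schema is built inline):

    import org.apache.arrow.vector.types.pojo.Schema;
    import org.apache.parquet.arrow.schema.SchemaConverter;
    import org.apache.parquet.arrow.schema.SchemaMapping;
    import org.apache.parquet.schema.MessageType;
    import org.apache.parquet.schema.MessageTypeParser;

    // sketch only; arrowSchema is an assumed, pre-built Arrow schema
    MessageType parquetSchema = MessageTypeParser.parseMessageType(
        "message example { required int32 id; }");
    SchemaMapping mapping = new SchemaConverter().map(arrowSchema, parquetSchema);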
diff --git a/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaMapping.java b/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaMapping.java
index 184d7c6..cbb04ce 100644
--- a/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaMapping.java
+++ b/parquet-arrow/src/main/java/org/apache/parquet/arrow/schema/SchemaMapping.java
@@ -33,8 +33,6 @@ import org.apache.parquet.schema.Type;
/**
* The mapping between an Arrow and a Parquet schema
* @see SchemaConverter
- *
- * @author Julien Le Dem
*/
public class SchemaMapping {
@@ -66,7 +64,7 @@ public class SchemaMapping {
/**
* To traverse a schema mapping
- * @param <T>
+ * @param <T> the Java return type of the visitor
*/
public interface TypeMappingVisitor<T> {
T visit(PrimitiveTypeMapping primitiveTypeMapping);
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetInputFormat.java b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetInputFormat.java
index 42bfb09..159b420 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetInputFormat.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetInputFormat.java
@@ -26,6 +26,8 @@ import org.apache.parquet.hadoop.util.ContextUtil;
/**
* A Hadoop {@link org.apache.hadoop.mapreduce.InputFormat} for Parquet files.
+ *
+ * @param <T> the Java type of objects produced by this InputFormat
*/
public class AvroParquetInputFormat<T> extends ParquetInputFormat<T> {
public AvroParquetInputFormat() {
@@ -45,8 +47,8 @@ public class AvroParquetInputFormat<T> extends ParquetInputFormat<T> {
* in the projection then it must either not be included or be optional in the read
* schema. Use {@link #setAvroReadSchema(org.apache.hadoop.mapreduce.Job,
* org.apache.avro.Schema)} to set a read schema, if needed.
- * @param job
- * @param requestedProjection
+ * @param job a job
+ * @param requestedProjection the requested projection schema
* @see #setAvroReadSchema(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
* @see org.apache.parquet.avro.AvroParquetOutputFormat#setSchema(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
*/
@@ -61,8 +63,8 @@ public class AvroParquetInputFormat<T> extends ParquetInputFormat<T> {
* <p>
* Differences between the read and write schemas are resolved using
* <a href="http://avro.apache.org/docs/current/spec.html#Schema+Resolution">Avro's schema resolution rules</a>.
- * @param job
- * @param avroReadSchema
+ * @param job a job
+ * @param avroReadSchema the requested schema
* @see #setRequestedProjection(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
* @see org.apache.parquet.avro.AvroParquetOutputFormat#setSchema(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
*/
@@ -74,8 +76,8 @@ public class AvroParquetInputFormat<T> extends ParquetInputFormat<T> {
* Uses an instance of the specified {@link AvroDataSupplier} class to control how the
* {@link org.apache.avro.specific.SpecificData} instance that is used to find
* Avro specific records is created.
- * @param job
- * @param supplierClass
+ * @param job a job
+ * @param supplierClass an avro data supplier class
*/
public static void setAvroDataSupplier(Job job,
Class<? extends AvroDataSupplier> supplierClass) {
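For reference, a minimal sketch of wiring these options into a mapreduce job (the projection and read schemas are assumed to be defined elsewhere):

    Job job = Job.getInstance(new Configuration());
    job.setInputFormatClass(AvroParquetInputFormat.class);
    // read only the columns covered by the projection schema
    AvroParquetInputFormat.setRequestedProjection(job, projectionSchema);
    // resolve the file schema against a compatible read schema
    AvroParquetInputFormat.setAvroReadSchema(job, readSchema);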
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetOutputFormat.java b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetOutputFormat.java
index 1eb4f93..afbaefc 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetOutputFormat.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetOutputFormat.java
@@ -35,8 +35,8 @@ public class AvroParquetOutputFormat<T> extends ParquetOutputFormat<T> {
* schema so that the records can be written in Parquet format. It is also
* stored in the Parquet metadata so that records can be reconstructed as Avro
* objects at read time without specifying a read schema.
- * @param job
- * @param schema
+ * @param job a job
+ * @param schema a schema for the data that will be written
* @see org.apache.parquet.avro.AvroParquetInputFormat#setAvroReadSchema(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
*/
public static void setSchema(Job job, Schema schema) {
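And the corresponding write-side setup, again as a sketch with an assumed schema:

    Job job = Job.getInstance(new Configuration());
    job.setOutputFormatClass(AvroParquetOutputFormat.class);
    AvroParquetOutputFormat.setSchema(job, schema);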
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetReader.java b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetReader.java
index 442c5b7..d26fb63 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetReader.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetReader.java
@@ -32,10 +32,15 @@ import org.apache.parquet.io.InputFile;
/**
* Read Avro records from a Parquet file.
+ *
+ * @param <T> the Java type of records created by this reader
*/
public class AvroParquetReader<T> extends ParquetReader<T> {
/**
+ * @param file a file path
+ * @param <T> the Java type of records to read from the file
+ * @return an Avro reader builder
* @deprecated will be removed in 2.0.0; use {@link #builder(InputFile)} instead.
*/
@Deprecated
@@ -48,7 +53,9 @@ public class AvroParquetReader<T> extends ParquetReader<T> {
}
/**
- * @deprecated use {@link #builder(Path)}
+ * @param file a file path
+ * @throws IOException if there is an error while reading
+ * @deprecated will be removed in 2.0.0; use {@link #builder(InputFile)} instead.
*/
@Deprecated
public AvroParquetReader(Path file) throws IOException {
@@ -56,7 +63,10 @@ public class AvroParquetReader<T> extends ParquetReader<T> {
}
/**
- * @deprecated use {@link #builder(Path)}
+ * @param file a file path
+ * @param unboundRecordFilter an unbound record filter (from the old filter API)
+ * @throws IOException if there is an error while reading
+ * @deprecated will be removed in 2.0.0; use {@link #builder(InputFile)} instead.
*/
@Deprecated
public AvroParquetReader(Path file, UnboundRecordFilter unboundRecordFilter) throws IOException {
@@ -64,7 +74,10 @@ public class AvroParquetReader<T> extends ParquetReader<T> {
}
/**
- * @deprecated use {@link #builder(Path)}
+ * @param conf a configuration
+ * @param file a file path
+ * @throws IOException if there is an error while reading
+ * @deprecated will be removed in 2.0.0; use {@link #builder(InputFile)} instead.
*/
@Deprecated
public AvroParquetReader(Configuration conf, Path file) throws IOException {
@@ -72,7 +85,11 @@ public class AvroParquetReader<T> extends ParquetReader<T> {
}
/**
- * @deprecated use {@link #builder(Path)}
+ * @param conf a configuration
+ * @param file a file path
+ * @param unboundRecordFilter an unbound record filter (from the old filter API)
+ * @throws IOException if there is an error while reading
+ * @deprecated will be removed in 2.0.0; use {@link #builder(InputFile)} instead.
*/
@Deprecated
public AvroParquetReader(Configuration conf, Path file, UnboundRecordFilter unboundRecordFilter) throws IOException {
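The builder API that these deprecation notices point to can be used roughly as follows (a sketch; path and configuration are assumed):

    InputFile in = HadoopInputFile.fromPath(new Path("data.parquet"), new Configuration());
    try (ParquetReader<GenericRecord> reader =
        AvroParquetReader.<GenericRecord>builder(in).build()) {
      GenericRecord record;
      while ((record = reader.read()) != null) {
        // process record
      }
    }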
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetWriter.java b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetWriter.java
index 3e802a8..4ef820e 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetWriter.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroParquetWriter.java
@@ -45,12 +45,12 @@ public class AvroParquetWriter<T> extends ParquetWriter<T> {
/** Create a new {@link AvroParquetWriter}.
*
- * @param file
- * @param avroSchema
- * @param compressionCodecName
- * @param blockSize
- * @param pageSize
- * @throws IOException
+ * @param file a file path
+ * @param avroSchema a schema for the write
+ * @param compressionCodecName compression codec
+ * @param blockSize target block size
+ * @param pageSize target page size
+ * @throws IOException if there is an error while writing
*/
@Deprecated
public AvroParquetWriter(Path file, Schema avroSchema,
@@ -68,7 +68,7 @@ public class AvroParquetWriter<T> extends ParquetWriter<T> {
* @param blockSize the block size threshold.
* @param pageSize See parquet write up. Blocks are subdivided into pages for alignment and other purposes.
* @param enableDictionary Whether to use a dictionary to compress columns.
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
@Deprecated
public AvroParquetWriter(Path file, Schema avroSchema,
@@ -84,7 +84,7 @@ public class AvroParquetWriter<T> extends ParquetWriter<T> {
*
* @param file The file name to write to.
* @param avroSchema The schema to write with.
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
@Deprecated
public AvroParquetWriter(Path file, Schema avroSchema) throws IOException {
@@ -101,7 +101,7 @@ public class AvroParquetWriter<T> extends ParquetWriter<T> {
* @param pageSize See parquet write up. Blocks are subdivided into pages for alignment and other purposes.
* @param enableDictionary Whether to use a dictionary to compress columns.
* @param conf The Configuration to use.
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
@Deprecated
public AvroParquetWriter(Path file, Schema avroSchema,
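For comparison, the non-deprecated builder path looks roughly like this (schema, path, and record are assumed):

    try (ParquetWriter<GenericRecord> writer = AvroParquetWriter
        .<GenericRecord>builder(new Path("out.parquet"))
        .withSchema(avroSchema)
        .withCompressionCodec(CompressionCodecName.SNAPPY)
        .build()) {
      writer.write(record);
    }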
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroReadSupport.java b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroReadSupport.java
index 7d55bf5..5bf0cff 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroReadSupport.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroReadSupport.java
@@ -32,6 +32,8 @@ import org.apache.parquet.schema.MessageType;
* Avro implementation of {@link ReadSupport} for avro generic, specific, and
* reflect models. Use {@link AvroParquetReader} or
* {@link AvroParquetInputFormat} rather than using this class directly.
+ *
+ * @param <T> the Java type of records created by this ReadSupport
*/
public class AvroReadSupport<T> extends ReadSupport<T> {
@@ -50,6 +52,8 @@ public class AvroReadSupport<T> extends ReadSupport<T> {
public static final boolean AVRO_DEFAULT_COMPATIBILITY = true;
/**
+ * @param configuration a configuration
+ * @param requestedProjection the requested projection schema
* @see org.apache.parquet.avro.AvroParquetInputFormat#setRequestedProjection(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
*/
public static void setRequestedProjection(Configuration configuration, Schema requestedProjection) {
@@ -57,6 +61,8 @@ public class AvroReadSupport<T> extends ReadSupport<T> {
}
/**
+ * @param configuration a configuration
+ * @param avroReadSchema the read schema
* @see org.apache.parquet.avro.AvroParquetInputFormat#setAvroReadSchema(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
*/
public static void setAvroReadSchema(Configuration configuration, Schema avroReadSchema) {
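The same settings can be applied straight to a Hadoop Configuration when the input format is not in play; a sketch with assumed schemas:

    Configuration conf = new Configuration();
    AvroReadSupport.setRequestedProjection(conf, projectionSchema);
    AvroReadSupport.setAvroReadSchema(conf, readSchema);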
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroWriteSupport.java b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroWriteSupport.java
index 460565b..2325dc5 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/AvroWriteSupport.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/AvroWriteSupport.java
@@ -80,7 +80,9 @@ public class AvroWriteSupport<T> extends WriteSupport<T> {
}
/**
- * @deprecated use {@link AvroWriteSupport(MessageType, Schema, Configuration)}
+ * @param schema the write parquet schema
+ * @param avroSchema the write avro schema
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public AvroWriteSupport(MessageType schema, Schema avroSchema) {
@@ -104,6 +106,8 @@ public class AvroWriteSupport<T> extends WriteSupport<T> {
}
/**
+ * @param configuration a configuration
+ * @param schema the write schema
* @see org.apache.parquet.avro.AvroParquetOutputFormat#setSchema(org.apache.hadoop.mapreduce.Job, org.apache.avro.Schema)
*/
public static void setSchema(Configuration configuration, Schema schema) {
diff --git a/parquet-avro/src/main/java/org/apache/parquet/avro/package-info.java b/parquet-avro/src/main/java/org/apache/parquet/avro/package-info.java
index e5e0475..8b0f0d1 100644
--- a/parquet-avro/src/main/java/org/apache/parquet/avro/package-info.java
+++ b/parquet-avro/src/main/java/org/apache/parquet/avro/package-info.java
@@ -26,6 +26,7 @@
* </p>
*
* <table>
+ * <caption>Avro to Parquet type mapping</caption>
* <tr>
* <th>Avro type</th>
* <th>Parquet type</th>
@@ -96,6 +97,8 @@
* or a default Avro schema will be derived using the following mapping.
* </p>
*
+ * <table>
+ * <caption>Parquet to Avro type mapping</caption>
* <tr>
* <th>Parquet type</th>
* <th>Avro type</th>
diff --git a/parquet-cascading-common23/src/main/java/org/apache/parquet/cascading/TupleWriteSupport.java b/parquet-cascading-common23/src/main/java/org/apache/parquet/cascading/TupleWriteSupport.java
index 032f534..0e5f13e 100644
--- a/parquet-cascading-common23/src/main/java/org/apache/parquet/cascading/TupleWriteSupport.java
+++ b/parquet-cascading-common23/src/main/java/org/apache/parquet/cascading/TupleWriteSupport.java
@@ -30,11 +30,6 @@ import org.apache.parquet.schema.MessageTypeParser;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
-/**
- *
- *
- * @author Mickaël Lacour <m....@criteo.com>
- */
public class TupleWriteSupport extends WriteSupport<TupleEntry> {
private RecordConsumer recordConsumer;
diff --git a/parquet-cascading/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java b/parquet-cascading/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java
index 3b7d715..a62199f 100644
--- a/parquet-cascading/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java
+++ b/parquet-cascading/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java
@@ -55,8 +55,6 @@ import static org.apache.parquet.Preconditions.checkNotNull;
* Parquet schema.
* Currently, only primitive types are supported. TODO: allow nested fields in the Parquet schema to be
* flattened to a top-level field in the Cascading tuple.
- *
- * @author Avi Bryant
*/
@Deprecated // The parquet-cascading module depends on Cascading 2.x, and is being superseded with parquet-cascading3 for Cascading 3.x
diff --git a/parquet-cascading3/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java b/parquet-cascading3/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java
index 4532d3b..23220fb 100644
--- a/parquet-cascading3/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java
+++ b/parquet-cascading3/src/main/java/org/apache/parquet/cascading/ParquetTupleScheme.java
@@ -55,8 +55,6 @@ import static org.apache.parquet.Preconditions.checkNotNull;
* Parquet schema.
* Currently, only primitive types are supported. TODO: allow nested fields in the Parquet schema to be
* flattened to a top-level field in the Cascading tuple.
- *
- * @author Avi Bryant
*/
public class ParquetTupleScheme extends Scheme<JobConf, RecordReader, OutputCollector, Object[], Object[]>{
diff --git a/parquet-cli/src/main/java/org/apache/parquet/cli/BaseCommand.java b/parquet-cli/src/main/java/org/apache/parquet/cli/BaseCommand.java
index 4b47164..f385fde 100644
--- a/parquet-cli/src/main/java/org/apache/parquet/cli/BaseCommand.java
+++ b/parquet-cli/src/main/java/org/apache/parquet/cli/BaseCommand.java
@@ -77,7 +77,7 @@ public abstract class BaseCommand implements Command, Configurable {
/**
* @return FileSystem to use when no file system scheme is present in a path
- * @throws IOException
+ * @throws IOException if there is an error loading the default fs
*/
public FileSystem defaultFS() throws IOException {
if (localFS == null) {
@@ -94,7 +94,7 @@ public abstract class BaseCommand implements Command, Configurable {
* @param content String content to write
* @param console A {@link Logger} for writing to the console
* @param filename The destination {@link Path} as a String
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public void output(String content, Logger console, String filename)
throws IOException {
@@ -120,7 +120,7 @@ public abstract class BaseCommand implements Command, Configurable {
*
* @param filename The filename to create
* @return An open FSDataOutputStream
- * @throws IOException
+ * @throws IOException if there is an error creating the file
*/
public FSDataOutputStream create(String filename) throws IOException {
return create(filename, true);
@@ -136,7 +136,7 @@ public abstract class BaseCommand implements Command, Configurable {
*
* @param filename The filename to create
* @return An open FSDataOutputStream
- * @throws IOException
+ * @throws IOException if there is an error creating the file
*/
public FSDataOutputStream createWithChecksum(String filename)
throws IOException {
@@ -161,7 +161,7 @@ public abstract class BaseCommand implements Command, Configurable {
*
* @param filename The filename to qualify
* @return A qualified Path for the filename
- * @throws IOException
+ * @throws IOException if there is an error creating a qualified path
*/
public Path qualifiedPath(String filename) throws IOException {
Path cwd = defaultFS().makeQualified(new Path("."));
@@ -176,7 +176,7 @@ public abstract class BaseCommand implements Command, Configurable {
*
* @param filename The filename to qualify
* @return A qualified URI for the filename
- * @throws IOException
+ * @throws IOException if there is an error creating a qualified URI
*/
public URI qualifiedURI(String filename) throws IOException {
URI fileURI = URI.create(filename);
@@ -194,7 +194,7 @@ public abstract class BaseCommand implements Command, Configurable {
*
* @param filename The filename to open.
* @return An open InputStream with the file contents
- * @throws IOException
+ * @throws IOException if there is an error opening the file
* @throws IllegalArgumentException If the file does not exist
*/
public InputStream open(String filename) throws IOException {
@@ -236,7 +236,8 @@ public abstract class BaseCommand implements Command, Configurable {
*
* @param jars A list of jar paths
* @param paths A list of directories containing .class files
- * @throws MalformedURLException
+ * @return a classloader for the jars and paths
+ * @throws MalformedURLException if a jar or path is invalid
*/
protected static ClassLoader loaderFor(List<String> jars, List<String> paths)
throws MalformedURLException {
@@ -247,7 +248,8 @@ public abstract class BaseCommand implements Command, Configurable {
* Returns a {@link ClassLoader} for a set of jars.
*
* @param jars A list of jar paths
- * @throws MalformedURLException
+ * @return a classloader for the jars
+ * @throws MalformedURLException if a URL is invalid
*/
protected static ClassLoader loaderForJars(List<String> jars)
throws MalformedURLException {
@@ -258,7 +260,8 @@ public abstract class BaseCommand implements Command, Configurable {
* Returns a {@link ClassLoader} for a set of directories.
*
* @param paths A list of directories containing .class files
- * @throws MalformedURLException
+ * @return a classloader for the paths
+ * @throws MalformedURLException if a path is invalid
*/
protected static ClassLoader loaderForPaths(List<String> paths)
throws MalformedURLException {
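As an illustration of what a jar classloader helper along these lines typically does (an assumed sketch, not the actual BaseCommand implementation):

    import java.io.File;
    import java.net.MalformedURLException;
    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.List;

    static ClassLoader loaderForJars(List<String> jars) throws MalformedURLException {
      URL[] urls = new URL[jars.size()];
      for (int i = 0; i < jars.size(); i++) {
        // file paths must become URLs before URLClassLoader can load from them
        urls[i] = new File(jars.get(i)).toURI().toURL();
      }
      return new URLClassLoader(urls, Thread.currentThread().getContextClassLoader());
    }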
diff --git a/parquet-cli/src/main/java/org/apache/parquet/cli/Command.java b/parquet-cli/src/main/java/org/apache/parquet/cli/Command.java
index 9c19143..99b82c8 100644
--- a/parquet-cli/src/main/java/org/apache/parquet/cli/Command.java
+++ b/parquet-cli/src/main/java/org/apache/parquet/cli/Command.java
@@ -26,7 +26,7 @@ public interface Command {
* Runs this {@code Command}.
*
* @return a return code for the process, 0 indicates success.
- * @throws IOException
+ * @throws IOException if there is an error while running the command
*/
int run() throws IOException;
diff --git a/parquet-cli/src/main/java/org/apache/parquet/cli/util/Expressions.java b/parquet-cli/src/main/java/org/apache/parquet/cli/util/Expressions.java
index 61f632a..06b28b4 100644
--- a/parquet-cli/src/main/java/org/apache/parquet/cli/util/Expressions.java
+++ b/parquet-cli/src/main/java/org/apache/parquet/cli/util/Expressions.java
@@ -102,6 +102,10 @@ public class Expressions {
* * fields should match by name
* * arrays are dereferenced by position [n] => schema is the element schema
* * maps are dereferenced by key => schema is the value schema
+ *
+ * @param schema an Avro schema
+ * @param fieldPaths selected field paths
+ * @return a filtered schema
*/
public static Schema filterSchema(Schema schema, String... fieldPaths) {
return filterSchema(schema, Lists.newArrayList(fieldPaths));
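Usage might look like this one-line sketch (the field paths are assumed examples following the rules above):

    Schema projected = Expressions.filterSchema(fullSchema, "id", "address.zip");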
diff --git a/parquet-cli/src/main/java/org/apache/parquet/cli/util/RecordException.java b/parquet-cli/src/main/java/org/apache/parquet/cli/util/RecordException.java
index f7e7b6c..de18d08 100644
--- a/parquet-cli/src/main/java/org/apache/parquet/cli/util/RecordException.java
+++ b/parquet-cli/src/main/java/org/apache/parquet/cli/util/RecordException.java
@@ -39,6 +39,8 @@ public class RecordException extends RuntimeException {
* thrown
* @param message
* A String message for the exception.
+ * @param args
+ * Args to fill into the message using String.format
*/
public static void check(boolean isValid, String message, Object... args) {
if (!isValid) {
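A typical call site, as a sketch with assumed names:

    // the message is filled with the args via String.format
    RecordException.check(value != null, "Cannot convert field %s: no value", fieldName);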
diff --git a/parquet-cli/src/main/java/org/apache/parquet/cli/util/Schemas.java b/parquet-cli/src/main/java/org/apache/parquet/cli/util/Schemas.java
index 877c7cc..72f3f86 100644
--- a/parquet-cli/src/main/java/org/apache/parquet/cli/util/Schemas.java
+++ b/parquet-cli/src/main/java/org/apache/parquet/cli/util/Schemas.java
@@ -454,6 +454,9 @@ public class Schemas {
* the incoming schema.
* <p>
* Fields cannot be used in more than one record (not Immutable?).
+ *
+ * @param field an Avro schema field
+ * @return a copy of the field
*/
public static Schema.Field copy(Schema.Field field) {
return new Schema.Field(
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ColumnDescriptor.java b/parquet-column/src/main/java/org/apache/parquet/column/ColumnDescriptor.java
index 5f30cd0..ff44b4f 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ColumnDescriptor.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ColumnDescriptor.java
@@ -26,9 +26,6 @@ import org.apache.parquet.schema.Type;
/**
* Describes a column's type as well as its position in its containing schema.
- *
- * @author Julien Le Dem
- *
*/
public class ColumnDescriptor implements Comparable<ColumnDescriptor> {
@@ -43,7 +40,7 @@ public class ColumnDescriptor implements Comparable<ColumnDescriptor> {
* @param type the type of the field
* @param maxRep the maximum repetition level for that path
* @param maxDef the maximum definition level for that path
- * @deprecated Use {@link #ColumnDescriptor(String[], PrimitiveTypeName, int, int)}
+ * @deprecated will be removed in 2.0.0; Use {@link #ColumnDescriptor(String[], PrimitiveType, int, int)}
*/
@Deprecated
public ColumnDescriptor(String[] path, PrimitiveTypeName type, int maxRep,
@@ -55,9 +52,10 @@ public class ColumnDescriptor implements Comparable<ColumnDescriptor> {
*
* @param path the path to the leaf field in the schema
* @param type the type of the field
+ * @param typeLength the length of the type, if type is a fixed-length byte array
* @param maxRep the maximum repetition level for that path
* @param maxDef the maximum definition level for that path
- * @deprecated Use {@link #ColumnDescriptor(String[], PrimitiveTypeName, int, int)}
+ * @deprecated will be removed in 2.0.0; Use {@link #ColumnDescriptor(String[], PrimitiveType, int, int)}
*/
@Deprecated
public ColumnDescriptor(String[] path, PrimitiveTypeName type,
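The replacement constructor takes a full PrimitiveType instead of a bare type name; a sketch:

    PrimitiveType type = Types.required(PrimitiveTypeName.INT32).named("x");
    ColumnDescriptor descriptor =
        new ColumnDescriptor(new String[] {"x"}, type, /* maxRep */ 0, /* maxDef */ 0);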
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ColumnReadStore.java b/parquet-column/src/main/java/org/apache/parquet/column/ColumnReadStore.java
index 813666a..890110a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ColumnReadStore.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ColumnReadStore.java
@@ -20,8 +20,6 @@ package org.apache.parquet.column;
/**
* Container which can produce a ColumnReader for any given column in a schema.
- *
- * @author Julien Le Dem
*/
public interface ColumnReadStore {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ColumnReader.java b/parquet-column/src/main/java/org/apache/parquet/column/ColumnReader.java
index f802c92..52d269e 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ColumnReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ColumnReader.java
@@ -24,19 +24,18 @@ import org.apache.parquet.io.api.Binary;
* Reader for (repetition level, definition level, values) triplets.
* At any given point in time, a ColumnReader points to a single (r, d, v) triplet.
* In order to move to the next triplet, call {@link #consume()}.
- *
+ * <p>
* Depending on the type and the encoding of the column only a subset of the get* methods are implemented.
* Dictionary specific methods enable the upper layers to read the dictionary IDs without decoding the data.
* In particular the Converter will decode the strings in the dictionary only once and iterate on the
* dictionary IDs instead of the values.
- *
- * <ul>Each iteration looks at the current definition level and value as well as the next
- * repetition level:
+ * <p>
+ * Each iteration looks at the current definition level and value as well as the next repetition level:
+ * <ul>
* <li> The current definition level defines if the value is null.</li>
* <li> If the value is defined we can read it with the correct get*() method.</li>
* <li> Looking ahead to the next repetition determines what is the next column to read for in the FSA.</li>
* </ul>
- * @author Julien Le Dem
*/
public interface ColumnReader {
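The triplet iteration described above looks roughly like this sketch for an int column (maxDefinitionLevel is assumed to come from the column descriptor):

    long count = reader.getTotalValueCount();
    for (long i = 0; i < count; i++) {
      if (reader.getCurrentDefinitionLevel() == maxDefinitionLevel) {
        int value = reader.getInteger(); // defined at this level
      }                                  // otherwise the current triplet is null
      reader.consume();                  // advance to the next (r, d, v) triplet
    }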
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriteStore.java b/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriteStore.java
index bb9dfea..e14c7dc 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriteStore.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriteStore.java
@@ -21,8 +21,6 @@ package org.apache.parquet.column;
/**
* Container which can construct writers for multiple columns to be stored
* together.
- *
- * @author Julien Le Dem
*/
public interface ColumnWriteStore {
/**
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriter.java
index c824504..f85bfcc 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ColumnWriter.java
@@ -22,64 +22,61 @@ import org.apache.parquet.io.api.Binary;
/**
* writer for (repetition level, definition level, values) triplets
- *
- * @author Julien Le Dem
- *
*/
public interface ColumnWriter {
/**
* writes the current value
- * @param value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param value an int value
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void write(int value, int repetitionLevel, int definitionLevel);
/**
* writes the current value
- * @param value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param value a long value
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void write(long value, int repetitionLevel, int definitionLevel);
/**
* writes the current value
- * @param value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param value a boolean value
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void write(boolean value, int repetitionLevel, int definitionLevel);
/**
* writes the current value
- * @param value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param value a Binary value
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void write(Binary value, int repetitionLevel, int definitionLevel);
/**
* writes the current value
- * @param value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param value a float value
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void write(float value, int repetitionLevel, int definitionLevel);
/**
* writes the current value
- * @param value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param value a double value
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void write(double value, int repetitionLevel, int definitionLevel);
/**
* writes the current null value
- * @param repetitionLevel
- * @param definitionLevel
+ * @param repetitionLevel a repetition level
+ * @param definitionLevel a definition level
*/
void writeNull(int repetitionLevel, int definitionLevel);
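A sketch of writing one defined value and one null through this interface, assuming a flat optional column (max definition level 1):

    columnWriter.write(42, 0, 1); // definition level == max: the value is defined
    columnWriter.writeNull(0, 0); // definition level below max: the value is null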
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/Dictionary.java b/parquet-column/src/main/java/org/apache/parquet/column/Dictionary.java
index 45fd42d..90d80c5 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/Dictionary.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/Dictionary.java
@@ -22,9 +22,6 @@ import org.apache.parquet.io.api.Binary;
/**
* a dictionary to decode dictionary based encodings
- *
- * @author Julien Le Dem
- *
*/
public abstract class Dictionary {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/Encoding.java b/parquet-column/src/main/java/org/apache/parquet/column/Encoding.java
index 3f21a4e..e6d4b57 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/Encoding.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/Encoding.java
@@ -53,9 +53,6 @@ import org.apache.parquet.io.ParquetDecodingException;
/**
* encoding of the data
- *
- * @author Julien Le Dem
- *
*/
public enum Encoding {
@@ -258,8 +255,11 @@ public enum Encoding {
/**
* initializes a dictionary from a page
- * @param dictionaryPage
+ * @param descriptor the column descriptor for the dictionary-encoded column
+ * @param dictionaryPage a dictionary page
* @return the corresponding dictionary
+ * @throws IOException if there is an exception while reading the dictionary page
+ * @throws UnsupportedOperationException if the encoding is not dictionary based
*/
public Dictionary initDictionary(ColumnDescriptor descriptor, DictionaryPage dictionaryPage) throws IOException {
throw new UnsupportedOperationException(this.name() + " does not support dictionary");
@@ -271,7 +271,7 @@ public enum Encoding {
* @param descriptor the column to read
* @param valuesType the type of values
* @return the proper values reader for the given column
- * @throw {@link UnsupportedOperationException} if the encoding is dictionary based
+ * @throws UnsupportedOperationException if the encoding is dictionary based
*/
public ValuesReader getValuesReader(ColumnDescriptor descriptor, ValuesType valuesType) {
throw new UnsupportedOperationException("Error decoding " + descriptor + ". " + this.name() + " is dictionary based");
@@ -284,7 +284,7 @@ public enum Encoding {
* @param valuesType the type of values
* @param dictionary the dictionary
* @return the proper values reader for the given column
- * @throw {@link UnsupportedOperationException} if the encoding is not dictionary based
+ * @throws UnsupportedOperationException if the encoding is not dictionary based
*/
public ValuesReader getDictionaryBasedValuesReader(ColumnDescriptor descriptor, ValuesType valuesType, Dictionary dictionary) {
throw new UnsupportedOperationException(this.name() + " is not dictionary based");
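As a sketch of how these hooks fit together when a column chunk carries a dictionary page (variable names are assumed):

    Dictionary dictionary =
        dictionaryPage.getEncoding().initDictionary(descriptor, dictionaryPage);
    ValuesReader values =
        dataEncoding.getDictionaryBasedValuesReader(descriptor, ValuesType.VALUES, dictionary);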
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ParquetProperties.java b/parquet-column/src/main/java/org/apache/parquet/column/ParquetProperties.java
index e746811..39b65da 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ParquetProperties.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ParquetProperties.java
@@ -37,9 +37,6 @@ import org.apache.parquet.schema.MessageType;
/**
* This class represents all the configurable Parquet properties.
- *
- * @author amokashi
- *
*/
public class ParquetProperties {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnException.java b/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnException.java
index 5c05447..80f8173 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnException.java
@@ -22,8 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* Thrown if the specified column is unknown in the underlying storage
- *
- * @author Julien Le Dem
*/
public class UnknownColumnException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnTypeException.java b/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnTypeException.java
index 126bc48..46a570f 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnTypeException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/UnknownColumnTypeException.java
@@ -23,8 +23,6 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
/**
* Thrown if the specified column type is unknown in the underlying storage
- *
- * @author Katya Gonina
*/
public class UnknownColumnTypeException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/ValuesType.java b/parquet-column/src/main/java/org/apache/parquet/column/ValuesType.java
index 89cf55b..96023dd 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/ValuesType.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/ValuesType.java
@@ -20,9 +20,6 @@ package org.apache.parquet.column;
/**
* The different type of values we can store in columns
- *
- * @author Julien Le Dem
- *
*/
public enum ValuesType {
REPETITION_LEVEL, DEFINITION_LEVEL, VALUES;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReadStoreImpl.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReadStoreImpl.java
index 3217b94..3784596 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReadStoreImpl.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReadStoreImpl.java
@@ -37,9 +37,6 @@ import org.apache.parquet.schema.Type;
* Implementation of the ColumnReadStore
*
* Initializes individual columns based on schema and converter
- *
- * @author Julien Le Dem
- *
*/
public class ColumnReadStoreImpl implements ColumnReadStore {
@@ -52,6 +49,7 @@ public class ColumnReadStoreImpl implements ColumnReadStore {
* @param pageReadStore underlying page storage
* @param recordConverter the user provided converter to materialize records
* @param schema the schema we are reading
+ * @param createdBy writer version string from the Parquet file being read
*/
public ColumnReadStoreImpl(PageReadStore pageReadStore,
GroupConverter recordConverter,
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
index 8b47977..8c85b37 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
@@ -53,18 +53,12 @@ import org.slf4j.LoggerFactory;
/**
* ColumnReader implementation
- *
- * @author Julien Le Dem
- *
*/
public class ColumnReaderImpl implements ColumnReader {
private static final Logger LOG = LoggerFactory.getLogger(ColumnReaderImpl.class);
/**
* binds the lower level page decoder to the record converter materializing the records
- *
- * @author Julien Le Dem
- *
*/
private static abstract class Binding {
@@ -331,6 +325,8 @@ public class ColumnReaderImpl implements ColumnReader {
* creates a reader for triplets
* @param path the descriptor for the corresponding column
* @param pageReader the underlying store to read from
+ * @param converter a converter that materializes the values in this column in the current record
+ * @param writerVersion writer version string from the Parquet file being read
*/
public ColumnReaderImpl(ColumnDescriptor path, PageReader pageReader, PrimitiveConverter converter, ParsedVersion writerVersion) {
this.path = checkNotNull(path, "path");
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
index e274c11..c1f5d67 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
@@ -36,9 +36,6 @@ import org.slf4j.LoggerFactory;
/**
* Writes (repetition level, definition level, value) triplets and deals with writing pages to the underlying layer.
- *
- * @author Julien Le Dem
- *
*/
final class ColumnWriterV1 implements ColumnWriter {
private static final Logger LOG = LoggerFactory.getLogger(ColumnWriterV1.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
index b50d663..9abdee8 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
@@ -38,9 +38,6 @@ import org.slf4j.LoggerFactory;
/**
* Writes (repetition level, definition level, value) triplets and deals with writing pages to the underlying layer.
- *
- * @author Julien Le Dem
- *
*/
final class ColumnWriterV2 implements ColumnWriter {
private static final Logger LOG = LoggerFactory.getLogger(ColumnWriterV2.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/DataPage.java b/parquet-column/src/main/java/org/apache/parquet/column/page/DataPage.java
index 9f11490..4d8f381 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/DataPage.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/DataPage.java
@@ -20,9 +20,6 @@ package org.apache.parquet.column.page;
/**
* one data page in a chunk
- *
- * @author Julien Le Dem
- *
*/
abstract public class DataPage extends Page {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/DictionaryPage.java b/parquet-column/src/main/java/org/apache/parquet/column/page/DictionaryPage.java
index 306d81b..2401fef 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/DictionaryPage.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/DictionaryPage.java
@@ -28,9 +28,6 @@ import org.apache.parquet.column.Encoding;
/**
* Data for a dictionary page
- *
- * @author Julien Le Dem
- *
*/
public class DictionaryPage extends Page {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/Page.java b/parquet-column/src/main/java/org/apache/parquet/column/page/Page.java
index 3c6b012..606f9f7 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/Page.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/Page.java
@@ -20,9 +20,6 @@ package org.apache.parquet.column.page;
/**
* one page in a chunk
- *
- * @author Julien Le Dem
- *
*/
abstract public class Page {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/PageReadStore.java b/parquet-column/src/main/java/org/apache/parquet/column/page/PageReadStore.java
index 3cfe624..24d5825 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/PageReadStore.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/PageReadStore.java
@@ -24,9 +24,6 @@ import org.apache.parquet.column.ColumnDescriptor;
* contains all the readers for all the columns of the corresponding row group
*
* TODO: rename to RowGroup?
- *
- * @author Julien Le Dem
- *
*/
public interface PageReadStore {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/PageReader.java b/parquet-column/src/main/java/org/apache/parquet/column/page/PageReader.java
index 94c9cb7..2f6169b 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/PageReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/PageReader.java
@@ -20,9 +20,6 @@ package org.apache.parquet.column.page;
/**
* Reader for a sequence of pages from a given column chunk
- *
- * @author Julien Le Dem
- *
*/
public interface PageReader {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriteStore.java b/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriteStore.java
index 2de9db9..0aac63e 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriteStore.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriteStore.java
@@ -22,9 +22,6 @@ import org.apache.parquet.column.ColumnDescriptor;
/**
* contains all the writers for the columns in the corresponding row group
- *
- * @author Julien Le Dem
- *
*/
public interface PageWriteStore {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriter.java
index 1d3f7ed..a2d079f 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/page/PageWriter.java
@@ -27,9 +27,6 @@ import org.apache.parquet.column.statistics.Statistics;
/**
* a writer for all the pages of a given column chunk
- *
- * @author Julien Le Dem
- *
*/
public interface PageWriter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/statistics/Statistics.java b/parquet-column/src/main/java/org/apache/parquet/column/statistics/Statistics.java
index 6888ad6..2cb4cb4 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/statistics/Statistics.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/statistics/Statistics.java
@@ -31,7 +31,7 @@ import org.apache.parquet.schema.Type;
/**
* Statistics class to keep track of statistics in parquet pages and column chunks
*
- * @author Katya Gonina
+ * @param <T> the Java type described by this Statistics instance
*/
public abstract class Statistics<T extends Comparable<T>> {
@@ -346,23 +346,29 @@ public abstract class Statistics<T extends Comparable<T>> {
/**
* Returns the min value in the statistics. The java natural order of the returned type defined by {@link
- * T#compareTo(Object)} might not be the proper one. For example, UINT_32 requires unsigned comparison instead of the
+ * Comparable#compareTo(Object)} might not be the proper one. For example, UINT_32 requires unsigned comparison instead of the
* natural signed one. Use {@link #compareMinToValue(Comparable)} or the comparator returned by {@link #comparator()} to
* always get the proper ordering.
+ *
+ * @return the min value
*/
abstract public T genericGetMin();
/**
* Returns the max value in the statistics. The java natural order of the returned type defined by {@link
- * T#compareTo(Object)} might not be the proper one. For example, UINT_32 requires unsigned comparison instead of the
+ * Comparable#compareTo(Object)} might not be the proper one. For example, UINT_32 requires unsigned comparison instead of the
* natural signed one. Use {@link #compareMaxToValue(Comparable)} or the comparator returned by {@link #comparator()} to
* always get the proper ordering.
+ *
+ * @return the max value
*/
abstract public T genericGetMax();
/**
* Returns the {@link PrimitiveComparator} implementation to be used to compare two generic values in the proper way
* (for example, unsigned comparison for UINT_32).
+ *
+ * @return the comparator for data described by this Statistics instance
*/
public final PrimitiveComparator<T> comparator() {
return comparator;
@@ -410,6 +416,8 @@ public abstract class Statistics<T extends Comparable<T>> {
/**
* Returns the string representation of min for debugging/logging purposes.
+ *
+ * @return the min value as a string
*/
public String minAsString() {
return stringify(genericGetMin());
@@ -417,6 +425,8 @@ public abstract class Statistics<T extends Comparable<T>> {
/**
* Returns the string representation of max for debugging/logging purposes.
+ *
+ * @return the max value as a string
*/
public String maxAsString() {
return stringify(genericGetMax());
@@ -492,6 +502,8 @@ public abstract class Statistics<T extends Comparable<T>> {
/**
* Returns whether there have been non-null values added to this statistics
+ *
+ * @return true if the values contained at least one non-null value
*/
public boolean hasNonNullValue() {
return hasNonNullValue;
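For context on the UINT_32 caveat in the min/max javadocs above, here is a minimal standalone Java sketch (not from the patched sources) showing how the same 32 bits order differently under the natural signed comparison and the unsigned comparison a UINT_32 comparator must use:

    public class UnsignedOrderDemo {
      public static void main(String[] args) {
        int uint32Max = -1; // bit pattern 0xFFFFFFFF, i.e. 4294967295 as UINT_32
        // natural (signed) order says -1 < 1
        System.out.println(Integer.compare(uint32Max, 1));         // negative
        // unsigned order says 4294967295 > 1, the proper UINT_32 ordering
        System.out.println(Integer.compareUnsigned(uint32Max, 1)); // positive
      }
    }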
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/statistics/StatisticsClassException.java b/parquet-column/src/main/java/org/apache/parquet/column/statistics/StatisticsClassException.java
index 4c23101..e640de7 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/statistics/StatisticsClassException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/statistics/StatisticsClassException.java
@@ -22,8 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* Thrown if the two Statistics objects have mismatching types
- *
- * @author Katya Gonina
*/
public class StatisticsClassException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/RequiresFallback.java b/parquet-column/src/main/java/org/apache/parquet/column/values/RequiresFallback.java
index f491233..3caead9 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/RequiresFallback.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/RequiresFallback.java
@@ -23,10 +23,7 @@ import org.apache.parquet.column.values.fallback.FallbackValuesWriter;
/**
*
* Used to add extra behavior to a ValuesWriter that requires fallback
- * @See {@link FallbackValuesWriter}
- *
- * @author Julien Le Dem
- *
+ * @see FallbackValuesWriter
*/
public interface RequiresFallback {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesReader.java
index 315b72e..5732660 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesReader.java
@@ -31,8 +31,6 @@ import org.apache.parquet.io.api.Binary;
*
* Given that pages are homogeneous (store only a single type), typical subclasses
* will only override one of the read*() methods.
- *
- * @author Julien Le Dem
*/
public abstract class ValuesReader {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesWriter.java
index 1bebd55..ce4a957 100755
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/ValuesWriter.java
@@ -25,9 +25,6 @@ import org.apache.parquet.io.api.Binary;
/**
* base class to implement an encoding for a given column
- *
- * @author Julien Le Dem
- *
*/
public abstract class ValuesWriter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
index bcc828b..78d1b72 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
@@ -33,9 +33,6 @@ import org.slf4j.LoggerFactory;
/**
* a column reader that packs the ints in the number of bits required based on the maximum size.
- *
- * @author Julien Le Dem
- *
*/
public class BitPackingValuesReader extends ValuesReader {
private static final Logger LOG = LoggerFactory.getLogger(BitPackingValuesReader.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesWriter.java
index fc0f185..1c98737 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesWriter.java
@@ -34,9 +34,6 @@ import org.apache.parquet.io.ParquetEncodingException;
/**
* a column writer that packs the ints in the number of bits required based on the maximum size.
- *
- * @author Julien Le Dem
- *
*/
public class BitPackingValuesWriter extends ValuesWriter {
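The "number of bits required based on the maximum size" mentioned in this javadoc reduces to a leading-zeros computation; a minimal sketch (the real writer derives this from its bound parameter):

    public class BitWidthSketch {
      public static void main(String[] args) {
        int bound = 5; // e.g. the maximum definition level a column can take
        int bitWidth = 32 - Integer.numberOfLeadingZeros(bound);
        System.out.println(bitWidth + " bits per packed value"); // prints 3
      }
    }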
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingConfig.java b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingConfig.java
index 565d6ec..9d13bd3 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingConfig.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingConfig.java
@@ -28,8 +28,6 @@ import java.io.InputStream;
/**
* Config for delta binary packing
- *
- * @author Tianshuo Deng
*/
class DeltaBinaryPackingConfig {
final int blockSizeInValues;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesReader.java
index bf53846..dceaa52 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesReader.java
@@ -31,8 +31,6 @@ import java.nio.ByteBuffer;
/**
* Read values written by {@link DeltaBinaryPackingValuesWriter}
- *
- * @author Tianshuo Deng
*/
public class DeltaBinaryPackingValuesReader extends ValuesReader {
private int totalValueCount;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriter.java
index ac3c594..45b7a93 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriter.java
@@ -33,7 +33,6 @@ import java.io.IOException;
/**
* Write integers with delta encoding and binary packing
* The format is as follows:
- * <p/>
* <pre>
* {@code
* delta-binary-packing: <page-header> <block>*
@@ -45,10 +44,8 @@ import java.io.IOException;
* blockSizeInValues,blockSizeInValues,totalValueCount,firstValue : unsigned varint
* }
* </pre>
- *
+ * <p>
* The algorithm and format is inspired by D. Lemire's paper: http://lemire.me/blog/archives/2012/09/12/fast-integer-compression-decoding-billions-of-integers-per-second/
- *
- * @author Tianshuo Deng
*/
public abstract class DeltaBinaryPackingValuesWriter extends ValuesWriter {
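A rough, self-contained sketch of the block math behind the grammar above: deltas are shifted by the block's minimum delta so they are non-negative, and each mini block can then be packed at the bit width of its largest shifted delta (illustrative only; the names here are made up):

    import java.util.Arrays;

    public class DeltaBlockSketch {
      public static void main(String[] args) {
        int[] values = {7, 5, 3, 1, 2, 3, 4, 5};
        int[] deltas = new int[values.length - 1];
        for (int i = 1; i < values.length; i++) {
          deltas[i - 1] = values[i] - values[i - 1];
        }
        int minDelta = Arrays.stream(deltas).min().getAsInt();
        // shifting by minDelta makes every delta non-negative and packable
        int maxShifted = Arrays.stream(deltas).map(d -> d - minDelta).max().getAsInt();
        int bitWidth = 32 - Integer.numberOfLeadingZeros(maxShifted);
        System.out.println("minDelta=" + minDelta + ", bitWidth=" + bitWidth);
      }
    }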
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForInteger.java b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForInteger.java
index f2d0acc..3c9e57c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForInteger.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForInteger.java
@@ -20,7 +20,6 @@ package org.apache.parquet.column.values.delta;
import java.io.IOException;
-import org.apache.parquet.Preconditions;
import org.apache.parquet.bytes.ByteBufferAllocator;
import org.apache.parquet.bytes.BytesInput;
import org.apache.parquet.bytes.BytesUtils;
@@ -30,8 +29,6 @@ import org.apache.parquet.io.ParquetEncodingException;
/**
* Write integers (INT32) with delta encoding and binary packing.
- *
- * @author Vassil Lunchev
*/
public class DeltaBinaryPackingValuesWriterForInteger extends DeltaBinaryPackingValuesWriter {
/**
@@ -150,7 +147,7 @@ public class DeltaBinaryPackingValuesWriterForInteger extends DeltaBinaryPacking
/**
* iterate through values in each mini block and calculate the bitWidths of max values.
*
- * @param miniBlocksToFlush
+ * @param miniBlocksToFlush number of miniblocks
*/
private void calculateBitWidthsForDeltaBlockBuffer(int miniBlocksToFlush) {
for (int miniBlockIndex = 0; miniBlockIndex < miniBlocksToFlush; miniBlockIndex++) {
@@ -170,7 +167,7 @@ public class DeltaBinaryPackingValuesWriterForInteger extends DeltaBinaryPacking
/**
* getBytes will trigger flushing block buffer, DO NOT write after getBytes() is called without calling reset()
*
- * @return
+ * @return a BytesInput that contains the encoded page data
*/
@Override
public BytesInput getBytes() {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForLong.java b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForLong.java
index 30eecef..2f26103 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForLong.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/delta/DeltaBinaryPackingValuesWriterForLong.java
@@ -30,8 +30,6 @@ import org.apache.parquet.io.ParquetEncodingException;
/**
* Write longs (INT64) with delta encoding and binary packing.
- *
- * @author Vassil Lunchev
*/
public class DeltaBinaryPackingValuesWriterForLong extends DeltaBinaryPackingValuesWriter {
/**
@@ -152,7 +150,7 @@ public class DeltaBinaryPackingValuesWriterForLong extends DeltaBinaryPackingVal
/**
* iterate through values in each mini block and calculate the bitWidths of max values.
*
- * @param miniBlocksToFlush
+ * @param miniBlocksToFlush number of miniblocks
*/
private void calculateBitWidthsForDeltaBlockBuffer(int miniBlocksToFlush) {
for (int miniBlockIndex = 0; miniBlockIndex < miniBlocksToFlush; miniBlockIndex++) {
@@ -172,7 +170,7 @@ public class DeltaBinaryPackingValuesWriterForLong extends DeltaBinaryPackingVal
/**
* getBytes will trigger flushing block buffer, DO NOT write after getBytes() is called without calling reset()
*
- * @return
+ * @return a BytesInput that contains the encoded page data
*/
@Override
public BytesInput getBytes() {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
index e6ee1fd..1a2ccb9 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
@@ -32,9 +32,6 @@ import org.slf4j.LoggerFactory;
/**
* Reads binary data written by {@link DeltaLengthByteArrayValuesWriter}
- *
- * @author Aniket Mokashi
- *
*/
public class DeltaLengthByteArrayValuesReader extends ValuesReader {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
index 118153c..3de1287 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
@@ -40,7 +40,6 @@ import org.slf4j.LoggerFactory;
* delta-length-byte-array : length* byte-array*
* }
* </pre>
- * @author Aniket Mokashi
*
*/
public class DeltaLengthByteArrayValuesWriter extends ValuesWriter {
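The length* byte-array* grammar above keeps lengths and payloads in separate streams; a toy sketch of that page layout (the real writer delta encodes the length stream rather than writing raw bytes):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    public class DeltaLengthLayoutSketch {
      public static void main(String[] args) throws IOException {
        String[] values = {"parquet", "column", "page"};
        ByteArrayOutputStream lengths = new ByteArrayOutputStream();
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        for (String v : values) {
          byte[] b = v.getBytes(StandardCharsets.UTF_8);
          lengths.write(b.length); // toy: one byte per length instead of delta coding
          bytes.write(b);
        }
        // page = all lengths first, then all byte arrays
        System.out.println(lengths.size() + " length bytes, " + bytes.size() + " data bytes");
      }
    }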
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayReader.java
index 7a01627..8f7bbf9 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayReader.java
@@ -31,9 +31,6 @@ import org.apache.parquet.io.api.Binary;
/**
* Reads binary data written by {@link DeltaByteArrayWriter}
- *
- * @author Aniket Mokashi
- *
*/
public class DeltaByteArrayReader extends ValuesReader implements RequiresPreviousReader {
private ValuesReader prefixLengthReader;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayWriter.java
index fb6cc9b..be7748f 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/deltastrings/DeltaByteArrayWriter.java
@@ -34,8 +34,6 @@ import org.apache.parquet.io.api.Binary;
* delta-length-byte-array : prefix-length* suffixes*
* }
* </pre>
- * @author Aniket Mokashi
- *
*/
public class DeltaByteArrayWriter extends ValuesWriter{
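The prefix-length* suffixes* grammar stores, per value, how many leading characters it shares with the previous value plus the remaining suffix; a self-contained sketch of that split:

    public class PrefixSuffixSketch {
      static int commonPrefix(String a, String b) {
        int n = Math.min(a.length(), b.length());
        int i = 0;
        while (i < n && a.charAt(i) == b.charAt(i)) i++;
        return i;
      }

      public static void main(String[] args) {
        String previous = "";
        for (String s : new String[] {"apache", "apathy", "apple"}) {
          int p = commonPrefix(previous, s);
          System.out.println("prefix-length=" + p + ", suffix=" + s.substring(p));
          previous = s;
        }
      }
    }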
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
index 87edda6..489c97c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
@@ -34,9 +34,6 @@ import org.slf4j.LoggerFactory;
/**
* Reads values that have been dictionary encoded
- *
- * @author Julien Le Dem
- *
*/
public class DictionaryValuesReader extends ValuesReader {
private static final Logger LOG = LoggerFactory.getLogger(DictionaryValuesReader.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
index 5ef7712..5eda9d0 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
@@ -60,9 +60,6 @@ import org.slf4j.LoggerFactory;
/**
* Will attempt to encode values using a dictionary and fall back to plain encoding
* if the dictionary gets too big
- *
- * @author Julien Le Dem
- *
*/
public abstract class DictionaryValuesWriter extends ValuesWriter implements RequiresFallback {
private static final Logger LOG = LoggerFactory.getLogger(DictionaryValuesWriter.class);
@@ -103,9 +100,6 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
(currently used for off-heap memory which is not garbage collected) */
private List<RunLengthBitPackingHybridEncoder> encoders = new ArrayList<RunLengthBitPackingHybridEncoder>();
- /**
- * @param maxDictionaryByteSize
- */
protected DictionaryValuesWriter(int maxDictionaryByteSize, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
this.allocator = allocator;
this.maxDictionaryByteSize = maxDictionaryByteSize;
@@ -238,17 +232,11 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
);
}
- /**
- *
- */
public static class PlainBinaryDictionaryValuesWriter extends DictionaryValuesWriter {
/* type specific dictionary content */
protected Object2IntMap<Binary> binaryDictionaryContent = new Object2IntLinkedOpenHashMap<Binary>();
- /**
- * @param maxDictionaryByteSize
- */
public PlainBinaryDictionaryValuesWriter(int maxDictionaryByteSize, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
super(maxDictionaryByteSize, encodingForDataPage, encodingForDictionaryPage, allocator);
binaryDictionaryContent.defaultReturnValue(-1);
@@ -309,16 +297,10 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
}
}
- /**
- *
- */
public static class PlainFixedLenArrayDictionaryValuesWriter extends PlainBinaryDictionaryValuesWriter {
private final int length;
- /**
- * @param maxDictionaryByteSize
- */
public PlainFixedLenArrayDictionaryValuesWriter(int maxDictionaryByteSize, int length, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
super(maxDictionaryByteSize, encodingForDataPage, encodingForDictionaryPage, allocator);
this.length = length;
@@ -352,17 +334,11 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
}
}
- /**
- *
- */
public static class PlainLongDictionaryValuesWriter extends DictionaryValuesWriter {
/* type specific dictionary content */
private Long2IntMap longDictionaryContent = new Long2IntLinkedOpenHashMap();
- /**
- * @param maxDictionaryByteSize
- */
public PlainLongDictionaryValuesWriter(int maxDictionaryByteSize, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
super(maxDictionaryByteSize, encodingForDataPage, encodingForDictionaryPage, allocator);
longDictionaryContent.defaultReturnValue(-1);
@@ -423,17 +399,11 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
}
}
- /**
- *
- */
public static class PlainDoubleDictionaryValuesWriter extends DictionaryValuesWriter {
/* type specific dictionary content */
private Double2IntMap doubleDictionaryContent = new Double2IntLinkedOpenHashMap();
- /**
- * @param maxDictionaryByteSize
- */
public PlainDoubleDictionaryValuesWriter(int maxDictionaryByteSize, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
super(maxDictionaryByteSize, encodingForDataPage, encodingForDictionaryPage, allocator);
doubleDictionaryContent.defaultReturnValue(-1);
@@ -494,17 +464,11 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
}
}
- /**
- *
- */
public static class PlainIntegerDictionaryValuesWriter extends DictionaryValuesWriter {
/* type specific dictionary content */
private Int2IntMap intDictionaryContent = new Int2IntLinkedOpenHashMap();
- /**
- * @param maxDictionaryByteSize
- */
public PlainIntegerDictionaryValuesWriter(int maxDictionaryByteSize, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
super(maxDictionaryByteSize, encodingForDataPage, encodingForDictionaryPage, allocator);
intDictionaryContent.defaultReturnValue(-1);
@@ -565,17 +529,11 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
}
}
- /**
- *
- */
public static class PlainFloatDictionaryValuesWriter extends DictionaryValuesWriter {
/* type specific dictionary content */
private Float2IntMap floatDictionaryContent = new Float2IntLinkedOpenHashMap();
- /**
- * @param maxDictionaryByteSize
- */
public PlainFloatDictionaryValuesWriter(int maxDictionaryByteSize, Encoding encodingForDataPage, Encoding encodingForDictionaryPage, ByteBufferAllocator allocator) {
super(maxDictionaryByteSize, encodingForDataPage, encodingForDictionaryPage, allocator);
floatDictionaryContent.defaultReturnValue(-1);
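A toy model of the encode-then-maybe-fall-back behavior documented on this class: values map to dictionary ids until the dictionary's byte size crosses maxDictionaryByteSize (illustrative only, not the writer's actual bookkeeping):

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class DictionaryFallbackSketch {
      public static void main(String[] args) {
        String[] values = {"a", "b", "a", "c", "a"};
        long maxDictionaryByteSize = 1024; // fallback threshold
        Map<String, Integer> dictionary = new LinkedHashMap<>();
        int[] ids = new int[values.length];
        long dictionaryBytes = 0;
        for (int i = 0; i < values.length; i++) {
          Integer id = dictionary.get(values[i]);
          if (id == null) {
            id = dictionary.size();
            dictionary.put(values[i], id);
            dictionaryBytes += values[i].length();
            if (dictionaryBytes > maxDictionaryByteSize) {
              System.out.println("dictionary too big: would fall back to plain encoding");
            }
          }
          ids[i] = id;
        }
        System.out.println(dictionary + " -> " + Arrays.toString(ids));
      }
    }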
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/IntList.java b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/IntList.java
index dca1470..5735ca5 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/IntList.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/IntList.java
@@ -24,10 +24,6 @@ import java.util.List;
/**
* An append-only integer list
* avoids autoboxing and buffer resizing
- *
- *
- * @author Julien Le Dem
- *
*/
public class IntList {
@@ -49,9 +45,6 @@ public class IntList {
/**
* to iterate on the content of the list
* not an actual iterator to avoid autoboxing
- *
- * @author Julien Le Dem
- *
*/
public static class IntIterator {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/PlainValuesDictionary.java b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/PlainValuesDictionary.java
index 0b8beb2..3d21543 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/PlainValuesDictionary.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/PlainValuesDictionary.java
@@ -44,7 +44,7 @@ public abstract class PlainValuesDictionary extends Dictionary {
/**
* @param dictionaryPage the PLAIN encoded content of the dictionary
- * @throws IOException
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
protected PlainValuesDictionary(DictionaryPage dictionaryPage) throws IOException {
super(dictionaryPage.getEncoding());
@@ -68,7 +68,7 @@ public abstract class PlainValuesDictionary extends Dictionary {
* length.
*
* @param dictionaryPage a {@code DictionaryPage} of encoded binary values
- * @throws IOException
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
public PlainBinaryDictionary(DictionaryPage dictionaryPage) throws IOException {
this(dictionaryPage, null);
@@ -84,7 +84,7 @@ public abstract class PlainValuesDictionary extends Dictionary {
*
* @param dictionaryPage a {@code DictionaryPage} of encoded binary values
* @param length a fixed length of binary arrays, or null if not fixed
- * @throws IOException
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
public PlainBinaryDictionary(DictionaryPage dictionaryPage, Integer length) throws IOException {
super(dictionaryPage);
@@ -146,8 +146,8 @@ public abstract class PlainValuesDictionary extends Dictionary {
private long[] longDictionaryContent = null;
/**
- * @param dictionaryPage
- * @throws IOException
+ * @param dictionaryPage a dictionary page of encoded long values
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
public PlainLongDictionary(DictionaryPage dictionaryPage) throws IOException {
super(dictionaryPage);
@@ -189,8 +189,8 @@ public abstract class PlainValuesDictionary extends Dictionary {
private double[] doubleDictionaryContent = null;
/**
- * @param dictionaryPage
- * @throws IOException
+ * @param dictionaryPage a dictionary page of encoded double values
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
public PlainDoubleDictionary(DictionaryPage dictionaryPage) throws IOException {
super(dictionaryPage);
@@ -232,8 +232,8 @@ public abstract class PlainValuesDictionary extends Dictionary {
private int[] intDictionaryContent = null;
/**
- * @param dictionaryPage
- * @throws IOException
+ * @param dictionaryPage a dictionary page of encoded integer values
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
public PlainIntegerDictionary(DictionaryPage dictionaryPage) throws IOException {
super(dictionaryPage);
@@ -275,8 +275,8 @@ public abstract class PlainValuesDictionary extends Dictionary {
private float[] floatDictionaryContent = null;
/**
- * @param dictionaryPage
- * @throws IOException
+ * @param dictionaryPage a dictionary page of encoded float values
+ * @throws IOException if there is an exception while decoding the dictionary page
*/
public PlainFloatDictionary(DictionaryPage dictionaryPage) throws IOException {
super(dictionaryPage);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/factory/ValuesWriterFactory.java b/parquet-column/src/main/java/org/apache/parquet/column/values/factory/ValuesWriterFactory.java
index 8f06e7b..e4a11f8 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/factory/ValuesWriterFactory.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/factory/ValuesWriterFactory.java
@@ -25,7 +25,9 @@ import org.apache.parquet.column.values.ValuesWriter;
/**
* Can be overridden to allow users to manually test different strategies to create ValuesWriters.
* To do this, the ValuesWriterFactory to be used must be passed to the {@link org.apache.parquet.column.ParquetProperties.Builder}.
- * <ul>Lifecycle of ValuesWriterFactories is:
+ * <p>
+ * Lifecycle of ValuesWriterFactories is:
+ * <ul>
* <li> Initialized while creating a {@link org.apache.parquet.column.ParquetProperties} using the Builder</li>
* <li> If the factory must read Hadoop config, it needs to implement the Configurable interface.
* In addition to that, ParquetOutputFormat needs to be updated to pass in the Hadoop config via the setConf()
@@ -37,11 +39,14 @@ public interface ValuesWriterFactory {
/**
* Used to initialize the factory. This method is called before newValuesWriter()
+ * @param parquetProperties a write configuration
*/
void initialize(ParquetProperties parquetProperties);
/**
* Creates a ValuesWriter to write values for the given column.
+ * @param descriptor a column descriptor
+ * @return a new values writer for values in the descriptor's column
*/
ValuesWriter newValuesWriter(ColumnDescriptor descriptor);
}
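A sketch of the lifecycle this javadoc describes, as a factory that delegates to the module's default implementation (this assumes DefaultValuesWriterFactory in org.apache.parquet.column.values.factory; an instance would be wired in through the ParquetProperties.Builder mentioned above):

    import org.apache.parquet.column.ColumnDescriptor;
    import org.apache.parquet.column.ParquetProperties;
    import org.apache.parquet.column.values.ValuesWriter;
    import org.apache.parquet.column.values.factory.DefaultValuesWriterFactory;
    import org.apache.parquet.column.values.factory.ValuesWriterFactory;

    public class LoggingValuesWriterFactory implements ValuesWriterFactory {
      private final ValuesWriterFactory delegate = new DefaultValuesWriterFactory();

      @Override
      public void initialize(ParquetProperties parquetProperties) {
        delegate.initialize(parquetProperties); // called before any newValuesWriter()
      }

      @Override
      public ValuesWriter newValuesWriter(ColumnDescriptor descriptor) {
        System.out.println("creating writer for " + descriptor);
        return delegate.newValuesWriter(descriptor);
      }
    }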
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
index 3296daa..9dc5629 100755
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
@@ -30,9 +30,6 @@ import org.slf4j.LoggerFactory;
/**
* encodes boolean for the plain encoding: one bit at a time (0 = false)
- *
- * @author Julien Le Dem
- *
*/
public class BooleanPlainValuesReader extends ValuesReader {
private static final Logger LOG = LoggerFactory.getLogger(BooleanPlainValuesReader.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesWriter.java
index c3e88ea..62529e5 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesWriter.java
@@ -29,9 +29,6 @@ import org.apache.parquet.column.values.bitpacking.ByteBitPackingValuesWriter;
/**
* An implementation of the PLAIN encoding
- *
- * @author Julien Le Dem
- *
*/
public class BooleanPlainValuesWriter extends ValuesWriter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
index 7738de7..15ed434 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
@@ -29,8 +29,6 @@ import org.slf4j.LoggerFactory;
/**
* ValuesReader for FIXED_LEN_BYTE_ARRAY.
- *
- * @author David Z. Chen <dc...@linkedin.com>
*/
public class FixedLenByteArrayPlainValuesReader extends ValuesReader {
private static final Logger LOG = LoggerFactory.getLogger(FixedLenByteArrayPlainValuesReader.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
index d7b2deb..3e80b3a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
@@ -33,8 +33,6 @@ import org.slf4j.LoggerFactory;
/**
* ValuesWriter for FIXED_LEN_BYTE_ARRAY.
- *
- * @author David Z. Chen <dc...@linkedin.com>
*/
public class FixedLenByteArrayPlainValuesWriter extends ValuesWriter {
private static final Logger LOG = LoggerFactory.getLogger(PlainValuesWriter.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
index 726f611..f576528 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
@@ -29,9 +29,6 @@ import org.slf4j.LoggerFactory;
/**
* Plain encoding for float, double, int, long
- *
- * @author Julien Le Dem
- *
*/
abstract public class PlainValuesReader extends ValuesReader {
private static final Logger LOG = LoggerFactory.getLogger(PlainValuesReader.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
index aa96cb6..bc5bdda 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
@@ -34,9 +34,6 @@ import org.slf4j.LoggerFactory;
/**
* Plain encoding except for booleans
- *
- * @author Julien Le Dem
- *
*/
public class PlainValuesWriter extends ValuesWriter {
private static final Logger LOG = LoggerFactory.getLogger(PlainValuesWriter.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
index d682a98..0db266f 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
@@ -33,8 +33,6 @@ import org.slf4j.LoggerFactory;
/**
* Decodes values written in the grammar described in {@link RunLengthBitPackingHybridEncoder}
- *
- * @author Julien Le Dem
*/
public class RunLengthBitPackingHybridDecoder {
private static final Logger LOG = LoggerFactory.getLogger(RunLengthBitPackingHybridDecoder.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
index 5fba70a..7d5fb15 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
@@ -55,9 +55,7 @@ import org.slf4j.LoggerFactory;
* portion of the above grammar. The {@code <length>} portion is done by
* {@link RunLengthBitPackingHybridValuesWriter}
* <p>
- * Only supports values >= 0 // TODO: is that ok? Should we make a signed version?
- *
- * @author Alex Levenson
+ * Only supports positive values (including 0) // TODO: is that ok? Should we make a signed version?
*/
public class RunLengthBitPackingHybridEncoder {
private static final Logger LOG = LoggerFactory.getLogger(RunLengthBitPackingHybridEncoder.class);
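A simplified model of the hybrid choice this encoder's grammar implies: long runs of one value become RLE runs, short stretches are bit packed in groups of 8 (the real thresholds and packing details differ):

    public class HybridRunSketch {
      public static void main(String[] args) {
        int[] levels = {0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1};
        int i = 0;
        while (i < levels.length) {
          int j = i;
          while (j < levels.length && levels[j] == levels[i]) j++;
          if (j - i >= 8) { // long run: cheap to store as (count, value)
            System.out.println("rle-run: " + (j - i) + " x " + levels[i]);
            i = j;
          } else {          // short runs: pack 8 values at a time
            int end = Math.min(i + 8, levels.length);
            System.out.println("bit-packed-run: " + (end - i) + " values");
            i = end;
          }
        }
      }
    }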
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesReader.java
index ebfa76d..821ac62 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesReader.java
@@ -28,8 +28,6 @@ import org.apache.parquet.io.ParquetDecodingException;
/**
* This ValuesReader does all the reading in {@link #initFromPage}
* and stores the values in an in memory buffer, which is less than ideal.
- *
- * @author Alex Levenson
*/
public class RunLengthBitPackingHybridValuesReader extends ValuesReader {
private final int bitWidth;
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesWriter.java
index 14ef161..3b7a5de 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridValuesWriter.java
@@ -27,9 +27,6 @@ import org.apache.parquet.column.Encoding;
import org.apache.parquet.column.values.ValuesWriter;
import org.apache.parquet.io.ParquetEncodingException;
-/**
- * @author Alex Levenson
- */
public class RunLengthBitPackingHybridValuesWriter extends ValuesWriter {
private final RunLengthBitPackingHybridEncoder encoder;
diff --git a/parquet-column/src/main/java/org/apache/parquet/example/DummyRecordConverter.java b/parquet-column/src/main/java/org/apache/parquet/example/DummyRecordConverter.java
index c9c3589..c4a20c7 100644
--- a/parquet-column/src/main/java/org/apache/parquet/example/DummyRecordConverter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/example/DummyRecordConverter.java
@@ -32,9 +32,6 @@ import org.apache.parquet.schema.TypeConverter;
/**
* Dummy implementation for perf tests
- *
- * @author Julien Le Dem
- *
*/
public final class DummyRecordConverter extends RecordMaterializer<Object> {
diff --git a/parquet-column/src/main/java/org/apache/parquet/example/Paper.java b/parquet-column/src/main/java/org/apache/parquet/example/Paper.java
index a55cdbc..720acb9 100644
--- a/parquet-column/src/main/java/org/apache/parquet/example/Paper.java
+++ b/parquet-column/src/main/java/org/apache/parquet/example/Paper.java
@@ -31,9 +31,6 @@ import org.apache.parquet.schema.PrimitiveType;
/**
* Examples from the Dremel Paper
- *
- * @author Julien Le Dem
- *
*/
public class Paper {
public static final MessageType schema =
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter/AndRecordFilter.java b/parquet-column/src/main/java/org/apache/parquet/filter/AndRecordFilter.java
index 0c2a295..9ae417b 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter/AndRecordFilter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter/AndRecordFilter.java
@@ -25,8 +25,6 @@ import org.apache.parquet.column.ColumnReader;
* Provides ability to chain two filters together. Bear in mind that the first one will
* short circuit the second. Useful for getting a page of already-filtered results,
* e.g. and( column("manufacturer", equalTo("Volkswagen")), page(100,50))
- *
- * @author Jacob Metcalf
*/
public final class AndRecordFilter implements RecordFilter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter/NotRecordFilter.java b/parquet-column/src/main/java/org/apache/parquet/filter/NotRecordFilter.java
index 1605dba..24a23f1 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter/NotRecordFilter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter/NotRecordFilter.java
@@ -23,8 +23,6 @@ import org.apache.parquet.column.ColumnReader;
/**
* Provides ability to negate the result of a filter.
- *
- * @author Frank Austin Nothaft
*/
public final class NotRecordFilter implements RecordFilter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter/OrRecordFilter.java b/parquet-column/src/main/java/org/apache/parquet/filter/OrRecordFilter.java
index 09db24a..0c6f71c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter/OrRecordFilter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter/OrRecordFilter.java
@@ -23,8 +23,6 @@ import org.apache.parquet.column.ColumnReader;
/**
* Provides ability to chain two filters together.
- *
- * @author Frank Austin Nothaft
*/
public final class OrRecordFilter implements RecordFilter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter/RecordFilter.java b/parquet-column/src/main/java/org/apache/parquet/filter/RecordFilter.java
index e336645..fef3946 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter/RecordFilter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter/RecordFilter.java
@@ -21,8 +21,6 @@ package org.apache.parquet.filter;
/**
* Filter to be applied to a record to work out whether to skip it.
- *
- * @author Jacob Metcalf
*/
public interface RecordFilter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter/UnboundRecordFilter.java b/parquet-column/src/main/java/org/apache/parquet/filter/UnboundRecordFilter.java
index 4699980..02609f3 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter/UnboundRecordFilter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter/UnboundRecordFilter.java
@@ -24,8 +24,6 @@ import org.apache.parquet.column.ColumnReader;
* Builder for a record filter. Idea is that each filter provides a create function
* which returns an unbound filter. This only becomes a filter when it is bound to the actual
* columns.
- *
- * @author Jacob Metcalf
*/
public interface UnboundRecordFilter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java b/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
index 17bd2e1..f8e62bc 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
@@ -32,11 +32,11 @@ import static org.apache.parquet.Preconditions.checkNotNull;
* The first way, that only supports filtering records during record assembly, is found
* in {@link org.apache.parquet.filter}. The new API (found in {@link org.apache.parquet.filter2}) supports
* also filtering entire rowgroups of records without reading them at all.
- *
+ * <p>
* This class defines a common interface that both of these filters share,
* {@link Filter}. A Filter can be either an {@link UnboundRecordFilter} from the old API, or
* a {@link FilterPredicate} from the new API, or a sentinel no-op filter.
- *
+ * <p>
* Having this common interface simplifies passing a filter through the read path of parquet's
* codebase.
*/
@@ -64,6 +64,9 @@ public class FilterCompat {
* Given a FilterPredicate, return a Filter that wraps it.
* This method also logs the filter being used and rewrites
* the predicate to not include the not() operator.
+ *
+ * @param filterPredicate a filter predicate
+ * @return a filter for the given predicate
*/
public static Filter get(FilterPredicate filterPredicate) {
checkNotNull(filterPredicate, "filterPredicate");
@@ -82,6 +85,9 @@ public class FilterCompat {
/**
* Given an UnboundRecordFilter, return a Filter that wraps it.
+ *
+ * @param unboundRecordFilter an unbound record filter
+ * @return a Filter for the given record filter (from the old API)
*/
public static Filter get(UnboundRecordFilter unboundRecordFilter) {
return new UnboundRecordFilterCompat(unboundRecordFilter);
@@ -90,10 +96,14 @@ public class FilterCompat {
/**
* Given either a FilterPredicate or the class of an UnboundRecordFilter, or neither (but not both)
* return a Filter that wraps whichever was provided.
- *
+ * <p>
* Either filterPredicate or unboundRecordFilterClass must be null, or an exception is thrown.
- *
+ * <p>
* If both are null, the no op filter will be returned.
+ *
+ * @param filterPredicate a filter predicate, or null
+ * @param unboundRecordFilter an unbound record filter, or null
+ * @return a Filter wrapping either the predicate or the unbound record filter (from the old API)
*/
public static Filter get(FilterPredicate filterPredicate, UnboundRecordFilter unboundRecordFilter) {
checkArgument(filterPredicate == null || unboundRecordFilter == null,
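Typical use of the wrappers documented here, assuming the static factory methods from FilterApi:

    import static org.apache.parquet.filter2.predicate.FilterApi.eq;
    import static org.apache.parquet.filter2.predicate.FilterApi.intColumn;

    import org.apache.parquet.filter2.compat.FilterCompat;
    import org.apache.parquet.filter2.compat.FilterCompat.Filter;
    import org.apache.parquet.filter2.predicate.FilterPredicate;

    public class FilterCompatExample {
      public static void main(String[] args) {
        FilterPredicate pred = eq(intColumn("foo"), 10);
        // logs the predicate and rewrites not() before wrapping it
        Filter filter = FilterCompat.get(pred);
        System.out.println(filter);
      }
    }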
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
index b73e59c..60529bd 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
@@ -45,17 +45,15 @@ import org.apache.parquet.filter2.predicate.Operators.UserDefinedByInstance;
/**
* The Filter API is expressed through these static methods.
- *
+ * <p>
* Example usage:
- * {@code
- *
+ * <pre>
* IntColumn foo = intColumn("foo");
* DoubleColumn bar = doubleColumn("x.y.bar");
*
- * // foo == 10 || bar <= 17.0
+ * // foo == 10 || bar &lt;= 17.0
* FilterPredicate pred = or(eq(foo, 10), ltEq(bar, 17.0));
- *
- * }
+ * </pre>
*/
// TODO: Support repeated columns (https://issues.apache.org/jira/browse/PARQUET-34)
//
@@ -94,9 +92,16 @@ public final class FilterApi {
/**
* Keeps records if their value is equal to the provided value.
* Nulls are treated the same way the java programming language does.
+ * <p>
* For example:
* eq(column, null) will keep all records whose value is null.
* eq(column, 7) will keep all records whose value is 7, and will drop records whose value is null
+ *
+ * @param column a column reference created by FilterApi
+ * @param value a value that matches the column's type
+ * @param <T> the Java type of values in the column
+ * @param <C> the column type that corresponds to values of type T
+ * @return an equals predicate for the given column and value
*/
public static <T extends Comparable<T>, C extends Column<T> & SupportsEqNotEq> Eq<T> eq(C column, T value) {
return new Eq<T>(column, value);
@@ -105,6 +110,7 @@ public final class FilterApi {
/**
* Keeps records if their value is not equal to the provided value.
* Nulls are treated the same way the java programming language does.
+ * <p>
* For example:
* notEq(column, null) will keep all records whose value is not null.
* notEq(column, 7) will keep all records whose value is not 7, including records whose value is null.
@@ -115,6 +121,12 @@ public final class FilterApi {
*
* NOTE: be sure to read the {@link #lt}, {@link #ltEq}, {@link #gt}, {@link #gtEq} operator's docs
* for how they handle nulls
+ *
+ * @param column a column reference created by FilterApi
+ * @param value a value that matches the column's type
+ * @param <T> the Java type of values in the column
+ * @param <C> the column type that corresponds to values of type T
+ * @return a not-equals predicate for the given column and value
*/
public static <T extends Comparable<T>, C extends Column<T> & SupportsEqNotEq> NotEq<T> notEq(C column, T value) {
return new NotEq<T>(column, value);
@@ -124,8 +136,15 @@ public final class FilterApi {
* Keeps records if their value is less than (but not equal to) the provided value.
* The provided value cannot be null, as less than null has no meaning.
* Records with null values will be dropped.
+ * <p>
* For example:
* lt(column, 7) will keep all records whose value is less than (but not equal to) 7, and not null.
+ *
+ * @param column a column reference created by FilterApi
+ * @param value a value that matches the column's type
+ * @param <T> the Java type of values in the column
+ * @param <C> the column type that corresponds to values of type T
+ * @return a less-than predicate for the given column and value
*/
public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> Lt<T> lt(C column, T value) {
return new Lt<T>(column, value);
@@ -135,8 +154,15 @@ public final class FilterApi {
* Keeps records if their value is less than or equal to the provided value.
* The provided value cannot be null, as less than null has no meaning.
* Records with null values will be dropped.
+ * <p>
* For example:
* ltEq(column, 7) will keep all records whose value is less than or equal to 7, and not null.
+ *
+ * @param column a column reference created by FilterApi
+ * @param value a value that matches the column's type
+ * @param <T> the Java type of values in the column
+ * @param <C> the column type that corresponds to values of type T
+ * @return a less-than-or-equal predicate for the given column and value
*/
public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> LtEq<T> ltEq(C column, T value) {
return new LtEq<T>(column, value);
@@ -146,8 +172,15 @@ public final class FilterApi {
* Keeps records if their value is greater than (but not equal to) the provided value.
* The provided value cannot be null, as greater than null has no meaning.
* Records with null values will be dropped.
+ * <p>
* For example:
* gt(column, 7) will keep all records whose value is greater than (but not equal to) 7, and not null.
+ *
+ * @param column a column reference created by FilterApi
+ * @param value a value that matches the column's type
+ * @param <T> the Java type of values in the column
+ * @param <C> the column type that corresponds to values of type T
+ * @return a greater-than predicate for the given column and value
*/
public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> Gt<T> gt(C column, T value) {
return new Gt<T>(column, value);
@@ -157,8 +190,15 @@ public final class FilterApi {
* Keeps records if their value is greater than or equal to the provided value.
* The provided value cannot be null, as greater than null has no meaning.
* Records with null values will be dropped.
+ * <p>
* For example:
* gtEq(column, 7) will keep all records whose value is greater than or equal to 7, and not null.
+ *
+ * @param column a column reference created by FilterApi
+ * @param value a value that matches the column's type
+ * @param <T> the Java type of values in the column
+ * @param <C> the column type that corresponds to values of type T
+ * @return a greater-than-or-equal predicate for the given column and value
*/
public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> GtEq<T> gtEq(C column, T value) {
return new GtEq<T>(column, value);
@@ -166,9 +206,15 @@ public final class FilterApi {
/**
* Keeps records that pass the provided {@link UserDefinedPredicate}
- *
+ * <p>
* The provided class must have a default constructor. To use an instance
- * of a UserDefinedPredicate instead, see {@link #userDefined(column, udp)} below.
+ * of a UserDefinedPredicate instead, see userDefined below.
+ *
+ * @param column a column reference created by FilterApi
+ * @param clazz a user-defined predicate class
+ * @param <T> the Java type of values in the column
+ * @param <U> a user-defined predicate for values of type T
+ * @return a user-defined predicate for the given column
*/
public static <T extends Comparable<T>, U extends UserDefinedPredicate<T>>
UserDefined<T, U> userDefined(Column<T> column, Class<U> clazz) {
@@ -177,8 +223,14 @@ public final class FilterApi {
/**
* Keeps records that pass the provided {@link UserDefinedPredicate}
- *
+ * <p>
* The provided instance of UserDefinedPredicate must be serializable.
+ *
+ * @param column a column reference created by FilterApi
+ * @param udp a user-defined predicate instance
+ * @param <T> the Java type of values in the column
+ * @param <U> a user-defined predicate for values of type T
+ * @return a user-defined predicate for the given column
*/
public static <T extends Comparable<T>, U extends UserDefinedPredicate<T> & Serializable>
UserDefined<T, U> userDefined(Column<T> column, U udp) {
@@ -188,6 +240,10 @@ public final class FilterApi {
/**
* Constructs the logical and of two predicates. Records will be kept if both the left and right predicate agree
* that the record should be kept.
+ *
+ * @param left a predicate
+ * @param right a predicate
+ * @return an and predicate from the result of the left and right predicates
*/
public static FilterPredicate and(FilterPredicate left, FilterPredicate right) {
return new And(left, right);
@@ -196,6 +252,10 @@ public final class FilterApi {
/**
* Constructs the logical or of two predicates. Records will be kept if either the left or right predicate
* is satisfied (or both).
+ *
+ * @param left a predicate
+ * @param right a predicate
+ * @return an or predicate from the result of the left and right predicates
*/
public static FilterPredicate or(FilterPredicate left, FilterPredicate right) {
return new Or(left, right);
@@ -204,6 +264,9 @@ public final class FilterApi {
/**
* Constructs the logical not (or inverse) of a predicate.
* Records will be kept if the provided predicate is not satisfied.
+ *
+ * @param predicate a predicate
+ * @return a not predicate wrapping the result of the given predicate
*/
public static FilterPredicate not(FilterPredicate predicate) {
return new Not(predicate);
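Putting the operators above together, in the spirit of the class-level example:

    import static org.apache.parquet.filter2.predicate.FilterApi.*;

    import org.apache.parquet.filter2.predicate.FilterPredicate;

    public class FilterApiExample {
      public static void main(String[] args) {
        // keep records where foo == 10 or bar <= 17.0, then invert the whole predicate
        FilterPredicate pred =
            not(or(eq(intColumn("foo"), 10), ltEq(doubleColumn("x.y.bar"), 17.0)));
        System.out.println(pred);
      }
    }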
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterPredicate.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterPredicate.java
index 8afb334..211c71e 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterPredicate.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterPredicate.java
@@ -48,12 +48,16 @@ public interface FilterPredicate {
/**
* A FilterPredicate must accept a Visitor, per the visitor pattern.
+ * @param visitor a visitor
+ * @param <R> return type of the visitor
+ * @return the return value of Visitor#visit(this)
*/
<R> R accept(Visitor<R> visitor);
/**
* A FilterPredicate Visitor must visit all the operators in a FilterPredicate expression tree,
* and must handle recursion itself, per the visitor pattern.
+ * @param <R> return type of the visitor
*/
public static interface Visitor<R> {
<T extends Comparable<T>> R visit(Eq<T> eq);
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/LogicalInverseRewriter.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/LogicalInverseRewriter.java
index 134a29c..bacf1c7 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/LogicalInverseRewriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/LogicalInverseRewriter.java
@@ -38,7 +38,7 @@ import static org.apache.parquet.filter2.predicate.FilterApi.or;
/**
* Recursively removes all use of the not() operator in a predicate
* by replacing all instances of not(x) with the inverse(x),
- * eg: not(and(eq(), not(eq(y))) -> or(notEq(), eq(y))
+ * eg: not(and(eq(), not(eq(y))) -&gt; or(notEq(), eq(y))
*
* The returned predicate should have the same meaning as the original, but
* without the use of the not() operator.
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/Statistics.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/Statistics.java
index 8df0250..3f67a15 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/Statistics.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/Statistics.java
@@ -24,6 +24,8 @@ import static org.apache.parquet.Preconditions.checkNotNull;
/**
* Contains statistics about a group of records
+ *
+ * @param <T> the type of values described by the statistics instance
*/
public class Statistics<T> {
private final T min;
@@ -32,6 +34,8 @@ public class Statistics<T> {
// Intended for use only within Parquet itself.
/**
+ * @param min the min value
+ * @param max the max value
* @deprecated will be removed in 2.0.0. Use {@link #Statistics(Object, Object, Comparator)} instead
*/
@Deprecated
@@ -42,6 +46,12 @@ public class Statistics<T> {
}
// Intended for use only within Parquet itself.
+
+ /**
+ * @param min the min value
+ * @param max the max value
+ * @param comparator a comparator to use when comparing values described by this statistics instance
+ */
public Statistics(T min, T max, Comparator<T> comparator) {
this.min = checkNotNull(min, "min");
this.max = checkNotNull(max, "max");
@@ -53,6 +63,8 @@ public class Statistics<T> {
* natural ordering of type {@code T} defined by the {@code compareTo} method
* might not be appropriate for the actual logical type. Use
* {@link #getComparator()} for comparing.
+ *
+ * @return the min value
*/
public T getMin() {
return min;
@@ -63,6 +75,8 @@ public class Statistics<T> {
* natural ordering of type {@code T} defined by the {@code compareTo} method
* might not be appropriate for the actual logical type. Use
* {@link #getComparator()} for comparing.
+ *
+ * @return the max value
*/
public T getMax() {
return max;
@@ -71,6 +85,8 @@ public class Statistics<T> {
/**
* Returns the comparator to be used to compare two generic values in the proper way (e.g. unsigned comparison for
* UINT_32)
+ *
+ * @return a comparator for the values described by the statistics instance
*/
public Comparator<T> getComparator() {
return comparator;
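To make the getComparator() guidance concrete, here is a minimal sketch of a
user-defined predicate that compares through the supplied comparator instead of
relying on Integer.compareTo; the class name and the bound of 7 are illustrative:

import org.apache.parquet.filter2.predicate.Statistics;
import org.apache.parquet.filter2.predicate.UserDefinedPredicate;

public class IntBelowBound extends UserDefinedPredicate<Integer> {
  private static final int BOUND = 7;  // illustrative bound

  @Override
  public boolean keep(Integer value) {
    return value < BOUND;
  }

  @Override
  public boolean canDrop(Statistics<Integer> statistics) {
    // all values are in [min, max]: if even the smallest is >= BOUND, none can match
    return statistics.getComparator().compare(statistics.getMin(), BOUND) >= 0;
  }

  @Override
  public boolean inverseCanDrop(Statistics<Integer> statistics) {
    // the inverse keeps values >= BOUND: drop when even the largest is below it
    return statistics.getComparator().compare(statistics.getMax(), BOUND) < 0;
  }
}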
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/UserDefinedPredicate.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/UserDefinedPredicate.java
index 16b7c3d..05365b9 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/UserDefinedPredicate.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/UserDefinedPredicate.java
@@ -40,6 +40,9 @@ public abstract class UserDefinedPredicate<T extends Comparable<T>> {
/**
* Return true to keep the record with this value, false to drop it.
+ *
+ * @param value a value
+ * @return true to keep the record with the value, false to drop it
*/
public abstract boolean keep(T value);
@@ -51,13 +54,16 @@ public abstract class UserDefinedPredicate<T extends Comparable<T>> {
*
* It is safe to always return false here, if you simply want to visit each record via the {@link #keep} method,
* though it is much more efficient to drop entire chunks of records here if you can.
+ *
+ * @param statistics statistics for the column
+ * @return true if none of the values described by statistics can match the predicate
*/
public abstract boolean canDrop(Statistics<T> statistics);
/**
* Same as {@link #canDrop} except this method describes the logical inverse
* behavior of this predicate. If this predicate is passed to the not() operator, then
- * {@link #inverseCanDrop} will be called instead of {@link #canDrop}
+ * this method will be called instead of {@link #canDrop}
*
* It is safe to always return false here, if you simply want to visit each record via the {@link #keep} method,
* though it is much more efficient to drop entire chunks of records here if you can.
@@ -65,33 +71,29 @@ public abstract class UserDefinedPredicate<T extends Comparable<T>> {
* It may be valid to simply return !canDrop(statistics) but that is not always the case.
* To illustrate, look at this re-implementation of a UDP that checks for values greater than 7:
*
- * {@code
- *
+ * <pre>
* // This is just an example, you should use the built in {@link FilterApi#gt} operator instead of
* // implementing your own like this.
*
- * public class IntGreaterThan7UDP extends UserDefinedPredicate<Integer> {
- * @Override
+ * public class IntGreaterThan7UDP extends UserDefinedPredicate&lt;Integer&gt; {
* public boolean keep(Integer value) {
* // here we just check if the value is greater than 7.
* // here, parquet knows that if the predicate not(columnX, IntGreaterThan7UDP) is being evaluated,
* // it is safe to simply use !IntGreaterThan7UDP.keep(value)
- * return value > 7;
+ * return value &gt; 7;
* }
*
- * @Override
- * public boolean canDrop(Statistics<Integer> statistics) {
+ * public boolean canDrop(Statistics&lt;Integer&gt; statistics) {
* // here we drop a group of records if they are all less than or equal to 7,
* // (there can't possibly be any values greater than 7 in this group of records)
- * return statistics.getMax() <= 7;
+ * return statistics.getMax() &lt;= 7;
* }
*
- * @Override
- * public boolean inverseCanDrop(Statistics<Integer> statistics) {
+ * public boolean inverseCanDrop(Statistics&lt;Integer&gt; statistics) {
* // here the predicate not(columnX, IntGreaterThan7UDP) is being evaluated, which means we want
* // to keep all records whose value is not greater than 7, or, rephrased, whose value is less than or equal to 7.
* // notice what would happen if parquet just tried to evaluate !IntGreaterThan7UDP.canDrop():
- * // !IntGreaterThan7UDP.canDrop(stats) == !(stats.getMax() <= 7) == (stats.getMax() > 7)
+ * // !IntGreaterThan7UDP.canDrop(stats) == !(stats.getMax() &lt;= 7) == (stats.getMax() &gt; 7)
* // it would drop the following group of records: [100, 1, 2, 3], even though this group of records contains values
* // less than or equal to 7.
*
@@ -99,10 +101,13 @@ public abstract class UserDefinedPredicate<T extends Comparable<T>> {
* // for example: the group of records: [100, 8, 9, 10] has a min of 8, so there's no way there are going
* // to be records with a value
* // less than or equal to 7 in this group.
- * return statistics.getMin() > 7;
+ * return statistics.getMin() &gt; 7;
* }
* }
- * }
+ * </pre>
+ *
+ * @param statistics statistics for the column
+ * @return false if none of the values described by statistics can match the predicate
*/
public abstract boolean inverseCanDrop(Statistics<T> statistics);
}
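A predicate like the one documented above is attached to a column with the
FilterApi.userDefined factory; a brief sketch, reusing the illustrative
IntBelowBound class from the Statistics example earlier:

import static org.apache.parquet.filter2.predicate.FilterApi.intColumn;
import static org.apache.parquet.filter2.predicate.FilterApi.userDefined;

import org.apache.parquet.filter2.predicate.FilterPredicate;

public class UserDefinedExample {
  public static void main(String[] args) {
    // binds the predicate class to column "x"; it is instantiated during evaluation
    FilterPredicate pred = userDefined(intColumn("x"), IntBelowBound.class);
    System.out.println(pred);
  }
}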
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
index 574604a..b8f48bb 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
@@ -29,7 +29,7 @@ import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
/**
- * Contains all valid mappings from class -> parquet type (and vice versa) for use in
+ * Contains all valid mappings from class -&gt; parquet type (and vice versa) for use in
* {@link FilterPredicate}s
*
* This is a bit ugly, but it allows us to provide good error messages at runtime
@@ -82,6 +82,7 @@ public class ValidTypeMap {
*
* @param foundColumn the column as declared by the user
* @param primitiveType the primitive type according to the schema
+ * @param <T> the java Type of values in the column, must be Comparable
*/
public static <T extends Comparable<T>> void assertTypeValid(Column<T> foundColumn, PrimitiveTypeName primitiveType) {
Class<T> foundColumnType = foundColumn.getColumnType();
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java
index 606c78f..84fd0f4 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java
@@ -46,6 +46,9 @@ public interface IncrementallyUpdatedFilterPredicate {
/**
* A {@link IncrementallyUpdatedFilterPredicate} must accept a {@link Visitor}, per the visitor pattern.
+ *
+ * @param visitor a Visitor
+ * @return the result of this predicate
*/
boolean accept(Visitor visitor);
@@ -81,6 +84,8 @@ public interface IncrementallyUpdatedFilterPredicate {
/**
* Subclasses should call this method to signal that the result of this predicate is known.
+ *
+ * @param result the result of this predicate, when it is determined
*/
protected final void setResult(boolean result) {
if (isKnown) {
@@ -93,6 +98,8 @@ public interface IncrementallyUpdatedFilterPredicate {
/**
* Should only be called if {@link #isKnown} returns true.
+ *
+ * @return the result of this predicate
*/
public final boolean getResult() {
if (!isKnown) {
@@ -103,6 +110,8 @@ public interface IncrementallyUpdatedFilterPredicate {
/**
* Return true if this inspector has received a value yet, false otherwise.
+ *
+ * @return true if the value of this predicate has been determined
*/
public final boolean isKnown() {
return isKnown;
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java
index c1f759c..8707b7e 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java
@@ -38,7 +38,7 @@ import static org.apache.parquet.Preconditions.checkArgument;
/**
* The implementation of this abstract class is auto-generated by
- * {@link org.apache.parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator}
+ * org.apache.parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator
*
* Constructs a {@link IncrementallyUpdatedFilterPredicate} from a {@link org.apache.parquet.filter2.predicate.FilterPredicate}
* This is how records are filtered during record assembly. The implementation is generated in order to avoid autoboxing.
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
index 9c6e729..43416e4 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
@@ -27,9 +27,6 @@ import org.apache.parquet.schema.Type.Repetition;
/**
* a structure used to serialize and deserialize records
- *
- * @author Julien Le Dem
- *
*/
abstract public class ColumnIO {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ColumnIOFactory.java b/parquet-column/src/main/java/org/apache/parquet/io/ColumnIOFactory.java
index aeef510..dcf1e87 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ColumnIOFactory.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ColumnIOFactory.java
@@ -29,9 +29,6 @@ import org.apache.parquet.schema.TypeVisitor;
/**
* Factory constructing the ColumnIO structure from the schema
- *
- * @author Julien Le Dem
- *
*/
public class ColumnIOFactory {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/CompilationException.java b/parquet-column/src/main/java/org/apache/parquet/io/CompilationException.java
index e15ab2e..3074730 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/CompilationException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/CompilationException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown when a problem occurred while compiling the column reader
- *
- * @author Julien Le Dem
- *
*/
public class CompilationException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/EmptyRecordReader.java b/parquet-column/src/main/java/org/apache/parquet/io/EmptyRecordReader.java
index 671c651..a2f88ac 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/EmptyRecordReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/EmptyRecordReader.java
@@ -24,8 +24,6 @@ import org.apache.parquet.io.api.RecordMaterializer;
/**
* used to read an empty schema
*
- * @author Mickael Lacour <m....@criteo.com>
- *
* @param <T> the type of the materialized record
*/
class EmptyRecordReader<T> extends RecordReader<T> {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/FilteredRecordReader.java b/parquet-column/src/main/java/org/apache/parquet/io/FilteredRecordReader.java
index 3444b1f..0304a61 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/FilteredRecordReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/FilteredRecordReader.java
@@ -26,8 +26,6 @@ import org.apache.parquet.io.api.RecordMaterializer;
/**
- * Extends the
- * @author Jacob Metcalf
- *
+ * Extends {@link RecordReaderImplementation} to apply a record-level filter
*/
class FilteredRecordReader<T> extends RecordReaderImplementation<T> {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
index 14b8426..8e7cb80 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
@@ -33,10 +33,6 @@ import org.slf4j.LoggerFactory;
/**
* Group level of the IO structure
- *
- *
- * @author Julien Le Dem
- *
*/
public class GroupColumnIO extends ColumnIO {
private static final Logger LOG = LoggerFactory.getLogger(GroupColumnIO.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/InvalidRecordException.java b/parquet-column/src/main/java/org/apache/parquet/io/InvalidRecordException.java
index d3d0111..a457b81 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/InvalidRecordException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/InvalidRecordException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown when an invalid record is encountered
- *
- * @author Julien Le Dem
- *
*/
public class InvalidRecordException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
index 1bec79b..f1da363 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
@@ -54,9 +54,6 @@ import static org.apache.parquet.Preconditions.checkNotNull;
/**
* Message level of the IO structure
- *
- *
- * @author Julien Le Dem
*/
public class MessageColumnIO extends GroupColumnIO {
private static final Logger LOG = LoggerFactory.getLogger(MessageColumnIO.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ParquetDecodingException.java b/parquet-column/src/main/java/org/apache/parquet/io/ParquetDecodingException.java
index 1007e32..ec6a91c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ParquetDecodingException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ParquetDecodingException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown when a decoding problem occurred
- *
- * @author Julien Le Dem
- *
*/
public class ParquetDecodingException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ParquetEncodingException.java b/parquet-column/src/main/java/org/apache/parquet/io/ParquetEncodingException.java
index 05f9c56..09b93f1 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ParquetEncodingException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ParquetEncodingException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown when an encoding problem occurred
- *
- * @author Julien Le Dem
- *
*/
public class ParquetEncodingException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/PrimitiveColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/PrimitiveColumnIO.java
index e40b24f..9bb3ee4 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/PrimitiveColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/PrimitiveColumnIO.java
@@ -30,10 +30,6 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
/**
* Primitive level of the IO structure
- *
- *
- * @author Julien Le Dem
- *
*/
public class PrimitiveColumnIO extends ColumnIO {
// private static final Logger logger = Logger.getLogger(PrimitiveColumnIO.class.getName());
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java b/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
index 9e3e967..97ed216 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
@@ -27,9 +27,6 @@ import java.util.Arrays;
/**
* This class can be used to wrap an actual RecordConsumer and log all calls
- *
- * @author Julien Le Dem
- *
*/
public class RecordConsumerLoggingWrapper extends RecordConsumer {
private static final Logger LOG = LoggerFactory.getLogger(RecordConsumerLoggingWrapper.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/RecordReader.java b/parquet-column/src/main/java/org/apache/parquet/io/RecordReader.java
index 64bc9c8..7705568 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/RecordReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/RecordReader.java
@@ -21,7 +21,6 @@ package org.apache.parquet.io;
/**
* used to read reassembled records
- * @author Julien Le Dem
*
* @param <T> the type of the materialized record
*/
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java b/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
index f883c4a..b4ac363 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
/**
* used to read reassembled records
- * @author Julien Le Dem
*
* @param <T> the type of the materialized record
*/
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java b/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
index c27381a..e382a40 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
@@ -36,9 +36,6 @@ import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.*;
/**
* Wraps a record consumer
* Validates the record written against the schema and passes down the event to the wrapped consumer
- *
- * @author Julien Le Dem
- *
*/
public class ValidatingRecordConsumer extends RecordConsumer {
private static final Logger LOG = LoggerFactory.getLogger(ValidatingRecordConsumer.class);
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/api/Converter.java b/parquet-column/src/main/java/org/apache/parquet/io/api/Converter.java
index 648e245..d449766 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/api/Converter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/api/Converter.java
@@ -21,9 +21,6 @@ package org.apache.parquet.io.api;
/**
* Represent a tree of converters
* that materializes tuples
- *
- * @author Julien Le Dem
- *
*/
public abstract class Converter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/api/GroupConverter.java b/parquet-column/src/main/java/org/apache/parquet/io/api/GroupConverter.java
index 823e388..381148e 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/api/GroupConverter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/api/GroupConverter.java
@@ -21,9 +21,6 @@ package org.apache.parquet.io.api;
/**
* converter for group nodes
- *
- * @author Julien Le Dem
- *
*/
abstract public class GroupConverter extends Converter {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/api/PrimitiveConverter.java b/parquet-column/src/main/java/org/apache/parquet/io/api/PrimitiveConverter.java
index 763c6fd..b97722a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/api/PrimitiveConverter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/api/PrimitiveConverter.java
@@ -22,9 +22,6 @@ import org.apache.parquet.column.Dictionary;
/**
* converter for leaves of the schema
- *
- * @author Julien Le Dem
- *
*/
abstract public class PrimitiveConverter extends Converter {
@@ -55,7 +52,7 @@ abstract public class PrimitiveConverter extends Converter {
throw new UnsupportedOperationException(getClass().getName());
}
- /** runtime calls **/
+ /* runtime calls */
/**
* add a value based on the dictionary set with setDictionary()
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/api/RecordConsumer.java b/parquet-column/src/main/java/org/apache/parquet/io/api/RecordConsumer.java
index a8de336..b1919a8 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/api/RecordConsumer.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/api/RecordConsumer.java
@@ -49,8 +49,6 @@ package org.apache.parquet.io.api;
* }
* }
* </pre>
- * @author Julien Le Dem
- *
*/
abstract public class RecordConsumer {
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/api/RecordMaterializer.java b/parquet-column/src/main/java/org/apache/parquet/io/api/RecordMaterializer.java
index 9aee114..47cca91 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/api/RecordMaterializer.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/api/RecordMaterializer.java
@@ -27,8 +27,6 @@ import org.apache.parquet.io.ParquetDecodingException;
* Each record will be wrapped by {@link GroupConverter#start()} and {@link GroupConverter#end()},
* between which the appropriate fields will be materialized.
*
- * @author Julien Le Dem
- *
* @param <T> the materialized object class
*/
abstract public class RecordMaterializer<T> {
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/GroupType.java b/parquet-column/src/main/java/org/apache/parquet/schema/GroupType.java
index 49c29a3..dafe7cc 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/GroupType.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/GroupType.java
@@ -32,9 +32,6 @@ import org.apache.parquet.io.InvalidRecordException;
/**
* Represents a group type: a list of fields
- *
- * @author Julien Le Dem
- *
*/
public class GroupType extends Type {
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/IncompatibleSchemaModificationException.java b/parquet-column/src/main/java/org/apache/parquet/schema/IncompatibleSchemaModificationException.java
index ea64d95..2c6a6d4 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/IncompatibleSchemaModificationException.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/IncompatibleSchemaModificationException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown when trying to read files with incompatible schemas together.
- *
- * @author Julien Le Dem
- *
*/
public class IncompatibleSchemaModificationException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/MessageType.java b/parquet-column/src/main/java/org/apache/parquet/schema/MessageType.java
index 943e2a0..d305eb8 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/MessageType.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/MessageType.java
@@ -26,9 +26,6 @@ import org.apache.parquet.io.InvalidRecordException;
/**
* The root of a schema
- *
- * @author Julien Le Dem
- *
*/
public final class MessageType extends GroupType {
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java b/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
index f0c178a..4e1d0fd 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
@@ -31,8 +31,6 @@ import org.slf4j.LoggerFactory;
/**
* Parses a schema from a textual format similar to that described in the Dremel paper.
- *
- * @author Julien Le Dem
*/
public class MessageTypeParser {
private static final Logger LOG = LoggerFactory.getLogger(MessageTypeParser.class);
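As a quick reference for the textual format, a small schema parsed with
MessageTypeParser.parseMessageType (the field names are illustrative):

import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public class ParseSchemaExample {
  public static void main(String[] args) {
    MessageType schema = MessageTypeParser.parseMessageType(
        "message example {\n" +
        "  required int32 id;\n" +
        "  optional binary name (UTF8);\n" +
        "  repeated group addresses {\n" +
        "    required binary street (UTF8);\n" +
        "  }\n" +
        "}");
    System.out.println(schema);  // prints the schema back in the same format
  }
}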
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/PrimitiveType.java b/parquet-column/src/main/java/org/apache/parquet/schema/PrimitiveType.java
index 8124906..2a5e250 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/PrimitiveType.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/PrimitiveType.java
@@ -33,11 +33,7 @@ import org.apache.parquet.schema.ColumnOrder.ColumnOrderName;
/**
- *
* Representation of a Primitive type
- *
- * @author Julien Le Dem
- *
*/
public final class PrimitiveType extends Type {
@@ -63,8 +59,6 @@ public final class PrimitiveType extends Type {
/**
* Supported Primitive types
- *
- * @author Julien Le Dem
*/
public static enum PrimitiveTypeName {
INT64("getLong", Long.TYPE) {
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/Type.java b/parquet-column/src/main/java/org/apache/parquet/schema/Type.java
index c34651e..bca8121 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/Type.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/Type.java
@@ -34,9 +34,6 @@ abstract public class Type {
/**
* represents a field ID
- *
- * @author Julien Le Dem
- *
*/
public static final class ID {
private final int id;
@@ -77,8 +74,6 @@ abstract public class Type {
/**
* Constraint on the repetition of a field
- *
- * @author Julien Le Dem
*/
public static enum Repetition {
/**
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/TypeConverter.java b/parquet-column/src/main/java/org/apache/parquet/schema/TypeConverter.java
index 8be896f..886afc9 100755
--- a/parquet-column/src/main/java/org/apache/parquet/schema/TypeConverter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/TypeConverter.java
@@ -24,8 +24,6 @@ import java.util.List;
* to convert a MessageType tree
* @see Type#convert(List, TypeConverter)
*
- * @author Julien Le Dem
- *
* @param <T> the resulting Type
*/
public interface TypeConverter<T> {
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/TypeVisitor.java b/parquet-column/src/main/java/org/apache/parquet/schema/TypeVisitor.java
index bd7a548..6861aad 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/TypeVisitor.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/TypeVisitor.java
@@ -22,9 +22,6 @@ package org.apache.parquet.schema;
* Implement this interface to visit a schema
*
* type.accept(new TypeVisitor() { ... });
- *
- * @author Julien Le Dem
- *
*/
public interface TypeVisitor {
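A minimal sketch of the pattern mentioned above; note that the visitor has to
recurse into group fields itself:

import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.TypeVisitor;

public class TypeVisitorExample {
  public static void main(String[] args) {
    MessageType schema = MessageTypeParser.parseMessageType(
        "message m { required int32 id; optional group g { required int64 n; } }");
    schema.accept(new TypeVisitor() {
      @Override
      public void visit(MessageType messageType) {
        System.out.println("message: " + messageType.getName());
        for (Type field : messageType.getFields()) {
          field.accept(this);  // recursion is the visitor's job
        }
      }

      @Override
      public void visit(GroupType groupType) {
        System.out.println("group: " + groupType.getName());
        for (Type field : groupType.getFields()) {
          field.accept(this);
        }
      }

      @Override
      public void visit(PrimitiveType primitiveType) {
        System.out.println("primitive: " + primitiveType.getName());
      }
    });
  }
}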
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/values/RandomStr.java b/parquet-column/src/test/java/org/apache/parquet/column/values/RandomStr.java
index 8b41c39..aa3aec3 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/values/RandomStr.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/values/RandomStr.java
@@ -20,11 +20,6 @@ package org.apache.parquet.column.values;
import java.util.Random;
-/**
- *
- * @author Aniket Mokashi
- *
- */
public class RandomStr {
private final char[] alphanumeric=alphanumeric();
private final Random rand;
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/values/Utils.java b/parquet-column/src/test/java/org/apache/parquet/column/values/Utils.java
index 248e039..05074a9 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/values/Utils.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/values/Utils.java
@@ -27,9 +27,6 @@ import org.apache.parquet.io.api.Binary;
/**
* Test Utility class
- *
- * @author Aniket Mokashi
- *
*/
public class Utils {
private static Random randomLen = new Random();
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/BitPackingPerfTest.java b/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/BitPackingPerfTest.java
index 656623c..c5abdfb 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/BitPackingPerfTest.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/BitPackingPerfTest.java
@@ -29,8 +29,6 @@ import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingWriter;
/**
* Improvable micro benchmark for bitpacking
* run with: -verbose:gc -Xmx2g -Xms2g
- * @author Julien Le Dem
- *
*/
public class BitPackingPerfTest {
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridIntegrationTest.java b/parquet-column/src/test/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridIntegrationTest.java
index 173d8fa..6c94f14 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridIntegrationTest.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridIntegrationTest.java
@@ -26,9 +26,6 @@ import org.apache.parquet.bytes.DirectByteBufferAllocator;
import static org.junit.Assert.assertEquals;
-/**
- * @author Alex Levenson
- */
public class RunLengthBitPackingHybridIntegrationTest {
@Test
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/values/rle/TestRunLengthBitPackingHybridEncoder.java b/parquet-column/src/test/java/org/apache/parquet/column/values/rle/TestRunLengthBitPackingHybridEncoder.java
index dd329c0..1ab09f5 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/values/rle/TestRunLengthBitPackingHybridEncoder.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/values/rle/TestRunLengthBitPackingHybridEncoder.java
@@ -32,9 +32,6 @@ import org.apache.parquet.bytes.BytesUtils;
import org.apache.parquet.column.values.bitpacking.BytePacker;
import org.apache.parquet.column.values.bitpacking.Packer;
-/**
- * @author Alex Levenson
- */
public class TestRunLengthBitPackingHybridEncoder {
private RunLengthBitPackingHybridEncoder getRunLengthBitPackingHybridEncoder() {
diff --git a/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java b/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
index bf783df..df111bd 100644
--- a/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
+++ b/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
@@ -33,10 +33,6 @@ import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;
-/**
- * @author Julien Le Dem
- *
- */
public class PerfTest {
public static void main(String[] args) {
diff --git a/parquet-common/src/main/java/org/apache/parquet/Ints.java b/parquet-common/src/main/java/org/apache/parquet/Ints.java
index b195966..6137236 100644
--- a/parquet-common/src/main/java/org/apache/parquet/Ints.java
+++ b/parquet-common/src/main/java/org/apache/parquet/Ints.java
@@ -20,8 +20,6 @@ package org.apache.parquet;
/**
* Utilities for working with ints
- *
- * @author Alex Levenson
*/
public final class Ints {
private Ints() { }
diff --git a/parquet-common/src/main/java/org/apache/parquet/Log.java b/parquet-common/src/main/java/org/apache/parquet/Log.java
index 7856686..f447f3e 100644
--- a/parquet-common/src/main/java/org/apache/parquet/Log.java
+++ b/parquet-common/src/main/java/org/apache/parquet/Log.java
@@ -30,9 +30,6 @@ import java.util.logging.Level;
* <code>
* if (DEBUG) LOG.debug("removed by the compiler if DEBUG is a false constant")
* </code>
- *
- * @author Julien Le Dem
- *
*/
public class Log {
diff --git a/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java b/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
index d0f13a8..a1d3a8e 100644
--- a/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
+++ b/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
@@ -23,9 +23,6 @@ import java.io.IOException;
/**
* The parent class for all runtime exceptions
- *
- * @author Julien Le Dem
- *
*/
abstract public class ParquetRuntimeException extends RuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-common/src/main/java/org/apache/parquet/Preconditions.java b/parquet-common/src/main/java/org/apache/parquet/Preconditions.java
index 3a14306..2827b46 100644
--- a/parquet-common/src/main/java/org/apache/parquet/Preconditions.java
+++ b/parquet-common/src/main/java/org/apache/parquet/Preconditions.java
@@ -20,9 +20,6 @@ package org.apache.parquet;
/**
* Utility for parameter validation
- *
- * @author Julien Le Dem
- *
*/
public final class Preconditions {
private Preconditions() { }
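The checkNotNull helper returns its argument, so it can be used inline in
assignments, as the Statistics constructor earlier in this patch does; a tiny
example:

import org.apache.parquet.Preconditions;

public class PreconditionsExample {
  public static void main(String[] args) {
    String name = "id";
    // throws NullPointerException naming the parameter when name is null
    String checked = Preconditions.checkNotNull(name, "name");
    System.out.println(checked);
  }
}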
diff --git a/parquet-common/src/main/java/org/apache/parquet/bytes/BytesInput.java b/parquet-common/src/main/java/org/apache/parquet/bytes/BytesInput.java
index fd4986a..b18aae3 100644
--- a/parquet-common/src/main/java/org/apache/parquet/bytes/BytesInput.java
+++ b/parquet-common/src/main/java/org/apache/parquet/bytes/BytesInput.java
@@ -40,9 +40,6 @@ import org.slf4j.LoggerFactory;
* For example if it is referring to a stream,
* subsequent BytesInput reads from the stream will be incorrect
* if the previous has not been consumed.
- *
- * @author Julien Le Dem
- *
*/
abstract public class BytesInput {
private static final Logger LOG = LoggerFactory.getLogger(BytesInput.class);
diff --git a/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java b/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
index ce06b17..2657c7e 100644
--- a/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
+++ b/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
@@ -30,9 +30,6 @@ import org.slf4j.LoggerFactory;
/**
* utility methods to deal with bytes
- *
- * @author Julien Le Dem
- *
*/
public class BytesUtils {
private static final Logger LOG = LoggerFactory.getLogger(BytesUtils.class);
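One such utility ties directly into the bit packing code below: computing the
number of bits needed to store values up to a given maximum. A small example:

import org.apache.parquet.bytes.BytesUtils;

public class WidthExample {
  public static void main(String[] args) {
    // 7 fits in 3 bits; this width feeds the bit packing writers
    System.out.println(BytesUtils.getWidthFromMaxInt(7));  // 3
  }
}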
diff --git a/parquet-common/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java b/parquet-common/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
index 44a5623..950dcd5 100644
--- a/parquet-common/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
+++ b/parquet-common/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
@@ -51,9 +51,6 @@ import org.slf4j.LoggerFactory;
* When reusing this stream it will adjust the initial slab size based on the previous data size, aiming for fewer
* allocations, with the assumption that a similar amount of data will be written to this stream on re-use.
* See ({@link CapacityByteArrayOutputStream#reset()}).
- *
- * @author Julien Le Dem
- *
*/
public class CapacityByteArrayOutputStream extends OutputStream {
private static final Logger LOG = LoggerFactory.getLogger(CapacityByteArrayOutputStream.class);
diff --git a/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataInputStream.java b/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataInputStream.java
index 9a886f4..4338c24 100644
--- a/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataInputStream.java
+++ b/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataInputStream.java
@@ -24,9 +24,6 @@ import java.io.InputStream;
/**
* Based on DataInputStream but little endian and without the String/char methods
- *
- * @author Julien Le Dem
- *
*/
public final class LittleEndianDataInputStream extends InputStream {
diff --git a/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataOutputStream.java b/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataOutputStream.java
index 9d4a8a9..ab94b32 100644
--- a/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataOutputStream.java
+++ b/parquet-common/src/main/java/org/apache/parquet/bytes/LittleEndianDataOutputStream.java
@@ -26,9 +26,6 @@ import java.io.OutputStream;
/**
* Based on DataOutputStream but in little endian and without the String/char methods
- *
- * @author Julien Le Dem
- *
*/
public class LittleEndianDataOutputStream extends OutputStream {
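A round trip through the pair of little-endian streams, for reference:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.parquet.bytes.LittleEndianDataInputStream;
import org.apache.parquet.bytes.LittleEndianDataOutputStream;

public class LittleEndianRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(bytes);
    out.writeInt(1);   // written as 01 00 00 00, least significant byte first
    out.writeLong(2L);
    out.flush();

    LittleEndianDataInputStream in =
        new LittleEndianDataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(in.readInt());   // 1
    System.out.println(in.readLong());  // 2
  }
}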
diff --git a/parquet-common/src/main/java/org/apache/parquet/hadoop/metadata/Canonicalizer.java b/parquet-common/src/main/java/org/apache/parquet/hadoop/metadata/Canonicalizer.java
index a3409da..a36c2ab 100644
--- a/parquet-common/src/main/java/org/apache/parquet/hadoop/metadata/Canonicalizer.java
+++ b/parquet-common/src/main/java/org/apache/parquet/hadoop/metadata/Canonicalizer.java
@@ -24,7 +24,6 @@ import java.util.concurrent.ConcurrentHashMap;
* returns canonical representation of objects (similar to String.intern()) to save memory
* if a.equals(b) then canonicalize(a) == canonicalize(b)
* this class is thread safe
- * @author Julien Le Dem
*
* @param <T> the type of values canonicalized by subclasses
*/
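A minimal sketch of the intern-like behavior, assuming the default toCanonical
implementation, which simply returns its argument:

import org.apache.parquet.hadoop.metadata.Canonicalizer;

public class CanonicalizerExample {
  public static void main(String[] args) {
    Canonicalizer<String> names = new Canonicalizer<String>();
    String a = new String("col_a");
    String b = new String("col_a");
    System.out.println(a == b);  // false: two distinct but equal instances
    // equal inputs map to the same canonical instance
    System.out.println(names.canonicalize(a) == names.canonicalize(b));  // true
  }
}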
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BitPacking.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BitPacking.java
index 3e73fc7..a8c2620 100755
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BitPacking.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BitPacking.java
@@ -29,18 +29,12 @@ import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingWriter;
// TODO: rework the whole thing. It does not need to use streams at all
/**
* provides the correct implementation of a bitpacking based on the width in bits
- *
- * @author Julien Le Dem
- *
*/
public class BitPacking {
/**
* to write ints to a stream packed to only the needed bits.
* there is no guarantee of correctness if ints larger than the max size are written
- *
- * @author Julien Le Dem
- *
*/
abstract public static class BitPackingWriter {
/**
@@ -59,9 +53,6 @@ public class BitPacking {
/**
* to read back what has been written with the corresponding writer
- *
- * @author Julien Le Dem
- *
*/
abstract public static class BitPackingReader {
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
index 1ce1e6a..8555ebe 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
@@ -31,9 +31,6 @@ import static org.apache.parquet.bytes.BytesInput.concat;
/**
* Uses the generated Byte based bit packing to write ints into a BytesInput
- *
- * @author Julien Le Dem
- *
*/
public class ByteBasedBitPackingEncoder {
private static final Logger LOG = LoggerFactory.getLogger(ByteBasedBitPackingEncoder.class);
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePacker.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePacker.java
index 634f5b0..13b981e 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePacker.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePacker.java
@@ -26,9 +26,6 @@ import java.nio.ByteBuffer;
* packing unpacking treats:
* - n values at a time (with n % 8 == 0)
* - bitWidth * (n/8) bytes at a time.
- *
- * @author Julien Le Dem
- *
*/
public abstract class BytePacker {
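A sketch of packing and unpacking one group of 8 values, assuming the
Packer.LITTLE_ENDIAN factory that appears later in this patch; the bit width
and values are illustrative:

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.parquet.column.values.bitpacking.BytePacker;
import org.apache.parquet.column.values.bitpacking.Packer;

public class BytePackerExample {
  public static void main(String[] args) {
    int bitWidth = 3;  // every value fits in 3 bits (0..7)
    BytePacker packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);

    int[] values = {0, 1, 2, 3, 4, 5, 6, 7};  // n = 8 values at a time
    byte[] packed = new byte[bitWidth];       // bitWidth * (n/8) = 3 bytes
    packer.pack8Values(values, 0, packed, 0);

    int[] unpacked = new int[8];
    packer.unpack8Values(ByteBuffer.wrap(packed), 0, unpacked, 0);
    System.out.println(Arrays.toString(unpacked));  // [0, 1, 2, ..., 7]
  }
}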
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePackerForLong.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePackerForLong.java
index 9859f5b..7490b65 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePackerForLong.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/BytePackerForLong.java
@@ -21,9 +21,6 @@ import java.nio.ByteBuffer;
*
* packing unpacking treats: - n values at a time (with n % 8 == 0) - bitWidth * (n/8) bytes at a
* time.
- *
- * @author Vassil Lunchev
- *
*/
public abstract class BytePackerForLong {
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/IntPacker.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/IntPacker.java
index 1d8d616..627e8a2 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/IntPacker.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/IntPacker.java
@@ -24,9 +24,6 @@ package org.apache.parquet.column.values.bitpacking;
* packing unpacking treats:
* - 32 values at a time
* - bitWidth ints at a time.
- *
- * @author Julien Le Dem
- *
*/
public abstract class IntPacker {
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/Packer.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/Packer.java
index 5c56941..f345596 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/Packer.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/Packer.java
@@ -20,9 +20,6 @@ package org.apache.parquet.column.values.bitpacking;
/**
* Factory for packing implementations
- *
- * @author Julien Le Dem
- *
*/
public enum Packer {
diff --git a/parquet-generator/src/main/java/org/apache/parquet/encoding/Generator.java b/parquet-generator/src/main/java/org/apache/parquet/encoding/Generator.java
index 641e76d..2e6dc77 100644
--- a/parquet-generator/src/main/java/org/apache/parquet/encoding/Generator.java
+++ b/parquet-generator/src/main/java/org/apache/parquet/encoding/Generator.java
@@ -23,9 +23,6 @@ import org.apache.parquet.encoding.bitpacking.IntBasedBitPackingGenerator;
/**
* main class for the code generation hook in the build that generates encodings
- *
- * @author Julien Le Dem
- *
*/
public class Generator {
diff --git a/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/ByteBasedBitPackingGenerator.java b/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/ByteBasedBitPackingGenerator.java
index 842f41f..1538176 100644
--- a/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/ByteBasedBitPackingGenerator.java
+++ b/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/ByteBasedBitPackingGenerator.java
@@ -26,9 +26,6 @@ import java.io.IOException;
*
* This class generates bit packers that pack the most significant bit first.
* The result of the generation is checked in. To regenerate the code run this class and check in the result.
- *
- * @author Julien Le Dem
- *
*/
public class ByteBasedBitPackingGenerator {
@@ -77,7 +74,7 @@ public class ByteBasedBitPackingGenerator {
}
fw.append(" * \n");
fw.append(" * See ByteBasedBitPackingGenerator to make changes to this file\n");
- fw.append(" * @author automatically generated\n");
+ fw.append(" * Automatically generated\n");
fw.append(" *\n");
fw.append(" */\n");
fw.append("public abstract class " + className + " {\n");
diff --git a/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/IntBasedBitPackingGenerator.java b/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/IntBasedBitPackingGenerator.java
index 300b84d..85a89a1 100644
--- a/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/IntBasedBitPackingGenerator.java
+++ b/parquet-generator/src/main/java/org/apache/parquet/encoding/bitpacking/IntBasedBitPackingGenerator.java
@@ -37,9 +37,6 @@ import java.io.IOException;
* The generated classes pack the values into arrays of ints (as opposed to arrays of bytes) based on a given bit width.
*
* Note: This is not really used for now as the hadoop API does not really allow writing int[]. We need to revisit this
- *
- * @author Julien Le Dem
- *
*/
public class IntBasedBitPackingGenerator {
@@ -70,7 +67,7 @@ public class IntBasedBitPackingGenerator {
fw.append(" * Adapted to pack from the Most Significant Bit first\n");
}
fw.append(" * \n");
- fw.append(" * @author automatically generated\n");
+ fw.append(" * Automatically generated\n");
fw.append(" * @see IntBasedBitPackingGenerator\n");
fw.append(" *\n");
fw.append(" */\n");
diff --git a/parquet-generator/src/main/java/org/apache/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java b/parquet-generator/src/main/java/org/apache/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java
index fc5413e..1cdb5d1 100644
--- a/parquet-generator/src/main/java/org/apache/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java
+++ b/parquet-generator/src/main/java/org/apache/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java
@@ -83,7 +83,7 @@ public class IncrementallyUpdatedFilterPredicateGenerator {
"import org.apache.parquet.io.PrimitiveColumnIO;\n" +
"import org.apache.parquet.schema.PrimitiveComparator;\n\n" +
"/**\n" +
- " * This class is auto-generated by {@link parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator}\n" +
+ " * This class is auto-generated by org.apache.parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator\n" +
" * Do not manually edit!\n" +
" * See {@link IncrementallyUpdatedFilterPredicateBuilderBase}\n" +
" */\n");
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/compat/RowGroupFilter.java b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/compat/RowGroupFilter.java
index 68c38ce..d1d40e9 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/compat/RowGroupFilter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/compat/RowGroupFilter.java
@@ -52,6 +52,10 @@ public class RowGroupFilter implements Visitor<List<BlockMetaData>> {
}
/**
+ * @param filter a filter
+ * @param blocks a list of block metadata to filter
+ * @param schema the file schema
+ * @return a filtered list of block metadata
* @deprecated will be removed in 2.0.0.
*/
@Deprecated
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
index bc43516..555b856 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
@@ -96,6 +96,7 @@ public class ParquetMetadataConverter {
}
/**
+ * @param conf a configuration
* @deprecated will be removed in 2.0.0; use {@code ParquetMetadataConverter(ParquetReadOptions)}
*/
@Deprecated
@@ -379,8 +380,10 @@ public class ParquetMetadataConverter {
}
/**
- * @deprecated Replaced by {@link #fromParquetStatistics(
- * String createdBy, Statistics statistics, PrimitiveTypeName type)}
+ * @param statistics parquet format statistics
+ * @param type a primitive type name
+ * @return the statistics
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public static org.apache.parquet.column.statistics.Statistics fromParquetStatistics(Statistics statistics, PrimitiveTypeName type) {
@@ -388,7 +391,11 @@ public class ParquetMetadataConverter {
}
/**
- * @deprecated Use {@link #fromParquetStatistics(String, Statistics, PrimitiveType)} instead.
+ * @param createdBy the created-by string from the file
+ * @param statistics parquet format statistics
+ * @param type a primitive type name
+ * @return the statistics
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public static org.apache.parquet.column.statistics.Statistics fromParquetStatistics
@@ -716,9 +723,9 @@ public class ParquetMetadataConverter {
/**
* [ startOffset, endOffset )
- * @param startOffset
- * @param endOffset
- * @return the filter
+ * @param startOffset a start offset (inclusive)
+ * @param endOffset an end offset (exclusive)
+ * @return a range filter from the offsets
*/
public static MetadataFilter range(long startOffset, long endOffset) {
return new RangeMetadataFilter(startOffset, endOffset);
@@ -757,7 +764,6 @@ public class ParquetMetadataConverter {
/**
* [ startOffset, endOffset )
- * @author Julien Le Dem
*/
// Visible for testing
static final class RangeMetadataFilter extends MetadataFilter {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/BadConfigurationException.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/BadConfigurationException.java
index 67d9bc4..3eba962 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/BadConfigurationException.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/BadConfigurationException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* Thrown when the input/output formats are misconfigured
- *
- * @author Julien Le Dem
- *
*/
public class BadConfigurationException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
index 31d7bba..da87e5d 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
@@ -81,6 +81,7 @@ public class CodecFactory implements CompressionCodecFactory {
* always records the uncompressed size of a buffer. If this
* CodecFactory is only going to be used for decompressors, this
* parameter will not impact the function of the factory.
+ * @return a configured direct codec factory
*/
public static CodecFactory createDirectCodecFactory(Configuration config, ByteBufferAllocator allocator, int pageSize) {
return new DirectCodecFactory(config, allocator, pageSize);
@@ -128,9 +129,6 @@ public class CodecFactory implements CompressionCodecFactory {
/**
* Encapsulates the logic around hadoop compression
- *
- * @author Julien Le Dem
- *
*/
class HeapBytesCompressor extends BytesCompressor {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/Footer.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/Footer.java
index e707a5a..f677752 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/Footer.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/Footer.java
@@ -24,11 +24,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
/**
- *
* Represent the footer for a given file
- *
- * @author Julien Le Dem
- *
*/
public class Footer {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
index 6ef8a6c..95aaec4 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
@@ -94,9 +94,6 @@ import org.slf4j.LoggerFactory;
/**
* Internal implementation of the Parquet file reader as a block container
- *
- * @author Julien Le Dem
- *
*/
public class ParquetFileReader implements Closeable {
@@ -112,7 +109,7 @@ public class ParquetFileReader implements Closeable {
* @param configuration the hadoop conf to connect to the file system;
* @param partFiles the part files to read
* @return the footers for those files using the summary file if possible.
- * @throws IOException
+ * @throws IOException if there is an exception while reading footers
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -131,7 +128,7 @@ public class ParquetFileReader implements Closeable {
* @param partFiles the part files to read
* @param skipRowGroups to skipRowGroups in the footers
* @return the footers for those files using the summary file if possible.
- * @throws IOException
+ * @throws IOException if there is an exception while reading footers
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -231,6 +228,10 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @param configuration the conf to access the File System
+ * @param partFiles the files to read
+ * @return the footers
+ * @throws IOException if an exception was raised while reading footers
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -245,7 +246,7 @@ public class ParquetFileReader implements Closeable {
* @param partFiles the files to read
* @param skipRowGroups to skip the rowGroup info
* @return the footers
- * @throws IOException
+ * @throws IOException if there is an exception while reading footers
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -274,6 +275,12 @@ public class ParquetFileReader implements Closeable {
/**
* Read the footers of all the files under that path (recursively)
* not using summary files.
+ *
+ * @param configuration a configuration
+ * @param fileStatus a file status to recursively list
+ * @param skipRowGroups whether to skip reading row group metadata
+ * @return a list of footers
+ * @throws IOException if an exception is thrown while reading the footers
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -290,7 +297,7 @@ public class ParquetFileReader implements Closeable {
* @param configuration the configuration to access the FS
* @param fileStatus the root dir
* @return all the footers
- * @throws IOException
+ * @throws IOException if an exception is thrown while reading the footers
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -300,6 +307,10 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @param configuration a configuration
+ * @param path a file path
+ * @return a list of footers
+ * @throws IOException if an exception is thrown while reading the footers
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -314,10 +325,10 @@ public class ParquetFileReader implements Closeable {
/**
* this always returns the row groups
- * @param configuration
- * @param pathStatus
- * @return
- * @throws IOException
+ * @param configuration a configuration
+ * @param pathStatus a file status to read footers from
+ * @return a list of footers
+ * @throws IOException if an exception is thrown while reading the footers
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -331,8 +342,9 @@ public class ParquetFileReader implements Closeable {
* using summary files if possible
* @param configuration the configuration to access the FS
* @param pathStatus the root dir
+ * @param skipRowGroups whether to skip reading row group metadata
* @return all the footers
- * @throws IOException
+ * @throws IOException if an exception is thrown while reading the footers
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -358,10 +370,10 @@ public class ParquetFileReader implements Closeable {
/**
* Specifically reads a given summary file
- * @param configuration
- * @param summaryStatus
+ * @param configuration a configuration
+ * @param summaryStatus file status for a summary file
* @return the metadata translated for each file
- * @throws IOException
+ * @throws IOException if an exception is thrown while reading the summary file
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -409,7 +421,7 @@ public class ParquetFileReader implements Closeable {
/**
* Reads the meta data block in the footer of the file
- * @param configuration
+ * @param configuration a configuration
* @param file the parquet File
* @return the metadata blocks in the footer
* @throws IOException if an error occurs while reading the file
@@ -424,7 +436,7 @@ public class ParquetFileReader implements Closeable {
/**
* Reads the meta data in the footer of the file.
* Skipping row groups (or not) based on the provided filter
- * @param configuration
+ * @param configuration a configuration
* @param file the Parquet File
* @param filter the filter to apply to row groups
* @return the metadata with row groups filtered.
@@ -437,6 +449,10 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @param configuration a configuration
+ * @param file the Parquet File
+ * @return the metadata with row groups.
+ * @throws IOException if an error occurs while reading the file
* @deprecated will be removed in 2.0.0;
* use {@link ParquetFileReader#open(InputFile, ParquetReadOptions)}
*/
@@ -447,7 +463,7 @@ public class ParquetFileReader implements Closeable {
/**
* Reads the meta data block in the footer of the file
- * @param configuration
+ * @param configuration a configuration
* @param file the parquet File
* @param filter the filter to apply to row groups
* @return the metadata blocks in the footer
@@ -517,6 +533,10 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @param conf a configuration
+ * @param file a file path to open
+ * @return a parquet file reader
+ * @throws IOException if there is an error while opening the file
* @deprecated will be removed in 2.0.0; use {@link #open(InputFile)}
*/
@Deprecated
@@ -526,6 +546,11 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @param conf a configuration
+ * @param file a file path to open
+ * @param filter a metadata filter
+ * @return a parquet file reader
+ * @throws IOException if there is an error while opening the file
* @deprecated will be removed in 2.0.0; use {@link #open(InputFile,ParquetReadOptions)}
*/
@Deprecated
@@ -535,6 +560,11 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @param conf a configuration
+ * @param file a file path to open
+ * @param footer a footer for the file if already loaded
+ * @return a parquet file reader
+ * @throws IOException if there is an error while opening the file
* @deprecated will be removed in 2.0.0
*/
@Deprecated
@@ -547,6 +577,7 @@ public class ParquetFileReader implements Closeable {
*
* @param file an input file
* @return an open ParquetFileReader
+ * @throws IOException if there is an error while opening the file
*/
public static ParquetFileReader open(InputFile file) throws IOException {
return new ParquetFileReader(file, ParquetReadOptions.builder().build());
@@ -556,7 +587,9 @@ public class ParquetFileReader implements Closeable {
* Open a {@link InputFile file} with {@link ParquetReadOptions options}.
*
* @param file an input file
+ * @param options parquet read options
* @return an open ParquetFileReader
+ * @throws IOException if there is an error while opening the file
*/
public static ParquetFileReader open(InputFile file, ParquetReadOptions options) throws IOException {
return new ParquetFileReader(file, options);
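
For context, the non-deprecated path that the deprecations above all point to is this pair of open() methods. A minimal sketch of reading row groups through them (the file name is a placeholder and the empty ParquetReadOptions are an assumption):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.ParquetReadOptions;
    import org.apache.parquet.column.page.PageReadStore;
    import org.apache.parquet.hadoop.ParquetFileReader;
    import org.apache.parquet.hadoop.util.HadoopInputFile;

    public class OpenWithOptions {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // HadoopInputFile adapts a Hadoop Path to the InputFile abstraction
        try (ParquetFileReader reader = ParquetFileReader.open(
            HadoopInputFile.fromPath(new Path("data.parquet"), conf),
            ParquetReadOptions.builder().build())) {
          PageReadStore rowGroup;
          // readNextRowGroup() returns null once all row groups are consumed
          while ((rowGroup = reader.readNextRowGroup()) != null) {
            System.out.println("row group with " + rowGroup.getRowCount() + " rows");
          }
        }
      }
    }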
@@ -577,7 +610,12 @@ public class ParquetFileReader implements Closeable {
private DictionaryPageReader nextDictionaryReader = null;
/**
- * @deprecated use {@link ParquetFileReader(Configuration,FileMetaData,Path,List,List)} instead.
+ * @param configuration the Hadoop conf
+ * @param filePath Path for the parquet file
+ * @param blocks the blocks to read
+ * @param columns the columns to read (their path)
+ * @throws IOException if the file can not be opened
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public ParquetFileReader(Configuration configuration, Path filePath, List<BlockMetaData> blocks,
@@ -588,9 +626,11 @@ public class ParquetFileReader implements Closeable {
/**
* @param configuration the Hadoop conf
* @param fileMetaData fileMetaData for parquet file
+ * @param filePath Path for the parquet file
* @param blocks the blocks to read
* @param columns the columns to read (their path)
* @throws IOException if the file can not be opened
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public ParquetFileReader(
@@ -612,8 +652,7 @@ public class ParquetFileReader implements Closeable {
* @param file Path to a parquet file
* @param filter a {@link MetadataFilter} for selecting row groups
* @throws IOException if the file can not be opened
- * @deprecated will be removed in 2.0.0;
- * use {@link ParquetFileReader(InputFile,MetadataFilter)} instead
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public ParquetFileReader(Configuration conf, Path file, MetadataFilter filter) throws IOException {
@@ -626,6 +665,7 @@ public class ParquetFileReader implements Closeable {
* @param file Path to a parquet file
* @param footer a {@link ParquetMetadata} footer already read from the file
* @throws IOException if the file can not be opened
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public ParquetFileReader(Configuration conf, Path file, ParquetMetadata footer) throws IOException {
@@ -682,6 +722,7 @@ public class ParquetFileReader implements Closeable {
}
/**
+ * @return the path for this file
* @deprecated will be removed in 2.0.0; use {@link #getFile()} instead
*/
@Deprecated
@@ -818,7 +859,7 @@ public class ParquetFileReader implements Closeable {
*
* @param meta a column's ColumnChunkMetaData to read the dictionary from
* @return an uncompressed DictionaryPage or null
- * @throws IOException
+ * @throws IOException if there is an error while reading the dictionary
*/
DictionaryPage readDictionary(ColumnChunkMetaData meta) throws IOException {
if (!meta.getEncodings().contains(Encoding.PLAIN_DICTIONARY) &&
@@ -875,9 +916,6 @@ public class ParquetFileReader implements Closeable {
/**
* The data for a column chunk
- *
- * @author Julien Le Dem
- *
*/
private class Chunk {
@@ -986,7 +1024,7 @@ public class ParquetFileReader implements Closeable {
/**
* @param size the size of the page
* @return the page
- * @throws IOException
+ * @throws IOException if there is an error while reading from the file stream
*/
public BytesInput readAsBytesInput(int size) throws IOException {
return BytesInput.from(stream.sliceBuffers(size));
@@ -996,9 +1034,6 @@ public class ParquetFileReader implements Closeable {
/**
* deals with a now fixed bug where compressedLength was missing a few bytes.
- *
- * @author Julien Le Dem
- *
*/
private class WorkaroundChunk extends Chunk {
@@ -1088,8 +1123,6 @@ public class ParquetFileReader implements Closeable {
/**
* describes a list of consecutive column chunks to be read at once.
- *
- * @author Julien Le Dem
*/
private class ConsecutiveChunkList {
@@ -1107,7 +1140,7 @@ public class ParquetFileReader implements Closeable {
/**
* adds a chunk to the list.
* It must be consecutive to the previous chunk
- * @param descriptor
+ * @param descriptor a chunk descriptor
*/
public void addChunk(ChunkDescriptor descriptor) {
chunks.add(descriptor);
@@ -1117,7 +1150,7 @@ public class ParquetFileReader implements Closeable {
/**
* @param f file to read the chunks from
* @return the chunks
- * @throws IOException
+ * @throws IOException if there is an error while reading from the stream
*/
public List<Chunk> readAll(SeekableInputStream f) throws IOException {
List<Chunk> result = new ArrayList<Chunk>(chunks.size());
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
index f94fd9c..c98c247 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
@@ -72,9 +72,6 @@ import org.slf4j.LoggerFactory;
/**
* Internal implementation of the Parquet file writer as a block container
- *
- * @author Julien Le Dem
- *
*/
public class ParquetFileWriter {
private static final Logger LOG = LoggerFactory.getLogger(ParquetFileWriter.class);
@@ -126,9 +123,6 @@ public class ParquetFileWriter {
/**
* Captures the order in which methods should be called
- *
- * @author Julien Le Dem
- *
*/
private enum STATE {
NOT_STARTED {
@@ -182,8 +176,7 @@ public class ParquetFileWriter {
* @param schema the schema of the data
* @param file the file to write to
* @throws IOException if the file can not be created
- * @deprecated will be removed in 2.0.0;
- * use {@link ParquetFileWriter(OutputFile,MessageType,Mode,long,long)} instead
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetFileWriter(Configuration configuration, MessageType schema,
@@ -198,8 +191,7 @@ public class ParquetFileWriter {
* @param file the file to write to
* @param mode file creation mode
* @throws IOException if the file can not be created
- * @deprecated will be removed in 2.0.0;
- * use {@link ParquetFileWriter(OutputFile,MessageType,Mode,long,long)} instead
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetFileWriter(Configuration configuration, MessageType schema,
@@ -216,8 +208,7 @@ public class ParquetFileWriter {
* @param rowGroupSize the row group size
* @param maxPaddingSize the maximum padding
* @throws IOException if the file can not be created
- * @deprecated will be removed in 2.0.0;
- * use {@link ParquetFileWriter(OutputFile,MessageType,Mode,long,long)} instead
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetFileWriter(Configuration configuration, MessageType schema,
@@ -267,6 +258,7 @@ public class ParquetFileWriter {
* @param schema the schema of the data
* @param file the file to write to
* @param rowAndBlockSize the row group size
+ * @param maxPaddingSize the maximum padding
* @throws IOException if the file can not be created
*/
ParquetFileWriter(Configuration configuration, MessageType schema,
@@ -282,7 +274,7 @@ public class ParquetFileWriter {
}
/**
* start the file
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public void start() throws IOException {
state = state.start();
@@ -293,7 +285,7 @@ public class ParquetFileWriter {
/**
* start a block
* @param recordCount the record count in this block
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public void startBlock(long recordCount) throws IOException {
state = state.startBlock();
@@ -310,8 +302,8 @@ public class ParquetFileWriter {
* start a column inside a block
* @param descriptor the column descriptor
* @param valueCount the value count in this column
- * @param compressionCodecName
- * @throws IOException
+ * @param compressionCodecName a compression codec name
+ * @throws IOException if there is an error while writing
*/
public void startColumn(ColumnDescriptor descriptor,
long valueCount,
@@ -333,6 +325,7 @@ public class ParquetFileWriter {
/**
* writes a dictionary page
* @param dictionaryPage the dictionary page
+ * @throws IOException if there is an error while writing
*/
public void writeDictionaryPage(DictionaryPage dictionaryPage) throws IOException {
state = state.write();
@@ -364,6 +357,7 @@ public class ParquetFileWriter {
* @param rlEncoding encoding of the repetition level
* @param dlEncoding encoding of the definition level
* @param valuesEncoding encoding of values
+ * @throws IOException if there is an error while writing
*/
@Deprecated
public void writeDataPage(
@@ -399,9 +393,11 @@ public class ParquetFileWriter {
* @param valueCount count of values
* @param uncompressedPageSize the size of the data once uncompressed
* @param bytes the compressed data for the page without header
+ * @param statistics statistics for the page
* @param rlEncoding encoding of the repetition level
* @param dlEncoding encoding of the definition level
* @param valuesEncoding encoding of values
+ * @throws IOException if there is an error while writing
*/
public void writeDataPage(
int valueCount, int uncompressedPageSize,
@@ -446,7 +442,7 @@ public class ParquetFileWriter {
* @param bytes bytes to be written including page headers
* @param uncompressedTotalPageSize total uncompressed size (without page headers)
* @param compressedTotalPageSize total compressed size (without page headers)
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
void writeDataPages(BytesInput bytes,
long uncompressedTotalPageSize,
@@ -474,7 +470,7 @@ public class ParquetFileWriter {
/**
* end a column (once all rep, def and data have been written)
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public void endColumn() throws IOException {
state = state.endColumn();
@@ -498,7 +494,7 @@ public class ParquetFileWriter {
/**
* ends a block once all column chunks have been written
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public void endBlock() throws IOException {
state = state.endBlock();
@@ -509,6 +505,9 @@ public class ParquetFileWriter {
}
/**
+ * @param conf a configuration
+ * @param file a file path to append the contents of to this file
+ * @throws IOException if there is an error while reading or writing
* @deprecated will be removed in 2.0.0; use {@link #appendFile(InputFile)} instead
*/
@Deprecated
@@ -521,6 +520,10 @@ public class ParquetFileWriter {
}
/**
+ * @param file a file stream to read from
+ * @param rowGroups row groups to copy
+ * @param dropColumns whether to drop columns from the file that are not in this file's schema
+ * @throws IOException if there is an error while reading or writing
* @deprecated will be removed in 2.0.0;
* use {@link #appendRowGroups(SeekableInputStream,List,boolean)} instead
*/
@@ -540,6 +543,10 @@ public class ParquetFileWriter {
}
/**
+ * @param from a file stream to read from
+ * @param rowGroup row group to copy
+ * @param dropColumns whether to drop columns from the file that are not in this file's schema
+ * @throws IOException if there is an error while reading or writing
* @deprecated will be removed in 2.0.0;
* use {@link #appendRowGroup(SeekableInputStream,BlockMetaData,boolean)} instead
*/
@@ -643,7 +650,7 @@ public class ParquetFileWriter {
* @param to any {@link PositionOutputStream}
* @param start where in the from stream to start copying
* @param length the number of bytes to copy
- * @throws IOException
+ * @throws IOException if there is an error while reading or writing
*/
private static void copy(SeekableInputStream from, PositionOutputStream to,
long start, long length) throws IOException{
@@ -668,7 +675,7 @@ public class ParquetFileWriter {
* ends a file once all blocks have been written.
* closes the file.
* @param extraMetaData the extra meta data to write in the footer
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public void end(Map<String, String> extraMetaData) throws IOException {
state = state.end();
@@ -695,6 +702,10 @@ public class ParquetFileWriter {
/**
* Given a list of metadata files, merge them into a single ParquetMetadata
* Requires that the schemas be compatible, and the extraMetadata be exactly equal.
+ * @param files a list of files to merge metadata from
+ * @param conf a configuration
+ * @return merged parquet metadata for the files
+ * @throws IOException if there is an error while writing
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -720,6 +731,10 @@ public class ParquetFileWriter {
* Requires that the schemas be compatible, and the extraMetaData be exactly equal.
* This is useful when merging 2 directories of parquet files into a single directory, as long
* as both directories were written with compatible schemas and equal extraMetaData.
+ * @param files a list of files to merge metadata from
+ * @param outputPath path to write merged metadata to
+ * @param conf a configuration
+ * @throws IOException if there is an error while reading or writing
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -733,7 +748,7 @@ public class ParquetFileWriter {
* @param configuration the configuration to use to get the FileSystem
* @param outputPath the directory to write the _metadata file to
* @param footers the list of footers to merge
- * @throws IOException
+ * @throws IOException if there is an error while writing
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -743,6 +758,11 @@ public class ParquetFileWriter {
/**
* writes _common_metadata file, and optionally a _metadata file depending on the {@link JobSummaryLevel} provided
+ * @param configuration the configuration to use to get the FileSystem
+ * @param outputPath the directory to write the _metadata file to
+ * @param footers the list of footers to merge
+ * @param level level of summary to write
+ * @throws IOException if there is an error while writing
* @deprecated metadata files are not recommended and will be removed in 2.0.0
*/
@Deprecated
@@ -808,7 +828,7 @@ public class ParquetFileWriter {
/**
* @return the current position in the underlying file
- * @throws IOException
+ * @throws IOException if there is an error while getting the current stream's position
*/
public long getPos() throws IOException {
return out.getPos();
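
As the STATE enum above notes, ParquetFileWriter enforces a strict call order: start, then per block startBlock, per column startColumn / page writes / endColumn, then endBlock, and finally end. A minimal sketch of that order (the schema, sizes, and counts are placeholder assumptions, the page-writing calls are elided, and real code normally goes through ParquetWriter instead of driving this class directly):

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.hadoop.ParquetFileWriter;
    import org.apache.parquet.hadoop.metadata.CompressionCodecName;
    import org.apache.parquet.hadoop.util.HadoopOutputFile;
    import org.apache.parquet.schema.MessageType;
    import org.apache.parquet.schema.MessageTypeParser;

    public class WriterCallOrder {
      public static void main(String[] args) throws IOException {
        MessageType schema = MessageTypeParser.parseMessageType(
            "message example { required int32 id; }");
        ParquetFileWriter w = new ParquetFileWriter(
            HadoopOutputFile.fromPath(new Path("out.parquet"), new Configuration()),
            schema, ParquetFileWriter.Mode.CREATE,
            128 * 1024 * 1024,  // row group size
            8 * 1024 * 1024);   // max padding
        long recordCount = 1;   // hypothetical counts for this sketch
        long valueCount = 1;
        w.start();
        w.startBlock(recordCount);
        w.startColumn(schema.getColumns().get(0), valueCount,
            CompressionCodecName.UNCOMPRESSED);
        // writeDictionaryPage(...) and writeDataPage(...) calls go here
        w.endColumn();
        w.endBlock();
        w.end(Collections.<String, String>emptyMap());
      }
    }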
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
index 979388d..2c21e52 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
@@ -88,8 +88,6 @@ import org.slf4j.LoggerFactory;
* @see #FILTER_PREDICATE
* @see #TASK_SIDE_METADATA
*
- * @author Julien Le Dem
- *
* @param <T> the type of the materialized records
*/
public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
@@ -166,6 +164,8 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
}
/**
+ * @param configuration a configuration
+ * @return an unbound record filter class
* @deprecated use {@link #getFilter(Configuration)}
*/
@Deprecated
@@ -223,6 +223,9 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
/**
* Returns a non-null Filter, which is a wrapper around either a
* FilterPredicate, an UnboundRecordFilter, or a no-op filter.
+ *
+ * @param conf a configuration
+ * @return a filter for the unbound record filter specified in conf
*/
public static Filter getFilter(Configuration conf) {
return FilterCompat.get(getFilterPredicate(conf), getUnboundRecordFilterInstance(conf));
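
A short sketch of the setup side that getFilter pairs with: the job pushes a FilterPredicate into its configuration, and readers later recover it as a wrapped Filter. The column name and value here are assumptions, and the snippet assumes the usual imports from org.apache.parquet.filter2.predicate, org.apache.parquet.filter2.compat, and org.apache.parquet.hadoop.example:

    Job job = Job.getInstance(new Configuration());
    job.setInputFormatClass(ExampleInputFormat.class);
    // push a predicate into the conf; task-side reads will pick it up
    ParquetInputFormat.setFilterPredicate(job.getConfiguration(),
        FilterApi.eq(FilterApi.intColumn("id"), 42));
    // later, on the read path:
    FilterCompat.Filter filter = ParquetInputFormat.getFilter(job.getConfiguration());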
@@ -247,6 +250,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
* the read support property in their configuration.
*
* @param readSupportClass a ReadSupport subclass
+ * @param <S> the Java read support type
*/
public <S extends ReadSupport<T>> ParquetInputFormat(Class<S> readSupportClass) {
this.readSupportClass = readSupportClass;
@@ -279,6 +283,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
/**
* @param configuration to find the configuration for the read support
+ * @param <T> the Java type of objects created by the ReadSupport
* @return the configured read support
*/
@SuppressWarnings("unchecked")
@@ -289,6 +294,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
/**
* @param readSupportClass to instantiate
+ * @param <T> the Java type of objects created by the ReadSupport
* @return the configured read support
*/
@SuppressWarnings("unchecked")
@@ -337,7 +343,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
* @param configuration the configuration to connect to the file system
* @param footers the footers of the files to read
* @return the splits for the footers
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated split planning using file footers will be removed
*/
@Deprecated
@@ -399,7 +405,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
/**
* @param jobContext the current job context
* @return the footers for the files
- * @throws IOException
+ * @throws IOException if there is an error while reading
*/
public List<Footer> getFooters(JobContext jobContext) throws IOException {
List<FileStatus> statuses = listStatus(jobContext);
@@ -473,7 +479,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
* @param configuration to connect to the file system
* @param statuses the files to open
* @return the footers of the files
- * @throws IOException
+ * @throws IOException if there is an error while reading
*/
public List<Footer> getFooters(Configuration configuration, Collection<FileStatus> statuses) throws IOException {
LOG.debug("reading {} files", statuses.size());
@@ -484,7 +490,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
/**
* @param jobContext the current job context
* @return the merged metadata from the footers
- * @throws IOException
+ * @throws IOException if there is an error while reading
*/
public GlobalMetaData getGlobalMetaData(JobContext jobContext) throws IOException {
return ParquetFileWriter.getGlobalMetaData(getFooters(jobContext));
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputSplit.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputSplit.java
index b97daa5..0507440 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputSplit.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputSplit.java
@@ -47,10 +47,10 @@ import org.apache.parquet.schema.MessageTypeParser;
*
* This class is private to the ParquetInputFormat.
* Backward compatibility is not maintained.
- *
- * @author Julien Le Dem
+ * @deprecated will be removed in 2.0.0. use FileInputSplit instead.
*/
@Private
+@Deprecated
public class ParquetInputSplit extends FileSplit implements Writable {
@@ -67,15 +67,15 @@ public class ParquetInputSplit extends FileSplit implements Writable {
/**
* For compatibility only
* use {@link ParquetInputSplit#ParquetInputSplit(Path, long, long, long, String[], long[])}
- * @param path
- * @param start
- * @param length
- * @param hosts
- * @param blocks
- * @param requestedSchema
- * @param fileSchema
- * @param extraMetadata
- * @param readSupportMetadata
+ * @param path a Path
+ * @param start split start location
+ * @param length split length
+ * @param hosts locality information for this split
+ * @param blocks Parquet blocks in this split
+ * @param requestedSchema the requested schema
+ * @param fileSchema the file schema
+ * @param extraMetadata string map of file metadata
+ * @param readSupportMetadata string map of metadata from read support
*/
@Deprecated
public ParquetInputSplit(
@@ -129,7 +129,7 @@ public class ParquetInputSplit extends FileSplit implements Writable {
*
* @param split a mapreduce FileSplit
* @return a ParquetInputSplit
- * @throws IOException
+ * @throws IOException if there is an error while creating the Parquet split
*/
static ParquetInputSplit from(FileSplit split) throws IOException {
return new ParquetInputSplit(split.getPath(),
@@ -141,9 +141,9 @@ public class ParquetInputSplit extends FileSplit implements Writable {
* Builds a {@code ParquetInputSplit} from a mapred
* {@link org.apache.hadoop.mapred.FileSplit}.
*
- * @param split a mapreduce FileSplit
+ * @param split a mapred FileSplit
* @return a ParquetInputSplit
- * @throws IOException
+ * @throws IOException if there is an error while creating the Parquet split
*/
static ParquetInputSplit from(org.apache.hadoop.mapred.FileSplit split) throws IOException {
return new ParquetInputSplit(split.getPath(),
@@ -196,7 +196,7 @@ public class ParquetInputSplit extends FileSplit implements Writable {
/**
* @return app specific metadata from the file
- * @deprecated the file footer is no longer read before creating input splits
+ * @deprecated will be removed in 2.0.0. the file footer is no longer read before creating input splits
*/
@Deprecated
public Map<String, String> getExtraMetadata() {
@@ -206,6 +206,7 @@ public class ParquetInputSplit extends FileSplit implements Writable {
/**
* @return app specific metadata provided by the read support in the init phase
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
Map<String, String> getReadSupportMetadata() {
@@ -215,6 +216,7 @@ public class ParquetInputSplit extends FileSplit implements Writable {
/**
* @return the offsets of the row group selected if this has been determined on the client side
+ * @deprecated will be removed in 2.0.0.
*/
public long[] getRowGroupOffsets() {
return rowGroupOffsets;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
index 340ec11..ff5bab3 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
@@ -97,8 +97,6 @@ import org.slf4j.LoggerFactory;
*
* if none of those is set the data is uncompressed.
*
- * @author Julien Le Dem
- *
* @param <T> the type of the materialized records
*/
public class ParquetOutputFormat<T> extends FileOutputFormat<Void, T> {
@@ -319,7 +317,9 @@ public class ParquetOutputFormat<T> extends FileOutputFormat<Void, T> {
/**
* constructor used when this OutputFormat in wrapped in another one (In Pig for example)
+ *
* @param writeSupport the class used to convert the incoming records
+ * @param <S> the Java write support type
*/
public <S extends WriteSupport<T>> ParquetOutputFormat(S writeSupport) {
this.writeSupport = writeSupport;
@@ -328,6 +328,8 @@ public class ParquetOutputFormat<T> extends FileOutputFormat<Void, T> {
/**
* used when directly using the output format and configuring the write support implementation
* using parquet.write.support.class
+ *
+ * @param <S> the Java write support type
*/
public <S extends WriteSupport<T>> ParquetOutputFormat() {
}
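
For the common case the job wiring looks like the following sketch (the schema string and codec choice are assumptions; ExampleOutputFormat supplies a Group-based write support):

    Job job = Job.getInstance(new Configuration());
    job.setOutputFormatClass(ExampleOutputFormat.class);
    ExampleOutputFormat.setSchema(job, MessageTypeParser.parseMessageType(
        "message example { required int32 id; }"));
    ParquetOutputFormat.setCompression(job, CompressionCodecName.SNAPPY);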
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
index 22c2198..d9b273b 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
@@ -59,7 +59,7 @@ public class ParquetReader<T> implements Closeable {
/**
* @param file the file to read
* @param readSupport to materialize records
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #builder(ReadSupport, Path)}
*/
@Deprecated
@@ -71,7 +71,7 @@ public class ParquetReader<T> implements Closeable {
* @param conf the configuration
* @param file the file to read
* @param readSupport to materialize records
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #builder(ReadSupport, Path)}
*/
@Deprecated
@@ -83,7 +83,7 @@ public class ParquetReader<T> implements Closeable {
* @param file the file to read
* @param readSupport to materialize records
* @param unboundRecordFilter the filter to use to filter records
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #builder(ReadSupport, Path)}
*/
@Deprecated
@@ -96,7 +96,7 @@ public class ParquetReader<T> implements Closeable {
* @param file the file to read
* @param readSupport to materialize records
* @param unboundRecordFilter the filter to use to filter records
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #builder(ReadSupport, Path)}
*/
@Deprecated
@@ -125,7 +125,7 @@ public class ParquetReader<T> implements Closeable {
/**
* @return the next record or null if finished
- * @throws IOException
+ * @throws IOException if there is an error while reading
*/
public T read() throws IOException {
try {
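
The read() contract above leads to the usual null-terminated loop. A minimal sketch with the builder that the deprecations point to (the file name is a placeholder):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.example.data.Group;
    import org.apache.parquet.hadoop.ParquetReader;
    import org.apache.parquet.hadoop.example.GroupReadSupport;

    public class ReadLoop {
      public static void main(String[] args) throws IOException {
        try (ParquetReader<Group> reader = ParquetReader
            .builder(new GroupReadSupport(), new Path("data.parquet"))
            .build()) {
          Group record;
          while ((record = reader.read()) != null) {  // null means end of input
            System.out.println(record);
          }
        }
      }
    }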
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
index 9ca8be9..492d917 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
@@ -57,8 +57,6 @@ import org.slf4j.LoggerFactory;
*
* @see ParquetInputFormat
*
- * @author Julien Le Dem
- *
* @param <T> type of the materialized records
*/
public class ParquetRecordReader<T> extends RecordReader<Void, T> {
@@ -84,7 +82,7 @@ public class ParquetRecordReader<T> extends RecordReader<Void, T> {
/**
* @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
* @param filter for filtering individual records
- * @deprecated use {@link #ParquetRecordReader(ReadSupport, Filter)}
+ * @deprecated will be removed in 2.0.0.
*/
@Deprecated
public ParquetRecordReader(ReadSupport<T> readSupport, UnboundRecordFilter filter) {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordWriter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordWriter.java
index a9ade96..2542402 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordWriter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordWriter.java
@@ -39,8 +39,6 @@ import static org.apache.parquet.Preconditions.checkNotNull;
*
* @see ParquetOutputFormat
*
- * @author Julien Le Dem
- *
* @param <T> the type of the materialized records
*/
public class ParquetRecordWriter<T> extends RecordWriter<Void, T> {
@@ -56,10 +54,12 @@ public class ParquetRecordWriter<T> extends RecordWriter<Void, T> {
* @param schema the schema of the records
* @param extraMetaData extra meta data to write in the footer of the file
* @param blockSize the size of a block in the file (this will be approximate)
+ * @param pageSize the size of a page in the file (this will be approximate)
* @param compressor the compressor used to compress the pages
* @param dictionaryPageSize the threshold for dictionary size
* @param enableDictionary to enable the dictionary
* @param validating if schema validation should be turned on
+ * @param writerVersion writer compatibility version
*/
@Deprecated
public ParquetRecordWriter(
@@ -92,10 +92,13 @@ public class ParquetRecordWriter<T> extends RecordWriter<Void, T> {
* @param schema the schema of the records
* @param extraMetaData extra meta data to write in the footer of the file
* @param blockSize the size of a block in the file (this will be approximate)
+ * @param pageSize the size of a page in the file (this will be approximate)
* @param compressor the compressor used to compress the pages
* @param dictionaryPageSize the threshold for dictionary size
* @param enableDictionary to enable the dictionary
* @param validating if schema validation should be turned on
+ * @param writerVersion writer compatibility version
+ * @param memoryManager memory manager for the write
*/
@Deprecated
public ParquetRecordWriter(
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetWriter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetWriter.java
index 1908206..a32df39 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetWriter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetWriter.java
@@ -66,8 +66,8 @@ public class ParquetWriter<T> implements Closeable {
* @param compressionCodecName the compression codec to use
* @param blockSize the block size threshold
* @param pageSize the page size threshold
- * @throws IOException
- * @see #ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, boolean, boolean)
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(Path file, WriteSupport<T> writeSupport, CompressionCodecName compressionCodecName, int blockSize, int pageSize) throws IOException {
@@ -85,8 +85,8 @@ public class ParquetWriter<T> implements Closeable {
* @param pageSize the page size threshold (both data and dictionary)
* @param enableDictionary to turn dictionary encoding on
* @param validating to turn on validation using the schema
- * @throws IOException
- * @see #ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, int, boolean, boolean)
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(
@@ -111,8 +111,8 @@ public class ParquetWriter<T> implements Closeable {
* @param dictionaryPageSize the page size threshold for the dictionary pages
* @param enableDictionary to turn dictionary encoding on
* @param validating to turn on validation using the schema
- * @throws IOException
- * @see #ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, int, boolean, boolean, WriterVersion)
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(
@@ -144,8 +144,8 @@ public class ParquetWriter<T> implements Closeable {
* @param enableDictionary to turn dictionary encoding on
* @param validating to turn on validation using the schema
* @param writerVersion version of parquetWriter from {@link ParquetProperties.WriterVersion}
- * @throws IOException
- * @see #ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, int, boolean, boolean, WriterVersion, Configuration)
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(
@@ -174,7 +174,8 @@ public class ParquetWriter<T> implements Closeable {
* @param validating to turn on validation using the schema
* @param writerVersion version of parquetWriter from {@link ParquetProperties.WriterVersion}
* @param conf Hadoop configuration to use while accessing the filesystem
- * @throws IOException
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(
@@ -207,7 +208,8 @@ public class ParquetWriter<T> implements Closeable {
* @param validating to turn on validation using the schema
* @param writerVersion version of parquetWriter from {@link ParquetProperties.WriterVersion}
* @param conf Hadoop configuration to use while accessing the filesystem
- * @throws IOException
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(
@@ -239,7 +241,8 @@ public class ParquetWriter<T> implements Closeable {
*
* @param file the file to create
* @param writeSupport the implementation to write a record to a RecordConsumer
- * @throws IOException
+ * @throws IOException if there is an error while writing
+ * @deprecated will be removed in 2.0.0
*/
@Deprecated
public ParquetWriter(Path file, WriteSupport<T> writeSupport) throws IOException {
@@ -360,6 +363,7 @@ public class ParquetWriter<T> implements Closeable {
protected abstract SELF self();
/**
+ * @param conf a configuration
* @return an appropriate WriteSupport for the object model.
*/
protected abstract WriteSupport<T> getWriteSupport(Configuration conf);
@@ -517,7 +521,7 @@ public class ParquetWriter<T> implements Closeable {
* Build a {@link ParquetWriter} with the accumulated configuration.
*
* @return a configured {@code ParquetWriter} instance.
- * @throws IOException
+ * @throws IOException if there is an error while creating the writer
*/
public ParquetWriter<T> build() throws IOException {
if (file != null) {
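
The builder is the supported replacement for the deprecated constructors above. A minimal sketch using the Group model's ExampleParquetWriter (the schema, path, and codec are assumptions):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.example.data.Group;
    import org.apache.parquet.example.data.simple.SimpleGroupFactory;
    import org.apache.parquet.hadoop.ParquetWriter;
    import org.apache.parquet.hadoop.example.ExampleParquetWriter;
    import org.apache.parquet.hadoop.metadata.CompressionCodecName;
    import org.apache.parquet.schema.MessageType;
    import org.apache.parquet.schema.MessageTypeParser;

    public class BuildWriter {
      public static void main(String[] args) throws IOException {
        MessageType schema = MessageTypeParser.parseMessageType(
            "message example { required int32 id; }");
        try (ParquetWriter<Group> writer = ExampleParquetWriter
            .builder(new Path("out.parquet"))
            .withConf(new Configuration())
            .withType(schema)
            .withCompressionCodec(CompressionCodecName.SNAPPY)
            .build()) {
          writer.write(new SimpleGroupFactory(schema).newGroup().append("id", 1));
        }
      }
    }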
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/PrintFooter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/PrintFooter.java
index 5a3c6f5..e04ea62 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/PrintFooter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/PrintFooter.java
@@ -55,8 +55,6 @@ import org.apache.parquet.schema.MessageType;
/**
* Utility to print footer information
- * @author Julien Le Dem
- *
*/
public class PrintFooter {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingReadSupport.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingReadSupport.java
index 3ddaa77..8100a35 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingReadSupport.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingReadSupport.java
@@ -28,9 +28,7 @@ import org.apache.parquet.schema.MessageType;
/**
* Helps composing read supports
*
- * @author Julien Le Dem
- *
- * @param <T>
+ * @param <T> the Java class of objects created by this ReadSupport
*/
public class DelegatingReadSupport<T> extends ReadSupport<T> {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingWriteSupport.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingWriteSupport.java
index 66a4b01..f5bbfc6 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingWriteSupport.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/DelegatingWriteSupport.java
@@ -26,9 +26,7 @@ import org.apache.parquet.io.api.RecordConsumer;
*
* Helps composing write supports
*
- * @author Julien Le Dem
- *
- * @param <T>
+ * @param <T> the Java class of objects written with this WriteSupport
*/
public class DelegatingWriteSupport<T> extends WriteSupport<T> {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/InitContext.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/InitContext.java
index 222898e..6bc5e5d 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/InitContext.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/InitContext.java
@@ -30,9 +30,6 @@ import org.apache.parquet.schema.MessageType;
/**
*
* Context passed to ReadSupport when initializing for read
- *
- * @author Julien Le Dem
- *
*/
public class InitContext {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/ReadSupport.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/ReadSupport.java
index 6d8c1fd..6234452 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/ReadSupport.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/ReadSupport.java
@@ -29,8 +29,6 @@ import org.apache.parquet.schema.MessageTypeParser;
/**
* Abstraction used by the {@link org.apache.parquet.hadoop.ParquetInputFormat} to materialize records
*
- * @author Julien Le Dem
- *
* @param <T> the type of the materialized record
*/
abstract public class ReadSupport<T> {
@@ -105,9 +103,6 @@ abstract public class ReadSupport<T> {
/**
* information to read the file
- *
- * @author Julien Le Dem
- *
*/
public static final class ReadContext {
private final MessageType requestedSchema;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/WriteSupport.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/WriteSupport.java
index 1a61faa..c08882f 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/WriteSupport.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/api/WriteSupport.java
@@ -33,17 +33,12 @@ import org.apache.parquet.schema.MessageType;
/**
* Abstraction to use with {@link org.apache.parquet.hadoop.ParquetOutputFormat} to convert incoming records
*
- * @author Julien Le Dem
- *
* @param <T> the type of the incoming records
*/
abstract public class WriteSupport<T> {
/**
* information to be persisted in the file
- *
- * @author Julien Le Dem
- *
*/
public static final class WriteContext {
private final MessageType schema;
@@ -75,9 +70,6 @@ abstract public class WriteSupport<T> {
/**
* Information to be added in the file once all the records have been written
- *
- * @author Julien Le Dem
- *
*/
public static final class FinalizedWriteContext {
private final Map<String, String> extraMetaData;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
index 4530abc..66349e7 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
@@ -34,8 +34,6 @@ import static org.apache.parquet.hadoop.metadata.CompressionCodecName.UNCOMPRESS
* Template class and factory for accessing codec-related configurations in different APIs (mapreduce or mapred),
* use {@link #from(org.apache.hadoop.mapred.JobConf)} for mapred API,
* use {@link #from(org.apache.hadoop.mapreduce.TaskAttemptContext)} for mapreduce API
- *
- * @author Tianshuo Deng
*/
public abstract class CodecConfig {
private static final Logger LOG = LoggerFactory.getLogger(CodecConfig.class);
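
Usage follows the two factories named above; getCodec() as the accessor is an assumption here, and the context objects are whatever the task API provides:

    // mapreduce API
    CompressionCodecName codec = CodecConfig.from(taskAttemptContext).getCodec();
    // mapred API
    CompressionCodecName mapredCodec = CodecConfig.from(jobConf).getCodec();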
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleInputFormat.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleInputFormat.java
index 791cb04..173aef4 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleInputFormat.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleInputFormat.java
@@ -25,9 +25,6 @@ import org.apache.parquet.hadoop.ParquetInputFormat;
* Example input format to read Parquet files
*
* This Input format uses a rather inefficient data model but works independently of higher level abstractions.
- *
- * @author Julien Le Dem
- *
*/
public class ExampleInputFormat extends ParquetInputFormat<Group> {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleOutputFormat.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleOutputFormat.java
index d503e0d..c95716e 100755
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleOutputFormat.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/ExampleOutputFormat.java
@@ -18,7 +18,6 @@
*/
package org.apache.parquet.hadoop.example;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.parquet.example.data.Group;
@@ -32,15 +31,12 @@ import org.apache.parquet.schema.MessageType;
* must be provided the schema up front
* @see ExampleOutputFormat#setSchema(Job, MessageType)
* @see GroupWriteSupport#PARQUET_EXAMPLE_SCHEMA
- *
- * @author Julien Le Dem
- *
*/
public class ExampleOutputFormat extends ParquetOutputFormat<Group> {
/**
* set the schema being written to the job conf
- * @param job
+ * @param job a job
* @param schema the schema of the data
*/
public static void setSchema(Job job, MessageType schema) {
@@ -49,7 +45,7 @@ public class ExampleOutputFormat extends ParquetOutputFormat<Group> {
/**
* retrieve the schema from the conf
- * @param job
+ * @param job a job
* @return the schema
*/
public static MessageType getSchema(Job job) {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/Container.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/Container.java
index ecdf685..bf77b69 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/Container.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/Container.java
@@ -19,8 +19,8 @@
package org.apache.parquet.hadoop.mapred;
/**
- * A simple container of <T> objects that you can get and set.
- * @param <T>
+ * A simple container of objects that you can get and set.
+ * @param <T> the Java type of the object held by this container
*/
public class Container<T> {
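
A sketch of its use, assuming the get/set accessors named in the class comment (the Group payload is an arbitrary example):

    Container<Group> holder = new Container<Group>();
    holder.set(record);           // stash the current value
    Group current = holder.get(); // read it back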
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/MapredParquetOutputCommitter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/MapredParquetOutputCommitter.java
index 0504db8..3baf19b 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/MapredParquetOutputCommitter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/mapred/MapredParquetOutputCommitter.java
@@ -28,10 +28,7 @@ import org.apache.parquet.hadoop.util.ContextUtil;
import java.io.IOException;
/**
- *
* Adapter for supporting ParquetOutputCommitter in mapred API
- *
- * @author Tianshuo Deng
*/
public class MapredParquetOutputCommitter extends FileOutputCommitter {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/BlockMetaData.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/BlockMetaData.java
index 13e6fa8..0cad0a5 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/BlockMetaData.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/BlockMetaData.java
@@ -25,9 +25,6 @@ import java.util.List;
/**
* Block metadata stored in the footer and passed in an InputSplit
- *
- * @author Julien Le Dem
- *
*/
public class BlockMetaData {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java
index 562bcad..fb94247 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java
@@ -30,7 +30,6 @@ import org.apache.parquet.schema.Types;
/**
* Column meta data for a block stored in the file footer and passed in the InputSplit
- * @author Julien Le Dem
*/
abstract public class ColumnChunkMetaData {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/FileMetaData.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/FileMetaData.java
index 6135d58..9d02bf8 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/FileMetaData.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/FileMetaData.java
@@ -29,9 +29,6 @@ import org.apache.parquet.schema.MessageType;
/**
* File level meta data (Schema, codec, ...)
- *
- * @author Julien Le Dem
- *
*/
public final class FileMetaData implements Serializable {
private static final long serialVersionUID = 1L;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/GlobalMetaData.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/GlobalMetaData.java
index 677ef03..740405d 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/GlobalMetaData.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/GlobalMetaData.java
@@ -32,9 +32,6 @@ import org.apache.parquet.schema.MessageType;
/**
* Merged metadata when reading from multiple files.
* This is to allow schema evolution
- *
- * @author Julien Le Dem
- *
*/
public class GlobalMetaData implements Serializable {
private static final long serialVersionUID = 1L;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ParquetMetadata.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ParquetMetadata.java
index 3ee61cd..47cad49 100755
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ParquetMetadata.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ParquetMetadata.java
@@ -33,9 +33,6 @@ import org.codehaus.jackson.map.SerializationConfig.Feature;
/**
* Meta Data block stored in the footer of the file
* contains file level (Codec, Schema, ...) and block level (location, columns, record count, ...) meta data
- *
- * @author Julien Le Dem
- *
*/
public class ParquetMetadata {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/ContextUtil.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/ContextUtil.java
index b2fec1b..1114ed3 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/ContextUtil.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/ContextUtil.java
@@ -197,6 +197,10 @@ public class ContextUtil {
/**
* Creates JobContext from a JobConf and jobId using the correct constructor
* based on the Hadoop version. <code>jobId</code> could be null.
+ *
+ * @param conf a configuration
+ * @param jobId a job id
+ * @return a job context
*/
public static JobContext newJobContext(Configuration conf, JobID jobId) {
try {
@@ -212,8 +216,12 @@ public class ContextUtil {
}
/**
- * Creates TaskAttempContext from a JobConf and jobId using the correct
+ * Creates TaskAttemptContext from a JobConf and jobId using the correct
* constructor based on the Hadoop version.
+ *
+ * @param conf a configuration
+ * @param taskAttemptId a task attempt id
+ * @return a task attempt context
*/
public static TaskAttemptContext newTaskAttemptContext(
Configuration conf, TaskAttemptID taskAttemptId) {
@@ -230,6 +238,9 @@ public class ContextUtil {
}
/**
+ * @param name a string name
+ * @param displayName a string display name
+ * @param value an initial value
* @return with Hadoop 2 : <code>new GenericCounter(args)</code>,<br>
* with Hadoop 1 : <code>new Counter(args)</code>
*/
@@ -249,6 +260,9 @@ public class ContextUtil {
/**
* Invoke getConfiguration() method on JobContext. Works with both
* Hadoop 1 and 2.
+ *
+ * @param context a job context
+ * @return the context's configuration
*/
public static Configuration getConfiguration(JobContext context) {
try {
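
Typical call sites, assuming a JobContext and a TaskAttemptID are in scope:

    Configuration conf = ContextUtil.getConfiguration(jobContext);
    TaskAttemptContext ctx = ContextUtil.newTaskAttemptContext(conf, taskAttemptId);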
@@ -291,6 +305,11 @@ public class ContextUtil {
/**
* Invokes a method and rethrows any exception as runtime exceptions.
+ *
+ * @param method a method
+ * @param obj an object to run method on
+ * @param args an array of arguments to the method
+ * @return the result of the method call
*/
private static Object invoke(Method method, Object obj, Object... args) {
try {
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
index ffbe2a7..529115b 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
@@ -45,13 +45,12 @@ public final class SerializationUtil {
private SerializationUtil() { }
/**
- * Reads an object (that was written using
- * {@link #writeObjectToConfAsBase64}) from a configuration.
+ * Writes an object to a configuration.
*
* @param key for the configuration
+ * @param obj the object to write
* @param conf to read from
- * @return the read object, or null if key is not present in conf
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public static void writeObjectToConfAsBase64(String key, Object obj, Configuration conf) throws IOException {
ByteArrayOutputStream baos = null;
@@ -78,8 +77,9 @@ public final class SerializationUtil {
*
* @param key for the configuration
* @param conf to read from
+ * @param <T> the Java type of the deserialized object
* @return the read object, or null if key is not present in conf
- * @throws IOException
+ * @throws IOException if there is an error while reading
*/
@SuppressWarnings("unchecked")
public static <T> T readObjectFromConfAsBase64(String key, Configuration conf) throws IOException {
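
A round trip through the two methods, where the key string and the MyFilter type are hypothetical and the object must implement Serializable:

    SerializationUtil.writeObjectToConfAsBase64("parquet.example.key", myFilter, conf);
    MyFilter copy = SerializationUtil.readObjectFromConfAsBase64("parquet.example.key", conf);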
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/BenchmarkCounter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/BenchmarkCounter.java
index b8521b3..4e5f890 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/BenchmarkCounter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/BenchmarkCounter.java
@@ -26,8 +26,6 @@ import org.apache.parquet.hadoop.util.counters.mapreduce.MapReduceCounterLoader;
/**
* Encapsulate counter operations, compatible with Hadoop1/2, mapred/mapreduce API
- *
- * @author Tianshuo Deng
*/
public class BenchmarkCounter {
@@ -46,7 +44,7 @@ public class BenchmarkCounter {
/**
* Init counters in hadoop's mapreduce API, support both 1.x and 2.x
*
- * @param context
+ * @param context a task attempt context
*/
public static void initCounterFromContext(TaskAttemptContext context) {
counterLoader = new MapReduceCounterLoader(context);
@@ -56,8 +54,8 @@ public class BenchmarkCounter {
/**
* Init counters in hadoop's mapred API, which is used by cascading and Hive.
*
- * @param reporter
- * @param configuration
+ * @param reporter a reporter
+ * @param configuration a configuration
*/
public static void initCounterFromReporter(Reporter reporter, Configuration configuration) {
counterLoader = new MapRedCounterLoader(reporter, configuration);
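
So initialization is one call from whichever API the task runs in, per the two methods above:

    // mapreduce tasks
    BenchmarkCounter.initCounterFromContext(taskAttemptContext);
    // mapred tasks (cascading, Hive)
    BenchmarkCounter.initCounterFromReporter(reporter, configuration);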
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/CounterLoader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/CounterLoader.java
index 0b9f92f..f606c7c 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/CounterLoader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/CounterLoader.java
@@ -21,7 +21,6 @@ package org.apache.parquet.hadoop.util.counters;
/**
* Factory interface for CounterLoaders. Loads a counter according to groupName and counterName;
* if the configuration flag named counterFlag is false, the counter will not be loaded
- * @author Tianshuo Deng
*/
public interface CounterLoader {
public ICounter getCounterByNameAndFlag(String groupName, String counterName, String counterFlag);
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/ICounter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/ICounter.java
index c10b8a8..32272af 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/ICounter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/ICounter.java
@@ -20,7 +20,6 @@ package org.apache.parquet.hadoop.util.counters;
/**
* Interface for counters in mapred/mapreduce package of hadoop
- * @author Tianshuo Deng
*/
public interface ICounter {
public void increment(long val);
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterAdapter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterAdapter.java
index 4377d44..2cf5226 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterAdapter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterAdapter.java
@@ -23,7 +23,6 @@ import org.apache.parquet.hadoop.util.counters.ICounter;
/**
* Adapt a mapred counter to ICounter
- * @author Tianshuo Deng
*/
public class MapRedCounterAdapter implements ICounter {
private org.apache.hadoop.mapred.Counters.Counter adaptee;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterLoader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterLoader.java
index 0e5a32d..a9567f4 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterLoader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapred/MapRedCounterLoader.java
@@ -28,7 +28,6 @@ import org.apache.parquet.hadoop.util.counters.ICounter;
/**
* Concrete factory for counters in the mapred API;
* gets a counter using the mapred API when the corresponding flag is set, otherwise returns a NullCounter
- * @author Tianshuo Deng
*/
public class MapRedCounterLoader implements CounterLoader {
private Reporter reporter;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterAdapter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterAdapter.java
index 1339977..a361b26 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterAdapter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterAdapter.java
@@ -24,7 +24,6 @@ import org.apache.parquet.hadoop.util.counters.ICounter;
/**
* Adapt a mapreduce counter to ICounter
- * @author Tianshuo Deng
*/
public class MapReduceCounterAdapter implements ICounter {
private Counter adaptee;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterLoader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterLoader.java
index 1bf4b97..25bcc7e 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterLoader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/counters/mapreduce/MapReduceCounterLoader.java
@@ -28,7 +28,6 @@ import org.apache.parquet.hadoop.util.counters.ICounter;
/**
* Concrete factory for counters in the mapreduce API;
* gets a counter using the mapreduce API when the corresponding flag is set, otherwise returns a NullCounter
- * @author Tianshuo Deng
*/
public class MapReduceCounterLoader implements CounterLoader {
private TaskAttemptContext context;
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedInputFormatTest.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedInputFormatTest.java
index e293483..92238de 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedInputFormatTest.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedInputFormatTest.java
@@ -50,7 +50,6 @@ import static org.junit.Assert.assertTrue;
/**
* DeprecatedParquetInputFormat is used by cascading. It initializes the recordReader using an initialize method with
* different parameters than ParquetInputFormat
- * @author Tianshuo Deng
*/
public class DeprecatedInputFormatTest {
final Path parquetPath = new Path("target/test/example/TestInputOutputFormat/parquet");
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedOutputFormatTest.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedOutputFormatTest.java
index 73ae131..d866458 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedOutputFormatTest.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/DeprecatedOutputFormatTest.java
@@ -39,7 +39,6 @@ import java.io.IOException;
/**
* DeprecatedParquetInputFormat is used by cascading. It initializes the recordReader using an initialize method with
* different parameters than ParquetInputFormat
- * @author Tianshuo Deng
*/
public class DeprecatedOutputFormatTest {
final Path parquetPath = new Path("target/test/example/TestInputOutputFormat/parquet");
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/src/main/java/org/apache/parquet/hive/HiveBinding.java b/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/src/main/java/org/apache/parquet/hive/HiveBinding.java
index 1500153..09e4974 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/src/main/java/org/apache/parquet/hive/HiveBinding.java
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/src/main/java/org/apache/parquet/hive/HiveBinding.java
@@ -48,10 +48,10 @@ public interface HiveBinding {
* practice when modifying JobConf objects in InputFormats, for example
* HCatalog does this.
*
- * @param jobConf
- * @param path
+ * @param jobConf a mapred job conf
+ * @param path a path
* @return cloned jobConf which can be used to read Parquet files
- * @throws IOException
+ * @throws IOException if there is an error pushing projections and filters
*/
public JobConf pushProjectionsAndFilters(final JobConf jobConf, final Path path) throws IOException;
}
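
As context for the javadoc fix above, a minimal usage sketch of pushProjectionsAndFilters; how the HiveBinding instance is obtained and the table path are assumptions, and this snippet is not part of the patch:

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.parquet.hive.HiveBinding;

    public class HiveBindingSketch {
      // Returns a cloned JobConf with projections and filters pushed down,
      // leaving the caller's JobConf untouched as the javadoc recommends.
      static JobConf projectedConf(HiveBinding binding, JobConf jobConf) throws IOException {
        return binding.pushProjectionsAndFilters(jobConf, new Path("/warehouse/my_table")); // path is a placeholder
      }
    }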
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
index 50dc564..fbb7a95 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
@@ -40,11 +40,9 @@ import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Type.Repetition;
/**
- *
* A MapWritableReadSupport
*
* Manages the translation between Hive and Parquet
- *
*/
public class DataWritableReadSupport extends ReadSupport<ArrayWritable> {
@@ -114,7 +112,7 @@ public class DataWritableReadSupport extends ReadSupport<ArrayWritable> {
* It creates the hive read support to interpret data from parquet to hive
*
* @param configuration // unused
- * @param keyValueMetaData
+ * @param keyValueMetaData string map of metadata
* @param fileSchema // unused
* @param readContext containing the requested schema and the schema of the hive table
* @return Record Materialize for Hive
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
index 3acc493..44152a6 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
@@ -36,10 +36,9 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.ArrayWritable;
/**
- *
- * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.<br />
+ * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.
+ * <p>
* It can also inspect a List if Hive decides to inspect the result of an inspection.
- *
*/
public class ArrayWritableObjectInspector extends SettableStructObjectInspector {
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
index 2c2b22d..e6cf65e 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
@@ -26,11 +26,12 @@ import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Writable;
/**
- * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.<br />
- * It can also inspect a Map if Hive decides to inspect the result of an inspection.<br />
+ * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
+ * <p>
+ * It can also inspect a Map if Hive decides to inspect the result of an inspection.
+ * <p>
* When trying to access elements from the map it will iterate over all keys, inspecting them and comparing them to the
- * desired key.
- *
+ * desired key.
*/
public class DeepParquetHiveMapInspector extends AbstractParquetMapInspector {
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
index dbcc8fb..be0b1c9 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Writable;
/**
- * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as an Hive array.<br />
+ * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as a Hive array.
+ * <p>
* It can also inspect a List if Hive decides to inspect the result of an inspection.
- *
*/
public class ParquetHiveArrayInspector implements SettableListObjectInspector {
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
index 85136ea..f875d5d 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
@@ -25,9 +25,9 @@ import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Writable;
/**
- * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.<br />
+ * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
+ * <p>
* It can also inspect a Map if Hive decides to inspect the result of an inspection.
- *
*/
public class StandardParquetHiveMapInspector extends AbstractParquetMapInspector {
@@ -62,4 +62,4 @@ public class StandardParquetHiveMapInspector extends AbstractParquetMapInspector
}
throw new UnsupportedOperationException("Cannot inspect " + data.getClass().getCanonicalName());
}
-}
\ No newline at end of file
+}
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java b/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
index 7f87691..566dbee 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
@@ -80,12 +80,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- *
* A Pig Loader for the Parquet file format.
- *
- *
- * @author Julien Le Dem
- *
*/
public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDown, LoadPredicatePushdown {
private static final Logger LOG = LoggerFactory.getLogger(ParquetLoader.class);
@@ -148,7 +143,7 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
* The same as the string based constructor but for programmatic use.
*
* @param requestedSchema a subset of the original pig schema in the file
- * @param columnIndexAccess
+ * @param columnIndexAccess use column index positions as opposed to name (default: false)
*/
public ParquetLoader(Schema requestedSchema, boolean columnIndexAccess) {
this.requestedSchema = requestedSchema;
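
A short sketch of the programmatic constructor documented above; the schema string is illustrative and the snippet is not part of the patch:

    import org.apache.parquet.pig.ParquetLoader;
    import org.apache.pig.impl.logicalLayer.schema.Schema;
    import org.apache.pig.impl.util.Utils;

    public class ParquetLoaderSketch {
      static ParquetLoader newLoader() throws Exception {
        // Request a subset of the file's columns; false means columns are
        // matched by name rather than by index (the documented default).
        Schema requested = Utils.getSchemaFromString("a:int, b:chararray");
        return new ParquetLoader(requested, false);
      }
    }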
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetStorer.java b/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetStorer.java
index 68591c9..0274da8 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetStorer.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetStorer.java
@@ -46,9 +46,6 @@ import org.apache.parquet.io.ParquetEncodingException;
* It uses a TupleWriteSupport to write Tuples into the ParquetOutputFormat
* The Pig schema is automatically converted to the Parquet schema using {@link PigSchemaConverter}
* and stored in the file
- *
- * @author Julien Le Dem
- *
*/
public class ParquetStorer extends StoreFunc implements StoreMetadata {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/PigMetaData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/PigMetaData.java
index 48a6aef..0e06f3c 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/PigMetaData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/PigMetaData.java
@@ -27,9 +27,6 @@ import org.apache.pig.impl.logicalLayer.schema.Schema;
/**
* Represents Pig meta data stored in the file footer
- *
- * @author Julien Le Dem
- *
*/
public class PigMetaData {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java b/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
index cf99534..c445134 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
@@ -49,15 +49,11 @@ import org.slf4j.LoggerFactory;
/**
- *
* Converts a Pig Schema into a Parquet schema
*
* Bags are converted into an optional group containing one repeated group field to preserve distinction between empty bag and null.
* Map are converted into an optional group containing one repeated group field of (key, value).
* anonymous fields are named field_{index}. (in most cases pig already gives them an alias val_{int}, so this rarely happens)
- *
- * @author Julien Le Dem
- *
*/
public class PigSchemaConverter {
private static final Logger LOG = LoggerFactory.getLogger(PigSchemaConverter.class);
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/SchemaConversionException.java b/parquet-pig/src/main/java/org/apache/parquet/pig/SchemaConversionException.java
index e838433..2ca5bdf 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/SchemaConversionException.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/SchemaConversionException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown if the schema can not be converted
- *
- * @author Julien Le Dem
- *
*/
public class SchemaConversionException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java b/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
index 75bb5b5..50f9ebc 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
@@ -46,9 +46,6 @@ import org.slf4j.LoggerFactory;
/**
* Read support for Pig Tuple
* a Pig MetaDataBlock is expected in the initialization call
- *
- * @author Julien Le Dem
- *
*/
public class TupleReadSupport extends ReadSupport<Tuple> {
static final String PARQUET_PIG_SCHEMA = "parquet.pig.schema";
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/convert/MapConverter.java b/parquet-pig/src/main/java/org/apache/parquet/pig/convert/MapConverter.java
index 857cc3b..2d2c639 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/convert/MapConverter.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/convert/MapConverter.java
@@ -43,9 +43,6 @@ import org.apache.parquet.schema.GroupType;
/**
* Converts groups into Pig Maps
- *
- * @author Julien Le Dem
- *
*/
final class MapConverter extends GroupConverter {
@@ -87,8 +84,6 @@ final class MapConverter extends GroupConverter {
/**
* to contain the values of the Map until we read them all
- * @author Julien Le Dem
- *
*/
private static final class BufferMap extends AbstractMap<String, Object> {
private List<Entry<String, Object>> entries = new ArrayList<Entry<String, Object>>();
@@ -124,9 +119,6 @@ final class MapConverter extends GroupConverter {
/**
* convert Key/Value groups into map entries
- *
- * @author Julien Le Dem
- *
*/
final class MapKeyValueConverter extends GroupConverter {
@@ -185,9 +177,6 @@ final class MapConverter extends GroupConverter {
/**
* convert the key into a string
- *
- * @author Julien Le Dem
- *
*/
final class StringKeyConverter extends PrimitiveConverter {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/convert/ParentValueContainer.java b/parquet-pig/src/main/java/org/apache/parquet/pig/convert/ParentValueContainer.java
index 729a963..e5939b0 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/convert/ParentValueContainer.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/convert/ParentValueContainer.java
@@ -20,9 +20,6 @@ package org.apache.parquet.pig.convert;
/**
* for converters to add their current value to their parent
- *
- * @author Julien Le Dem
- *
*/
abstract public class ParentValueContainer {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/convert/TupleConverter.java b/parquet-pig/src/main/java/org/apache/parquet/pig/convert/TupleConverter.java
index 1c7ab6c..18ea9e4 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/convert/TupleConverter.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/convert/TupleConverter.java
@@ -50,11 +50,7 @@ import org.apache.parquet.schema.Type.Repetition;
/**
* converts a group into a tuple
- *
- * @author Julien Le Dem
- *
*/
-
public class TupleConverter extends GroupConverter {
private static final TupleFactory TF = TupleFactory.getInstance();
@@ -221,8 +217,6 @@ public class TupleConverter extends GroupConverter {
/**
* handle string values.
* In case of dictionary encoding, the strings will be decoded only once.
- * @author Julien Le Dem
- *
*/
static final class FieldStringConverter extends PrimitiveConverter {
@@ -289,8 +283,6 @@ public class TupleConverter extends GroupConverter {
/**
* handles DataByteArrays
- * @author Julien Le Dem
- *
*/
static final class FieldByteArrayConverter extends PrimitiveConverter {
@@ -309,8 +301,6 @@ public class TupleConverter extends GroupConverter {
/**
* Handles doubles
- * @author Julien Le Dem
- *
*/
static final class FieldDoubleConverter extends PrimitiveConverter {
@@ -354,8 +344,6 @@ public class TupleConverter extends GroupConverter {
/**
* handles floats
- * @author Julien Le Dem
- *
*/
static final class FieldFloatConverter extends PrimitiveConverter {
@@ -399,9 +387,6 @@ public class TupleConverter extends GroupConverter {
/**
* Handles longs
- *
- * @author Julien Le Dem
- *
*/
static final class FieldLongConverter extends PrimitiveConverter {
@@ -445,8 +430,6 @@ public class TupleConverter extends GroupConverter {
/**
* handle integers
- * @author Julien Le Dem
- *
*/
static final class FieldIntegerConverter extends PrimitiveConverter {
@@ -490,8 +473,6 @@ public class TupleConverter extends GroupConverter {
/**
* handle booleans
- * @author Julien Le Dem
- *
*/
static final class FieldBooleanConverter extends PrimitiveConverter {
@@ -536,7 +517,6 @@ public class TupleConverter extends GroupConverter {
/**
* handle decimal type
- *
*/
static final class FieldBigDecimalConverter extends PrimitiveConverter {
private final ParentValueContainer parent;
@@ -558,9 +538,6 @@ public class TupleConverter extends GroupConverter {
/**
* Converts groups into bags
- *
- * @author Julien Le Dem
- *
*/
static class BagConverter extends GroupConverter {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/BagSummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/BagSummaryData.java
index 7cb9ef8..8d8b263 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/BagSummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/BagSummaryData.java
@@ -25,9 +25,6 @@ import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
/**
* summary data for a bag
- *
- * @author Julien Le Dem
- *
*/
public class BagSummaryData extends SummaryData {
@@ -35,11 +32,6 @@ public class BagSummaryData extends SummaryData {
private FieldSummaryData content;
- /**
- * add a bag to the summary data
- *
- * @param bag
- */
public void add(Schema schema, DataBag bag) {
super.add(bag);
size.add(bag.size());
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/FieldSummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/FieldSummaryData.java
index a828a95..1ec5785 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/FieldSummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/FieldSummaryData.java
@@ -27,9 +27,6 @@ import org.apache.pig.impl.logicalLayer.schema.Schema;
/**
* summary data for one field of a tuple
* usually only one the *Summary member if set
- *
- * @author Julien Le Dem
- *
*/
public class FieldSummaryData extends SummaryData {
@@ -65,9 +62,6 @@ public class FieldSummaryData extends SummaryData {
error += otherFieldSummaryData.error;
}
- /**
- * add an object to the summary data
- */
public void add(Schema schema, Object o) {
super.add(o);
if (o == null) {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/MapSummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/MapSummaryData.java
index 65c9e46..a8775c8 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/MapSummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/MapSummaryData.java
@@ -25,9 +25,6 @@ import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
/**
* Summary data for a Map
- *
- * @author Julien Le Dem
- *
*/
public class MapSummaryData extends SummaryData {
@@ -36,10 +33,6 @@ public class MapSummaryData extends SummaryData {
private FieldSummaryData key;
private FieldSummaryData value;
- /**
- * add a map to the summary
- * @param m the map
- */
public void add(Schema schema, Map<?, ?> m) {
super.add(m);
size.add(m.size());
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/NumberSummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/NumberSummaryData.java
index b8bcbef..eeebb8c 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/NumberSummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/NumberSummaryData.java
@@ -20,9 +20,6 @@ package org.apache.parquet.pig.summary;
/**
* Summary data for a Number
- *
- * @author Julien Le Dem
- *
*/
public class NumberSummaryData extends SummaryData {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/StringSummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/StringSummaryData.java
index 4e7ec79..7c1351e 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/StringSummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/StringSummaryData.java
@@ -31,9 +31,6 @@ import org.apache.parquet.pig.summary.EnumStat.EnumValueCount;
/**
* Summary data for a String
- *
- * @author Julien Le Dem
- *
*/
@JsonWriteNullProperties(value = false)
public class StringSummaryData extends SummaryData {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/Summary.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/Summary.java
index 5447858..4eed734 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/Summary.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/Summary.java
@@ -39,9 +39,6 @@ import org.codehaus.jackson.map.JsonMappingException;
/**
* computes a summary of the input to a json string
- *
- * @author Julien Le Dem
- *
*/
public class Summary extends EvalFunc<String> implements Algebraic {
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/SummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/SummaryData.java
index 463a039..9a548de 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/SummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/SummaryData.java
@@ -34,9 +34,6 @@ import org.codehaus.jackson.map.SerializationConfig.Feature;
/**
* Base class for a node of the data summary tree
- *
- * @author Julien Le Dem
- *
*/
@JsonWriteNullProperties(value = false)
public abstract class SummaryData {
@@ -71,13 +68,6 @@ public abstract class SummaryData {
return stringWriter.toString();
}
- /**
- * parses JSON into the given class
- *
- * @param json
- * @param clazz
- * @return
- */
public static <T extends SummaryData> T fromJSON(String json, Class<T> clazz) {
try {
return objectMapper.readValue(new StringReader(json), clazz);
@@ -90,12 +80,6 @@ public abstract class SummaryData {
}
}
- /**
- * merges s2 into s1
- * @param s1
- * @param s2
- * @return
- */
public static <T extends SummaryData> T merge(T s1, T s2) {
if (s1 == null) {
return s2;
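
A minimal sketch of how the two static helpers shown above combine; the JSON inputs are assumed to come from serialized per-task summaries, and the snippet is not part of the patch:

    import org.apache.parquet.pig.summary.SummaryData;
    import org.apache.parquet.pig.summary.TupleSummaryData;

    public class SummaryMergeSketch {
      // Parses two serialized summaries and merges s2 into s1,
      // per the javadoc that this commit removes.
      static TupleSummaryData combine(String json1, String json2) {
        TupleSummaryData s1 = SummaryData.fromJSON(json1, TupleSummaryData.class);
        TupleSummaryData s2 = SummaryData.fromJSON(json2, TupleSummaryData.class);
        return SummaryData.merge(s1, s2);
      }
    }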
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/TupleSummaryData.java b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/TupleSummaryData.java
index 56e6086..d0844e2 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/summary/TupleSummaryData.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/summary/TupleSummaryData.java
@@ -31,9 +31,6 @@ import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
/**
* Summary data for a Tuple
* fields are in the same order as the input tuple
- *
- * @author Julien Le Dem
- *
*/
public class TupleSummaryData extends SummaryData {
private static final Logger LOG = Logger.getLogger(TupleSummaryData.class.getName());
@@ -42,11 +39,6 @@ public class TupleSummaryData extends SummaryData {
private ValueStat size = new ValueStat();
- /**
- * add tuple to the summary
- *
- * @param tuple
- */
public void addTuple(Schema schema, Tuple tuple) {
super.add(tuple);
int tupleSize = tuple.size();
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest.java b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest.java
index ef4da0b..65abfec 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest.java
@@ -35,11 +35,7 @@ import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
/**
- *
* some hardcoded latencies in hadoop prevent any information to come out of this test
- *
- * @author Julien Le Dem
- *
*/
public class PerfTest {
private static final int COLUMN_COUNT = 50;
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
index 0b8a464..68251e4 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
@@ -53,11 +53,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- *
* Uses directly loader and storer to bypass the scheduling overhead
- *
- * @author Julien Le Dem
- *
*/
public class PerfTest2 {
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTestReadAllCols.java b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTestReadAllCols.java
index 6c59f84..f5a1862 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTestReadAllCols.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTestReadAllCols.java
@@ -22,11 +22,7 @@ import java.io.File;
/**
- *
* Uses directly loader and storer to bypass the scheduling overhead
- *
- * @author Julien Le Dem
- *
*/
public class PerfTestReadAllCols {
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java b/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
index 2148e06..c8e36ad 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
@@ -41,12 +41,6 @@ import org.apache.parquet.io.RecordReader;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;
-/**
- * make sure {@link Log#LEVEL} is set to {@link Level#OFF}
- *
- * @author Julien Le Dem
- *
- */
public class TupleConsumerPerfTest {
private static final int TOP_LEVEL_COLS = 1;
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoMessageConverter.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoMessageConverter.java
index b5649a0..890f16c 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoMessageConverter.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoMessageConverter.java
@@ -42,9 +42,6 @@ import static com.google.protobuf.Descriptors.FieldDescriptor.JavaType;
/**
* Converts Protocol Buffer message (both top level and inner) to parquet.
* This is internal class, use {@link ProtoRecordConverter}.
- *
- * @see {@link ProtoWriteSupport}
- * @author Lukas Nalezenec
*/
class ProtoMessageConverter extends GroupConverter {
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetOutputFormat.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetOutputFormat.java
index 75e3ad8..25a85ca 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetOutputFormat.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetOutputFormat.java
@@ -26,19 +26,14 @@ import org.apache.parquet.hadoop.util.ContextUtil;
/**
* A Hadoop {@link org.apache.hadoop.mapreduce.OutputFormat} for Protocol Buffer Parquet files.
- * <p/>
+ * <p>
* Usage:
- * <p/>
* <pre>
- * {@code
* final Job job = new Job(conf, "Parquet writing job");
* job.setOutputFormatClass(ProtoParquetOutputFormat.class);
* ProtoParquetOutputFormat.setOutputPath(job, parquetPath);
* ProtoParquetOutputFormat.setProtobufClass(job, YourProtocolbuffer.class);
- * }
* </pre>
- *
- * @author Lukas Nalezenec
*/
public class ProtoParquetOutputFormat<T extends MessageOrBuilder> extends ParquetOutputFormat<T> {
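
A compilable variant of the javadoc usage snippet above; the proto class and paths are placeholders, and the snippet is not part of the patch:

    import com.google.protobuf.Message;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.parquet.proto.ProtoParquetOutputFormat;

    public class ProtoOutputJobSketch {
      static Job configure(Configuration conf, Path parquetPath,
                           Class<? extends Message> protoClass) throws Exception {
        Job job = Job.getInstance(conf, "Parquet writing job"); // non-deprecated form of new Job(conf, ...)
        job.setOutputFormatClass(ProtoParquetOutputFormat.class);
        ProtoParquetOutputFormat.setOutputPath(job, parquetPath);
        ProtoParquetOutputFormat.setProtobufClass(job, protoClass);
        return job;
      }
    }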
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetReader.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetReader.java
index 50bfca2..73ddec2 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetReader.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetReader.java
@@ -38,6 +38,8 @@ public class ProtoParquetReader<T extends MessageOrBuilder> extends ParquetReade
}
/**
+ * @param file a file path
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #builder(Path)}
*/
@Deprecated
@@ -47,6 +49,9 @@ public class ProtoParquetReader<T extends MessageOrBuilder> extends ParquetReade
}
/**
+ * @param file a file path
+ * @param recordFilter an unbound record filter
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #builder(Path)}
*/
@Deprecated
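
A sketch of the builder API that the @deprecated tags above point to; the generated protobuf class is hypothetical and the snippet is not part of the patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.hadoop.ParquetReader;
    import org.apache.parquet.proto.ProtoParquetReader;

    import com.example.protos.MyMessage; // hypothetical protoc-generated class

    public class ProtoReadSketch {
      static void readAll(Path file) throws Exception {
        try (ParquetReader<MyMessage.Builder> reader =
                 ProtoParquetReader.<MyMessage.Builder>builder(file).build()) {
          MyMessage.Builder record;
          while ((record = reader.read()) != null) {
            MyMessage message = record.build(); // materialize the message
          }
        }
      }
    }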
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetWriter.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetWriter.java
index 1af8a9a..ef9a5ba 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetWriter.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoParquetWriter.java
@@ -35,11 +35,12 @@ public class ProtoParquetWriter<T extends MessageOrBuilder> extends ParquetWrite
/**
* Create a new {@link ProtoParquetWriter}.
*
- * @param file
- * @param compressionCodecName
- * @param blockSize
- * @param pageSize
- * @throws IOException
+ * @param file The file name to write to.
+ * @param protoMessage Protobuf message class
+ * @param compressionCodecName Compression code to use, or CompressionCodecName.UNCOMPRESSED
+ * @param blockSize HDFS block size
+ * @param pageSize See parquet write up. Blocks are subdivided into pages for alignment and other purposes.
+ * @throws IOException if there is an error while writing
*/
public ProtoParquetWriter(Path file, Class<? extends Message> protoMessage,
CompressionCodecName compressionCodecName, int blockSize,
@@ -52,12 +53,13 @@ public class ProtoParquetWriter<T extends MessageOrBuilder> extends ParquetWrite
* Create a new {@link ProtoParquetWriter}.
*
* @param file The file name to write to.
+ * @param protoMessage Protobuf message class
* @param compressionCodecName Compression code to use, or CompressionCodecName.UNCOMPRESSED
* @param blockSize HDFS block size
* @param pageSize See parquet write up. Blocks are subdivided into pages for alignment and other purposes.
* @param enableDictionary Whether to use a dictionary to compress columns.
* @param validating to turn on validation using the schema
- * @throws IOException
+ * @throws IOException if there is an error while writing
*/
public ProtoParquetWriter(Path file, Class<? extends Message> protoMessage,
CompressionCodecName compressionCodecName, int blockSize,
@@ -71,7 +73,8 @@ public class ProtoParquetWriter<T extends MessageOrBuilder> extends ParquetWrite
* page size is 1 MB. Default compression is no compression. (Inherited from {@link ParquetWriter})
*
* @param file The file name to write to.
- * @throws IOException
+ * @param protoMessage Protobuf message class
+ * @throws IOException if there is an error while writing
*/
public ProtoParquetWriter(Path file, Class<? extends Message> protoMessage) throws IOException {
this(file, protoMessage, CompressionCodecName.UNCOMPRESSED,
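
A minimal write sketch using the convenience constructor documented above (uncompressed, default block and page sizes); the message class is hypothetical and the snippet is not part of the patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.proto.ProtoParquetWriter;

    import com.example.protos.MyMessage; // hypothetical protoc-generated class

    public class ProtoWriteSketch {
      static void writeOne(Path file, MyMessage message) throws Exception {
        ProtoParquetWriter<MyMessage> writer =
            new ProtoParquetWriter<MyMessage>(file, MyMessage.class);
        try {
          writer.write(message);
        } finally {
          writer.close();
        }
      }
    }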
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
index 3a21d84..0d79d01 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
@@ -31,9 +31,6 @@ import org.slf4j.LoggerFactory;
import java.util.Map;
-/**
- * @author Lukas Nalezenec
- */
public class ProtoReadSupport<T extends Message> extends ReadSupport<T> {
private static final Logger LOG = LoggerFactory.getLogger(ProtoReadSupport.class);
@@ -52,7 +49,10 @@ public class ProtoReadSupport<T extends Message> extends ReadSupport<T> {
* If no class is set, value from file header is used.
* Note that the value in header is present only if the file was written
* using parquet-protobuf project, it will fail otherwise.
- * */
+ *
+ * @param configuration a configuration
+ * @param protobufClass a fully-qualified protobuf class name
+ */
public static void setProtobufClass(Configuration configuration, String protobufClass) {
configuration.set(PB_CLASS, protobufClass);
}
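
A sketch of forcing the protobuf class for reads, per the javadoc above; the class name is a placeholder and the snippet is not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.parquet.proto.ProtoReadSupport;

    public class ProtoReadConfigSketch {
      static Configuration withProtobufClass(Configuration conf) {
        // Overrides the value from the file header, which is only present
        // for files written with parquet-protobuf.
        ProtoReadSupport.setProtobufClass(conf, "com.example.protos.MyMessage");
        return conf;
      }
    }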
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoRecordConverter.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoRecordConverter.java
index 99153a9..e161819 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoRecordConverter.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoRecordConverter.java
@@ -28,7 +28,7 @@ import org.apache.parquet.schema.MessageType;
* It delegates conversion of inner fields to {@link ProtoMessageConverter} class using inheritance.
* Schema is converted in {@link ProtoSchemaConverter} class.
*
- * @author Lukas Nalezenec
+ * @param <T> the Java class of protobuf messages created by this converter
*/
public class ProtoRecordConverter<T extends MessageOrBuilder> extends ProtoMessageConverter {
@@ -77,6 +77,7 @@ public class ProtoRecordConverter<T extends MessageOrBuilder> extends ProtoMessa
/***
* if buildBefore is true, Protocol Buffer builder is build to message before returning record.
+ * @param buildBefore whether to build the Builder into a Message before returning the record
*/
public void setBuildBefore(boolean buildBefore) {
this.buildBefore = buildBefore;
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
index 2c4a1ca..64668c0 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
@@ -43,10 +43,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * <p/>
* Converts a Protocol Buffer Descriptor into a Parquet schema.
- *
- * @author Lukas Nalezenec
*/
public class ProtoSchemaConverter {
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
index c0ed351..19b0706 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
@@ -45,7 +45,6 @@ import java.util.Map;
/**
* Implementation of {@link WriteSupport} for writing Protocol Buffers.
- * @author Lukas Nalezenec
*/
public class ProtoWriteSupport<T extends MessageOrBuilder> extends WriteSupport<T> {
diff --git a/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeReadSupport.java b/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeReadSupport.java
index 173a64f..63b9897 100644
--- a/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeReadSupport.java
+++ b/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeReadSupport.java
@@ -26,7 +26,6 @@ import org.apache.parquet.thrift.struct.ThriftType;
/**
* Read support for Scrooge
- * @author Tianshuo Deng
*/
public class ScroogeReadSupport extends ThriftReadSupport{
diff --git a/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeRecordConverter.java b/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeRecordConverter.java
index 9c4faa0..983d7b4 100644
--- a/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeRecordConverter.java
+++ b/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeRecordConverter.java
@@ -34,6 +34,9 @@ public class ScroogeRecordConverter<T extends ThriftStruct> extends ThriftRecord
/**
* This is for compatibility only.
+ * @param thriftClass a thrift class
+ * @param parquetSchema a parquet schema
+ * @param thriftType a thrift type descriptor
* @deprecated will be removed in 2.x
*/
@Deprecated
diff --git a/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeStructConverter.java b/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeStructConverter.java
index 310bb4c..94a0325 100644
--- a/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeStructConverter.java
+++ b/parquet-scrooge/src/main/java/org/apache/parquet/scrooge/ScroogeStructConverter.java
@@ -48,13 +48,14 @@ import static org.apache.parquet.thrift.struct.ThriftField.Requirement.OPTIONAL;
/**
* Class to convert a scrooge generated class to {@link ThriftType.StructType}. {@link ScroogeReadSupport } uses this
* class to get the requested schema
- *
- * @author Tianshuo Deng
*/
public class ScroogeStructConverter {
/**
* convert a given scrooge generated class to {@link ThriftType.StructType}
+ *
+ * @param scroogeClass a scrooge class
+ * @return a thrift type descriptor for the class
*/
public ThriftType.StructType convert(Class scroogeClass) {
return convertStructFromClass(scroogeClass);
@@ -161,6 +162,9 @@ public class ScroogeStructConverter {
/**
* Convert a field in scrooge to ThriftField in parquet
+ *
+ * @param scroogeField a scrooge field
+ * @return a thrift field descriptor for the scrooge field
*/
public ThriftField toThriftField(ThriftStructFieldInfo scroogeField) {
Requirement requirement = getRequirementType(scroogeField);
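
A sketch of the conversion entry point documented above; the Scrooge-generated class is hypothetical and the snippet is not part of the patch:

    import org.apache.parquet.scrooge.ScroogeStructConverter;
    import org.apache.parquet.thrift.struct.ThriftType;

    import com.example.scrooge.MyRecord; // hypothetical Scrooge-generated class

    public class ScroogeConvertSketch {
      static ThriftType.StructType describe() {
        // Produces the thrift type descriptor used to derive the requested schema.
        return new ScroogeStructConverter().convert(MyRecord.class);
      }
    }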
diff --git a/parquet-scrooge/src/test/java/org/apache/parquet/scrooge/ParquetScroogeSchemeTest.java b/parquet-scrooge/src/test/java/org/apache/parquet/scrooge/ParquetScroogeSchemeTest.java
index 87abf8e..159eb99 100644
--- a/parquet-scrooge/src/test/java/org/apache/parquet/scrooge/ParquetScroogeSchemeTest.java
+++ b/parquet-scrooge/src/test/java/org/apache/parquet/scrooge/ParquetScroogeSchemeTest.java
@@ -68,8 +68,6 @@ import static org.junit.Assert.assertEquals;
/**
* Write data in thrift, read in scrooge
- *
- * @author Tianshuo Deng
*/
public class ParquetScroogeSchemeTest {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
index fe8019c..d0f9d01 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
@@ -69,7 +69,6 @@ public abstract class AbstractThriftWriteSupport<T> extends WriteSupport<T> {
/**
* used from hadoop
* the configuration must contain a thriftClass setting
- * @see AbstractThriftWriteSupport#setThriftClass(Configuration, Class)
*/
public AbstractThriftWriteSupport() {
}
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftBytesOutputFormat.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftBytesOutputFormat.java
index 275d5ac..dfdb140 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftBytesOutputFormat.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftBytesOutputFormat.java
@@ -30,10 +30,6 @@ import org.apache.parquet.thrift.FieldIgnoredHandler;
/**
* Output format that turns Thrift bytes into Parquet format using the thrift TProtocol layer
- *
- *
- * @author Julien Le Dem
- *
*/
public class ParquetThriftBytesOutputFormat extends ParquetOutputFormat<BytesWritable> {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftInputFormat.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftInputFormat.java
index fa2cdb9..dcf6987 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftInputFormat.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftInputFormat.java
@@ -32,7 +32,7 @@ public class ParquetThriftInputFormat<T> extends ParquetInputFormat<T> {
/**
* ScroogeReadSupport can be used when reading scrooge records out of parquet file
- * @param readSupportClass
+ * @param readSupportClass a read support class
*/
protected ParquetThriftInputFormat(Class readSupportClass) {
super(readSupportClass);
@@ -43,8 +43,9 @@ public class ParquetThriftInputFormat<T> extends ParquetInputFormat<T> {
* that is not encoded into the parquet-serialized thrift metadata (for example,
* writing with Apache Thrift, but reading back into Twitter Scrooge version of
* the same thrift definition, or a different but compatible Apache Thrift class).
- * @param conf
- * @param klass
+ * @param conf a mapred jobconf
+ * @param klass a thrift class
+ * @param <T> the Java type of records the configured ReadSupport will produce
*/
public static <T> void setThriftClass(JobConf conf, Class<T> klass) {
conf.set(ThriftReadSupport.THRIFT_READ_CLASS_KEY, klass.getName());
@@ -55,8 +56,9 @@ public class ParquetThriftInputFormat<T> extends ParquetInputFormat<T> {
* that is not encoded into the parquet-serialized thrift metadata (for example,
* writing with Apache Thrift, but reading back into Twitter Scrooge version of
* the same thrift definition, or a different but compatible Apache Thrift class).
- * @param conf
- * @param klass
+ * @param conf a configuration
+ * @param klass a thrift class
+ * @param <T> the Java type of records the configured ReadSupport will produce
*/
public static <T> void setThriftClass(Configuration conf, Class<T> klass) {
conf.set(ThriftReadSupport.THRIFT_READ_CLASS_KEY, klass.getName());
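
A sketch of the override documented above, for reading a file written with Apache Thrift back into a different but compatible class; the record class is hypothetical and the snippet is not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.parquet.hadoop.thrift.ParquetThriftInputFormat;

    import com.example.scrooge.MyRecord; // hypothetical compatible record class

    public class ThriftClassOverrideSketch {
      static void configure(Configuration conf) {
        ParquetThriftInputFormat.setThriftClass(conf, MyRecord.class);
      }
    }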
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftOutputFormat.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftOutputFormat.java
index e3aec5a..51369f5 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftOutputFormat.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ParquetThriftOutputFormat.java
@@ -24,9 +24,6 @@ import org.apache.parquet.hadoop.ParquetOutputFormat;
import org.apache.parquet.hadoop.util.ContextUtil;
/**
- *
- * @author Julien Le Dem
- *
* @param <T> the thrift class use for serialization
*/
public class ParquetThriftOutputFormat<T extends TBase<?,?>> extends ParquetOutputFormat<T> {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
index f49fb67..1a3a2c0 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
@@ -80,6 +80,8 @@ public class ThriftReadSupport<T> extends ReadSupport<T> {
* as <a href="http://github.com/twitter/scrooge">Twiter's Scrooge</a>, a custom converter can be specified
* (for example, ScroogeRecordConverter from parquet-scrooge).
*
+ * @param conf a mapred jobconf
+ * @param klass a thrift class
* @deprecated use {@link #setRecordConverterClass(Configuration, Class)} below
*/
@Deprecated
@@ -93,6 +95,9 @@ public class ThriftReadSupport<T> extends ReadSupport<T> {
* implementation creates standard Apache Thrift {@link TBase} objects; to support alternatives, such
* as <a href="http://github.com/twitter/scrooge">Twiter's Scrooge</a>, a custom converter can be specified
* (for example, ScroogeRecordConverter from parquet-scrooge).
+ *
+ * @param conf a configuration
+ * @param klass a thrift class
*/
public static void setRecordConverterClass(Configuration conf,
Class<?> klass) {
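
A sketch of installing a custom record converter, per the javadoc above; pairing it with the parquet-scrooge converter follows the javadoc's own example, but the snippet is not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.parquet.hadoop.thrift.ThriftReadSupport;
    import org.apache.parquet.scrooge.ScroogeRecordConverter;

    public class RecordConverterSketch {
      static void configure(Configuration conf) {
        ThriftReadSupport.setRecordConverterClass(conf, ScroogeRecordConverter.class);
      }
    }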
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftToParquetFileWriter.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftToParquetFileWriter.java
index 8029603..b0ace04 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftToParquetFileWriter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftToParquetFileWriter.java
@@ -32,9 +32,6 @@ import org.apache.parquet.thrift.FieldIgnoredHandler;
/**
* To create a Parquet file from the Thrift binary of records
- *
- * @author Julien Le Dem
- *
*/
public class ThriftToParquetFileWriter implements Closeable {
@@ -84,6 +81,7 @@ public class ThriftToParquetFileWriter implements Closeable {
* @param protocolFactory to create protocols to read the incoming bytes
* @param thriftClass to produce the schema
* @param buffered buffer each record individually
+ * @param errorHandler an error handler
* @throws IOException if there was a problem writing
* @throws InterruptedException from the underlying Hadoop API
*/
@@ -100,9 +98,9 @@ public class ThriftToParquetFileWriter implements Closeable {
/**
* write one record to the columnar store
- * @param bytes
- * @throws IOException
- * @throws InterruptedException
+ * @param bytes a bytes writable
+ * @throws IOException if there is an error while writing
+ * @throws InterruptedException if writing is interrupted
*/
public void write(BytesWritable bytes) throws IOException, InterruptedException {
recordWriter.write(null, bytes);
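
A sketch of feeding write(BytesWritable) above with the thrift binary of a record; the serialization step is an assumption about typical usage and the snippet is not part of the patch:

    import org.apache.hadoop.io.BytesWritable;
    import org.apache.parquet.hadoop.thrift.ThriftToParquetFileWriter;
    import org.apache.thrift.TBase;
    import org.apache.thrift.TSerializer;

    public class ThriftBytesWriteSketch {
      static void writeRecord(ThriftToParquetFileWriter writer, TBase<?, ?> record) throws Exception {
        byte[] bytes = new TSerializer().serialize(record); // thrift binary (binary protocol by default)
        writer.write(new BytesWritable(bytes));
      }
    }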
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/BufferedProtocolReadToWrite.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/BufferedProtocolReadToWrite.java
index 9fce1c3..54aac6d 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/BufferedProtocolReadToWrite.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/BufferedProtocolReadToWrite.java
@@ -47,13 +47,10 @@ import org.apache.parquet.thrift.struct.ThriftTypeID;
* Class to read from one protocol in a buffer and then write to another one
* When there is an exception during reading, it's a skippable exception.
* When schema is not compatible, the {@link SkippableException} will be thrown.
- * <p/>
+ * <p>
* When there are fields in the data that are not defined in the schema, the fields will be ignored and the handler will
* be notified through {@link FieldIgnoredHandler#handleFieldIgnored(org.apache.thrift.protocol.TField)}
* and {@link FieldIgnoredHandler#handleRecordHasFieldIgnored()}
- *
- * @author Julien Le Dem
- *
*/
public class BufferedProtocolReadToWrite implements ProtocolPipe {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ConvertedField.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ConvertedField.java
index 9674054..8a0bbca 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ConvertedField.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ConvertedField.java
@@ -36,6 +36,8 @@ public interface ConvertedField {
/**
* The path from the root of the schema to this field.
+ *
+ * @return the fields path
*/
FieldsPath path();
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/FieldIgnoredHandler.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/FieldIgnoredHandler.java
index 15f4a22..1e0620c 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/FieldIgnoredHandler.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/FieldIgnoredHandler.java
@@ -22,7 +22,6 @@ import org.apache.thrift.protocol.TField;
/**
* Implements this class to handle when fields get ignored in {@link BufferedProtocolReadToWrite}
- * @author Tianshuo Deng
*/
public abstract class FieldIgnoredHandler {
@@ -39,7 +38,7 @@ public abstract class FieldIgnoredHandler {
* notice the difference between this method and {@link #handleRecordHasFieldIgnored()} is that:
* for one record, this method maybe called many times when there are multiple fields not defined in the schema.
*
- * @param field
+ * @param field a thrift field
*/
public void handleFieldIgnored(TField field) {
}
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetProtocol.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetProtocol.java
index 0151cde..3eb431e 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetProtocol.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetProtocol.java
@@ -32,9 +32,6 @@ import org.apache.thrift.protocol.TStruct;
/**
* Allows simple implementation of partial protocols
- *
- * @author Julien Le Dem
- *
*/
public abstract class ParquetProtocol extends TProtocol {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolPipe.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolPipe.java
index e686ac6..bb519dd 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolPipe.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolPipe.java
@@ -23,9 +23,6 @@ import org.apache.thrift.protocol.TProtocol;
/**
* reads one record from an input and writes it to an output
- *
- * @author Julien Le Dem
- *
*/
public interface ProtocolPipe {
void readOne(TProtocol in, TProtocol out) throws TException;
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolReadToWrite.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolReadToWrite.java
index 362ef5b..13cd11b 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolReadToWrite.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ProtocolReadToWrite.java
@@ -29,9 +29,6 @@ import org.apache.thrift.protocol.TType;
/**
* Class to read from one protocol and write to another one
- *
- * @author Julien Le Dem
- *
*/
public class ProtocolReadToWrite implements ProtocolPipe {
@@ -40,7 +37,7 @@ public class ProtocolReadToWrite implements ProtocolPipe {
* exceptions are not recoverable as record might be halfway written
* @param in input protocol
* @param out output protocol
- * @throws TException
+ * @throws TException if there is an error while reading or writing
*/
@Override
public void readOne(TProtocol in, TProtocol out) throws TException {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/SkippableException.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/SkippableException.java
index d04f357..f81cd2d 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/SkippableException.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/SkippableException.java
@@ -24,9 +24,6 @@ import org.apache.parquet.ParquetRuntimeException;
*
* Thrown when an error happened reading a thrift record
* Ignoring this exception will skip the bad record
- *
- * @author Julien Le Dem
- *
*/
public class SkippableException extends ParquetRuntimeException {
private static final long serialVersionUID = 1L;
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/TBaseRecordConverter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/TBaseRecordConverter.java
index 6483e59..78fc4a8 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/TBaseRecordConverter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/TBaseRecordConverter.java
@@ -31,6 +31,9 @@ public class TBaseRecordConverter<T extends TBase<?,?>> extends ThriftRecordConv
/**
* This is for compatibility only.
+ * @param thriftClass a thrift class
+ * @param requestedParquetSchema the requested Parquet schema
+ * @param thriftType the thrift type
* @deprecated will be removed in 2.x
*/
@Deprecated
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
index f61c311..5f6387b 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
@@ -27,11 +27,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- *
* Metadata for thrift stored in the file footer
- *
- * @author Julien Le Dem
- *
*/
public class ThriftMetaData {
private static final Logger LOG = LoggerFactory.getLogger(ThriftMetaData.class);
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetReader.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetReader.java
index 3354e01..602d757 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetReader.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetReader.java
@@ -34,7 +34,6 @@ import static org.apache.parquet.Preconditions.checkNotNull;
/**
* To read a parquet file into thrift objects
- * @author Julien Le Dem
* @param <T> the thrift type
*/
public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T> {
@@ -42,7 +41,7 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
/**
* @param file the file to read
* @param thriftClass the class used to read
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #build(Path)}
*/
@Deprecated
@@ -54,7 +53,7 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
* @param conf the configuration
* @param file the file to read
* @param thriftClass the class used to read
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #build(Path)}
*/
@Deprecated
@@ -65,7 +64,7 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
/**
* will use the thrift class based on the file metadata if a thrift class information is present
* @param file the file to read
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #build(Path)}
*/
@Deprecated
@@ -77,7 +76,7 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
* will use the thrift class based on the file metadata if a thrift class information is present
* @param conf the configuration
* @param file the file to read
- * @throws IOException
+ * @throws IOException if there is an error while reading
* @deprecated use {@link #build(Path)}
*/
@Deprecated
@@ -116,6 +115,9 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
* If this is called, the thrift class is used.
* If not, will use the thrift class based on the file
* metadata if a thrift class information is present.
+ *
+ * @param thriftClass a thrift class
+ * @return this for method chaining
*/
public Builder<T> withThriftClass(Class<T> thriftClass) {
this.thriftClass = checkNotNull(thriftClass, "thriftClass");
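
A sketch of the builder API that the @deprecated constructors above point to, including the optional withThriftClass call documented in this hunk; the record class is hypothetical and the snippet is not part of the patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.hadoop.ParquetReader;
    import org.apache.parquet.thrift.ThriftParquetReader;

    import com.example.thrift.MyRecord; // hypothetical thrift-generated class

    public class ThriftReadSketch {
      static void readAll(Path file) throws Exception {
        ParquetReader<MyRecord> reader = ThriftParquetReader.<MyRecord>build(file)
            .withThriftClass(MyRecord.class) // optional: otherwise taken from file metadata
            .build();
        try {
          MyRecord record;
          while ((record = reader.read()) != null) {
            // process record
          }
        } finally {
          reader.close();
        }
      }
    }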
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetWriter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetWriter.java
index 51f253b..4a23131 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetWriter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftParquetWriter.java
@@ -31,8 +31,6 @@ import org.apache.parquet.hadoop.thrift.TBaseWriteSupport;
/**
* To generate Parquet files using thrift
*
- * @author Julien Le Dem
- *
* @param <T> the type of the thrift class used to write data
*/
public class ThriftParquetWriter<T extends TBase<?,?>> extends ParquetWriter<T> {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
index 0bc0455..3244b32 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
@@ -61,9 +61,7 @@ import org.slf4j.LoggerFactory;
/**
* converts the columnar events into a Thrift protocol.
*
- * @author Julien Le Dem
- *
- * @param <T>
+ * @param <T> the Java type of records created by this converter
*/
public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
@@ -82,9 +80,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* Handles field events creation by wrapping the converter for the actual type
- *
- * @author Julien Le Dem
- *
*/
static class PrimitiveFieldHandler extends PrimitiveConverter {
@@ -161,9 +156,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* Handles field events creation by wrapping the converter for the actual type
- *
- * @author Julien Le Dem
- *
*/
static class GroupFieldhandler extends GroupConverter {
@@ -210,9 +202,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* counts the instances created to use in List/Set/Map that need to inform of the element count in the protocol
- *
- * @author Julien Le Dem
- *
*/
static class GroupCounter extends GroupConverter implements Counter {
@@ -253,9 +242,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* counts the instances created to use in List/Set/Map that need to inform of the element count in the protocol
- *
- * @author Julien Le Dem
- *
*/
static class PrimitiveCounter extends PrimitiveConverter implements Counter {
@@ -316,9 +302,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* convert primitive values
- *
- * @author Julien Le Dem
- *
*/
static class FieldPrimitiveConverter extends PrimitiveConverter {
@@ -408,8 +391,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* converts Binary into String
- * @author Julien Le Dem
- *
*/
static class FieldStringConverter extends PrimitiveConverter {
@@ -437,8 +418,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* converts Binary into Enum
- * @author Julien Le Dem
- *
*/
static class FieldEnumConverter extends PrimitiveConverter {
@@ -479,8 +458,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* convert to Maps
- * @author Julien Le Dem
- *
*/
class MapConverter extends GroupConverter {
@@ -541,8 +518,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* converts to a key value pair (in maps)
- * @author Julien Le Dem
- *
*/
class MapKeyValueConverter extends GroupConverter {
@@ -579,8 +554,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* converts to a Set
- * @author Julien Le Dem
- *
*/
class SetConverter extends CollectionConverter {
@@ -616,8 +589,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* converts to a List
- * @author Julien Le Dem
- *
*/
class ListConverter extends CollectionConverter {
@@ -653,8 +624,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* Base class for converting List and Set, which work essentially the same way
- * @author Julien Le Dem
- *
*/
abstract class CollectionConverter extends GroupConverter {
@@ -776,8 +745,6 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* converts to Struct
- * @author Julien Le Dem
- *
*/
class StructConverter extends GroupConverter {
@@ -868,6 +835,10 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
/**
* This is for compatibility only.
+ * @param thriftReader the class responsible for instantiating the final object and reading from the protocol
+ * @param name the name of that type (the thrift class's simple name)
+ * @param requestedParquetSchema the schema for the incoming columnar events
+ * @param thriftType the thrift type descriptor
* @deprecated will be removed in 2.x
*/
@Deprecated
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConvertVisitor.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConvertVisitor.java
index 82175a2..1185382 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConvertVisitor.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConvertVisitor.java
@@ -70,8 +70,6 @@ import static org.apache.parquet.schema.Types.primitive;
/**
* Visitor Class for converting a thrift definition to parquet message type.
* Projection can be done by providing a {@link FieldProjectionFilter}
- *
- * @author Tianshuo Deng
*/
class ThriftSchemaConvertVisitor implements ThriftType.StateVisitor<ConvertedField, ThriftSchemaConvertVisitor.State> {
private final FieldProjectionFilter fieldProjectionFilter;
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConverter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConverter.java
index 7717e04..2cb061b 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConverter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftSchemaConverter.java
@@ -67,6 +67,9 @@ public class ThriftSchemaConverter {
* This method may throw if structOrUnionType is unknown.
*
* Use convertWithoutProjection below to convert a StructType to MessageType
+ *
+ * @param struct the thrift type descriptor
+ * @return the struct as a Parquet message type
*/
public MessageType convert(StructType struct) {
MessageType messageType = ThriftSchemaConvertVisitor.convert(struct, fieldProjectionFilter, true);
@@ -77,6 +80,9 @@ public class ThriftSchemaConverter {
/**
* struct is not required to have a known structOrUnionType, which is useful
* for converting a StructType from an (older) file schema to a MessageType
+ *
+ * @param struct the thrift type descriptor
+ * @return the struct as a Parquet message type
*/
public static MessageType convertWithoutProjection(StructType struct) {
return ThriftSchemaConvertVisitor.convert(struct, FieldProjectionFilter.ALL_COLUMNS, false);
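For context, the two conversion paths documented above can be exercised
along these lines (a hedged sketch: FooStruct is an illustrative generated
thrift class, and the static toStructType helper is assumed to be the usual
way to obtain a StructType descriptor):

    import org.apache.parquet.schema.MessageType;
    import org.apache.parquet.thrift.ThriftSchemaConverter;
    import org.apache.parquet.thrift.struct.ThriftType.StructType;

    StructType struct = ThriftSchemaConverter.toStructType(FooStruct.class);
    // projection-aware conversion; may throw if structOrUnionType is unknown
    MessageType projected = new ThriftSchemaConverter().convert(struct);
    // conversion that tolerates an unknown structOrUnionType (older file schemas)
    MessageType full = ThriftSchemaConverter.convertWithoutProjection(struct);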
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/ParquetThriftStorer.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/ParquetThriftStorer.java
index 4fa85c4..1f899e3 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/ParquetThriftStorer.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/ParquetThriftStorer.java
@@ -36,9 +36,6 @@ import org.apache.parquet.io.ParquetEncodingException;
* To store in Pig using a thrift class
* usage:
* STORE 'foo' USING parquet.thrift.pig.ParquetThriftStorer('my.thrift.Class');
- *
- * @author Julien Le Dem
- *
*/
public class ParquetThriftStorer extends StoreFunc {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/TupleToThriftWriteSupport.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/TupleToThriftWriteSupport.java
index b8add82..d752061 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/TupleToThriftWriteSupport.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/pig/TupleToThriftWriteSupport.java
@@ -31,9 +31,6 @@ import com.twitter.elephantbird.pig.util.PigToThrift;
/**
* Stores Pig tuples as Thrift objects
- *
- * @author Julien Le Dem
- *
*/
public class TupleToThriftWriteSupport extends WriteSupport<Tuple> {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/FieldsPath.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/FieldsPath.java
index 239384d..e193f16 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/FieldsPath.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/FieldsPath.java
@@ -25,8 +25,6 @@ import org.apache.parquet.thrift.struct.ThriftType;
/**
* Represents an immutable column path as a sequence of fields.
- *
- * @author Tianshuo Deng
*/
public class FieldsPath {
private final ArrayList<ThriftField> fields;
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
index b048f16..917e5ba 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
/**
* Stricter Implementation of {@link FieldProjectionFilter}.
*
- * See {@link parquet.thrift.projection.deprecated.DeprecatedFieldProjectionFilter} for the previous
+ * See {@link org.apache.parquet.thrift.projection.deprecated.DeprecatedFieldProjectionFilter} for the previous
* syntax that allows for more powerful glob patterns, but has less error reporting and less strict requirements.
*
* This filter requires that every *possible* expansion of glob expressions (like '{x,y,z}') must match at least one
@@ -70,7 +70,10 @@ public class StrictFieldProjectionFilter implements FieldProjectionFilter {
* columnsToKeepGlobs should be a list of Strings in the format expected by
* {@link Strings#expandGlobToWildCardPaths(String, char)}, separated by ';'.
* Should only be used for parsing values out of the hadoop config -- for APIs
- * and programmatic access, use {@link StrictFieldProjectionFilter(List)}.
+ * and programmatic access, use {@link #StrictFieldProjectionFilter(List)}.
+ *
+ * @param columnsToKeepGlobs glob patterns for the columns to keep, separated by ';'
+ * @return a field projection filter
*/
public static StrictFieldProjectionFilter fromSemicolonDelimitedString(String columnsToKeepGlobs) {
return new StrictFieldProjectionFilter(parseSemicolonDelimitedString(columnsToKeepGlobs));
@@ -79,6 +82,7 @@ public class StrictFieldProjectionFilter implements FieldProjectionFilter {
/**
* Construct a StrictFieldProjectionFilter from a list of Strings in the format expected by
* {@link Strings#expandGlobToWildCardPaths(String, char)}
+ * @param columnsToKeepGlobs glob patterns for columns to keep
*/
public StrictFieldProjectionFilter(List<String> columnsToKeepGlobs) {
this.columnsToKeep = new ArrayList<WildcardPathStatus>();
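The two construction paths documented above differ only in how the glob
patterns arrive; a minimal sketch (the patterns themselves are illustrative):

    import java.util.Arrays;
    import org.apache.parquet.thrift.projection.StrictFieldProjectionFilter;

    // programmatic access: pass the globs as a list
    StrictFieldProjectionFilter filter =
        new StrictFieldProjectionFilter(Arrays.asList("name", "address.*"));

    // parsing a value out of the hadoop config: globs separated by ';'
    StrictFieldProjectionFilter fromConfig =
        StrictFieldProjectionFilter.fromSemicolonDelimitedString("name;address.*");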
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/ThriftProjectionException.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/ThriftProjectionException.java
index 298705f..85ea215 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/ThriftProjectionException.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/ThriftProjectionException.java
@@ -22,9 +22,6 @@ import org.apache.parquet.ParquetRuntimeException;
/**
* thrown if the schema cannot be projected/filtered
- *
- * @author Tianshuo Deng
- *
*/
public class ThriftProjectionException extends ParquetRuntimeException {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/amend/ProtocolEventsAmender.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/amend/ProtocolEventsAmender.java
index 63ba233..842778f 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/amend/ProtocolEventsAmender.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/amend/ProtocolEventsAmender.java
@@ -28,8 +28,6 @@ import java.util.*;
/**
* fills in default values for required fields in TProtocols after a projection is specified.
- *
- * @author Tianshuo Deng
*/
public class ProtocolEventsAmender {
List<TProtocol> rootEvents;
@@ -44,8 +42,8 @@ public class ProtocolEventsAmender {
* and create a default value if a required field is missing
*
* @param recordThriftType the Thrift Struct definition for events
- * @return
- * @throws TException
+ * @return a list of events
+ * @throws TException if there is an error while amending events
*/
public List<TProtocol> amendMissingRequiredFields(StructType recordThriftType) throws TException {
Iterator<TProtocol> protocolIter = rootEvents.iterator();
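A hedged sketch of how the amender is driven (the event list comes from
upstream decoding and is illustrative here; the constructor signature is
assumed from the rootEvents field shown above):

    import java.util.List;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.parquet.thrift.projection.amend.ProtocolEventsAmender;
    import org.apache.parquet.thrift.struct.ThriftType.StructType;

    List<TProtocol> events = /* events buffered while reading projected columns */ null;
    StructType structType = /* thrift descriptor for the record type */ null;
    ProtocolEventsAmender amender = new ProtocolEventsAmender(events);
    // walks the events and injects defaults for required fields dropped by projection
    List<TProtocol> amended = amender.amendMissingRequiredFields(structType);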
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/DeprecatedFieldProjectionFilter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/DeprecatedFieldProjectionFilter.java
index f84f957..f0b11ca 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/DeprecatedFieldProjectionFilter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/DeprecatedFieldProjectionFilter.java
@@ -29,7 +29,6 @@ import org.apache.parquet.thrift.projection.ThriftProjectionException;
/**
* Filter thrift attributes using glob syntax.
* This is used for parsing values assigned to ThriftReadSupport.THRIFT_COLUMN_FILTER_KEY
- * @author Tianshuo Deng
*/
@Deprecated
public class DeprecatedFieldProjectionFilter implements FieldProjectionFilter {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/PathGlobPattern.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/PathGlobPattern.java
index e576e97..ba5646d 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/PathGlobPattern.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/deprecated/PathGlobPattern.java
@@ -28,8 +28,6 @@ import java.util.regex.PatternSyntaxException;
* full path separated by '/', and double star matching
*
* This is used for parsing values assigned to ThriftReadSupport.THRIFT_COLUMN_FILTER_KEY
- *
- * @author Tianshuo Deng
*/
@Deprecated
public class PathGlobPattern {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityChecker.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityChecker.java
index 81e0b55..f5e215c 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityChecker.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityChecker.java
@@ -42,8 +42,6 @@ import org.apache.parquet.Strings;
* 2. Should not change field type for an existing field
* 3. Should not delete existing field
* 4. Should not make the requirement type more restrictive for a field in the new thrift struct
- *
- * @author Tianshuo Deng
*/
public class CompatibilityChecker {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityRunner.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityRunner.java
index b8d577d..6e30025 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityRunner.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/CompatibilityRunner.java
@@ -37,8 +37,6 @@ import java.util.LinkedList;
* java CompatibilityRunner compare-json {old_json_path} {new_json_path}
* The above command will succeed when the new schema is compatible with the old schema.
* It will fail when they are not compatible. For compatibility rules: {@link CompatibilityChecker}
- *
- * @author Tianshuo Deng
*/
public class CompatibilityRunner {
public static void main(String[] args) throws Exception {
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftType.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftType.java
index 4c2d662..3d64e8d 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftType.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftType.java
@@ -48,9 +48,6 @@ import org.codehaus.jackson.annotate.JsonTypeInfo.Id;
/**
* Descriptor for a Thrift class.
* Used to persist the thrift schema
- *
- * @author Julien Le Dem
- *
*/
@JsonTypeInfo(use = Id.NAME, include = As.PROPERTY, property = "id")
@JsonSubTypes({
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftTypeID.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftTypeID.java
index 161b093..f5f97ed 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftTypeID.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/struct/ThriftTypeID.java
@@ -34,9 +34,6 @@ import org.apache.parquet.thrift.struct.ThriftType.StringType;
import org.apache.parquet.thrift.struct.ThriftType.StructType;
/**
* The list of thrift types
- *
- * @author Julien Le Dem
- *
*/
public enum ThriftTypeID {
STOP (TType.STOP),
--
To stop receiving notification emails like this one, please contact
blue@apache.org.