Posted to commits@arrow.apache.org by we...@apache.org on 2018/12/17 21:35:45 UTC

[arrow] branch master updated: ARROW-4028: [Rust] Merge parquet-rs codebase

This is an automated email from the ASF dual-hosted git repository.

wesm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new e9ed591  ARROW-4028: [Rust] Merge parquet-rs codebase
e9ed591 is described below

commit e9ed591db9cb87e0086bf9fef4201cc726bd5d03
Author: Chao Sun <su...@uber.com>
AuthorDate: Mon Dec 17 15:35:38 2018 -0600

    ARROW-4028: [Rust] Merge parquet-rs codebase
    
    This imports the parquet-rs source code into the Apache Arrow Rust implementation. I include most of the source code except a few things, such as `fuzz` and the benchmarks, which I'm thinking about adding later.
    
    The module hierarchy now looks like:
    - arrow: all the arrow code
    - parquet: all the parquet code (in the future, parquet-arrow integration will live here)
    - util: common utility libraries shared between arrow and parquet (I'll try to move the utils from parquet here in the future); a usage sketch follows this list.
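    
    As a quick orientation, a minimal sketch (not part of this commit; names assumed from the hierarchy above) of reaching the merged code from the crate root:
    
        // Hypothetical smoke test of the merged module layout.
        use arrow::array::Int32Array;           // arrow code
        use arrow::parquet::basic::Compression; // parquet code
        use arrow::util::bit_util;              // shared utilities
    
        fn main() {
            let a = Int32Array::from(vec![1, 2, 3]);
            assert_eq!(a.value(0), 1);
            // Display for the mirrored Thrift enums prints the variant name.
            assert_eq!(Compression::SNAPPY.to_string(), "SNAPPY");
            assert!(bit_util::get_bit(&[0b0000_0001], 0));
        }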
    
    Author: Chao Sun <su...@uber.com>
    Author: Chao Sun <su...@apache.org>
    
    Closes #3050 from sunchao/import-parquet and squashes the following commits:
    
    2ce98bd2a <Chao Sun> Update git submodule
    2d296f8f7 <Chao Sun> ARROW-4028:  Merge parquet-rs codebase
---
 ci/rust-build-main.bat                        |    3 +
 ci/travis_script_rust.sh                      |    2 +
 cpp/submodules/parquet-testing                |    2 +-
 docker-compose.yml                            |    2 +
 rust/Cargo.toml                               |   12 +
 rust/benches/array_from_vec.rs                |    1 -
 rust/benches/builder.rs                       |    6 +-
 rust/build.rs                                 |   43 +
 rust/examples/read_csv.rs                     |    5 +-
 ci/travis_script_rust.sh => rust/rustfmt.toml |   26 +-
 rust/src/array.rs                             |    8 +-
 rust/src/array_data.rs                        |    3 +-
 rust/src/builder.rs                           |    6 +-
 rust/src/csv/reader.rs                        |   10 +-
 rust/src/lib.rs                               |    6 +
 rust/src/{lib.rs => mod.rs}                   |    4 -
 rust/src/parquet/basic.rs                     | 1497 ++++++++++
 rust/src/parquet/column/mod.rs                |  124 +
 rust/src/parquet/column/page.rs               |  296 ++
 rust/src/parquet/column/reader.rs             | 1576 +++++++++++
 rust/src/parquet/column/writer.rs             | 1617 +++++++++++
 rust/src/parquet/compression.rs               |  321 +++
 rust/src/parquet/data_type.rs                 |  463 ++++
 rust/src/parquet/encodings/decoding.rs        | 1403 ++++++++++
 rust/src/parquet/encodings/encoding.rs        | 1360 +++++++++
 rust/src/parquet/encodings/levels.rs          |  529 ++++
 rust/src/{lib.rs => parquet/encodings/mod.rs} |   19 +-
 rust/src/parquet/encodings/rle.rs             |  839 ++++++
 rust/src/parquet/errors.rs                    |   87 +
 rust/src/parquet/file/metadata.rs             |  736 +++++
 rust/src/parquet/file/mod.rs                  |   88 +
 rust/src/parquet/file/properties.rs           |  648 +++++
 rust/src/parquet/file/reader.rs               |  899 ++++++
 rust/src/parquet/file/statistics.rs           |  692 +++++
 rust/src/parquet/file/writer.rs               |  936 +++++++
 rust/src/{lib.rs => parquet/mod.rs}           |   30 +-
 rust/src/parquet/record/api.rs                | 1439 ++++++++++
 rust/src/{lib.rs => parquet/record/mod.rs}    |   20 +-
 rust/src/parquet/record/reader.rs             | 1464 ++++++++++
 rust/src/parquet/record/triplet.rs            |  561 ++++
 rust/src/parquet/schema/mod.rs                |   66 +
 rust/src/parquet/schema/parser.rs             |  764 ++++++
 rust/src/parquet/schema/printer.rs            |  467 ++++
 rust/src/parquet/schema/types.rs              | 1830 +++++++++++++
 rust/src/parquet/util/bit_packing.rs          | 3658 +++++++++++++++++++++++++
 rust/src/parquet/util/bit_util.rs             | 1058 +++++++
 rust/src/parquet/util/hash_util.rs            |  160 ++
 rust/src/parquet/util/io.rs                   |  220 ++
 rust/src/parquet/util/memory.rs               |  524 ++++
 rust/src/{lib.rs => parquet/util/mod.rs}      |   22 +-
 rust/src/parquet/util/test_common.rs          |  190 ++
 rust/src/record_batch.rs                      |    4 +-
 rust/src/tensor.rs                            |    1 +
 53 files changed, 26641 insertions(+), 106 deletions(-)

diff --git a/ci/rust-build-main.bat b/ci/rust-build-main.bat
index c8a51fe..e338f7e 100644
--- a/ci/rust-build-main.bat
+++ b/ci/rust-build-main.bat
@@ -17,6 +17,9 @@
 
 @rem The "main" Rust build script for Windows CI
 
+@rem Retrieve git submodules, configure env var for Parquet unit tests
+git submodule update --init || exit /B
+set PARQUET_TEST_DATA=%CD%\cpp\submodules\parquet-testing\data
 pushd rust
 
 @echo ===================================
diff --git a/ci/travis_script_rust.sh b/ci/travis_script_rust.sh
index 55cce8f..4b09bc2 100755
--- a/ci/travis_script_rust.sh
+++ b/ci/travis_script_rust.sh
@@ -19,6 +19,8 @@
 
 set -e
 
+source $TRAVIS_BUILD_DIR/ci/travis_env_common.sh
+
 RUST_DIR=${TRAVIS_BUILD_DIR}/rust
 
 pushd $RUST_DIR
diff --git a/cpp/submodules/parquet-testing b/cpp/submodules/parquet-testing
index 46ae260..92a8e6c 160000
--- a/cpp/submodules/parquet-testing
+++ b/cpp/submodules/parquet-testing
@@ -1 +1 @@
-Subproject commit 46ae2605c2de306f5740587107dcf333a527f2d1
+Subproject commit 92a8e6c2efdce1925c605d6313994db2c94478fb
diff --git a/docker-compose.yml b/docker-compose.yml
index 0a01a7c..b61511e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -152,6 +152,8 @@ services:
     build:
       context: .
       dockerfile: rust/Dockerfile
+    environment:
+      PARQUET_TEST_DATA: /arrow/cpp/submodules/parquet-testing/data
     volumes: *ubuntu-volumes
 
   r:
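
The CI changes above (the Windows batch script and docker-compose; the Travis script sources travis_env_common.sh, presumably for the same purpose) export PARQUET_TEST_DATA so the Parquet unit tests can locate the shared parquet-testing files. A minimal sketch of how a test might resolve it (the helper name is hypothetical, not part of this commit):

    use std::env;
    use std::path::PathBuf;

    /// Hypothetical helper: resolve the test-data directory configured by CI.
    fn parquet_test_data_dir() -> PathBuf {
        let dir = env::var("PARQUET_TEST_DATA")
            .expect("PARQUET_TEST_DATA should point at parquet-testing/data");
        PathBuf::from(dir)
    }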
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index aa23815..49e8a9d 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -42,10 +42,22 @@ serde_derive = "1.0.80"
 serde_json = "1.0.13"
 rand = "0.5"
 csv = "1.0.0"
+parquet-format = "2.5.0"
+quick-error = "1.2.2"
+byteorder = "1"
+thrift = "0.0.4"
+snap = "0.2"
+brotli = "2.5"
+flate2 = "1.0.2"
+lz4 = "1.23"
+zstd = "0.4"
+chrono = "0.4"
+num-bigint = "0.2"
 num = "0.2"
 
 [dev-dependencies]
 criterion = "0.2"
+lazy_static = "1"
 
 [[bench]]
 name = "array_from_vec"
diff --git a/rust/benches/array_from_vec.rs b/rust/benches/array_from_vec.rs
index 669b88e..f935714 100644
--- a/rust/benches/array_from_vec.rs
+++ b/rust/benches/array_from_vec.rs
@@ -17,7 +17,6 @@
 
 #[macro_use]
 extern crate criterion;
-
 use criterion::Criterion;
 
 extern crate arrow;
diff --git a/rust/benches/builder.rs b/rust/benches/builder.rs
index 04f8a33..90fd75a 100644
--- a/rust/benches/builder.rs
+++ b/rust/benches/builder.rs
@@ -19,11 +19,13 @@ extern crate arrow;
 extern crate criterion;
 extern crate rand;
 
-use arrow::builder::*;
+use std::mem::size_of;
+
 use criterion::*;
 use rand::distributions::Standard;
 use rand::{thread_rng, Rng};
-use std::mem::size_of;
+
+use arrow::builder::*;
 
 // Build arrays with 512k elements.
 const BATCH_SIZE: usize = 8 << 10;
diff --git a/rust/build.rs b/rust/build.rs
new file mode 100644
index 0000000..b42b2a4
--- /dev/null
+++ b/rust/build.rs
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::process::Command;
+
+fn main() {
+    // Set Parquet version, build hash and "created by" string.
+    let version = env!("CARGO_PKG_VERSION");
+    let mut created_by = format!("parquet-rs version {}", version);
+    if let Ok(git_hash) = run(Command::new("git").arg("rev-parse").arg("HEAD")) {
+        created_by.push_str(format!(" (build {})", git_hash).as_str());
+        println!("cargo:rustc-env=PARQUET_BUILD={}", git_hash);
+    }
+    println!("cargo:rustc-env=PARQUET_VERSION={}", version);
+    println!("cargo:rustc-env=PARQUET_CREATED_BY={}", created_by);
+}
+
+/// Runs a command and returns the contents of stdout on successful execution,
+/// or an error message otherwise.
+fn run(command: &mut Command) -> Result<String, String> {
+    println!("Running: `{:?}`", command);
+    match command.output() {
+        Ok(ref output) if output.status.success() => {
+            Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
+        }
+        Ok(ref output) => Err(format!("Failed: `{:?}` ({})", command, output.status)),
+        Err(error) => Err(format!("Failed: `{:?}` ({})", command, error)),
+    }
+}
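
The build script exposes PARQUET_VERSION, PARQUET_CREATED_BY and, when git is available, PARQUET_BUILD as compile-time environment variables. A minimal sketch of consuming them from inside the crate (assumes compilation with this build.rs; the accessor itself is hypothetical):

    /// Hypothetical accessor for the build metadata emitted by build.rs.
    fn build_info() -> String {
        let version = env!("PARQUET_VERSION"); // always set by build.rs
        // PARQUET_BUILD is only set when `git rev-parse HEAD` succeeded,
        // so use option_env! to avoid a compile-time error.
        match option_env!("PARQUET_BUILD") {
            Some(hash) => format!("{} (build {})", version, hash),
            None => version.to_string(),
        }
    }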
diff --git a/rust/examples/read_csv.rs b/rust/examples/read_csv.rs
index df66a81..147d2f9 100644
--- a/rust/examples/read_csv.rs
+++ b/rust/examples/read_csv.rs
@@ -17,11 +17,12 @@
 
 extern crate arrow;
 
+use std::fs::File;
+use std::sync::Arc;
+
 use arrow::array::{BinaryArray, Float64Array};
 use arrow::csv;
 use arrow::datatypes::{DataType, Field, Schema};
-use std::fs::File;
-use std::sync::Arc;
 
 fn main() {
     let schema = Schema::new(vec![
diff --git a/ci/travis_script_rust.sh b/rust/rustfmt.toml
old mode 100755
new mode 100644
similarity index 67%
copy from ci/travis_script_rust.sh
copy to rust/rustfmt.toml
index 55cce8f..72eeee0
--- a/ci/travis_script_rust.sh
+++ b/rust/rustfmt.toml
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -17,26 +15,4 @@
 # specific language governing permissions and limitations
 # under the License.
 
-set -e
-
-RUST_DIR=${TRAVIS_BUILD_DIR}/rust
-
-pushd $RUST_DIR
-
-# show activated toolchain
-rustup show
-
-# raises on any formatting errors
-cargo +stable fmt --all -- --check
-
-# raises on any warnings
-cargo rustc -- -D warnings
-
-cargo build
-cargo test
-cargo bench
-cargo run --example builders
-cargo run --example dynamic_types
-cargo run --example read_csv
-
-popd
+format_doc_comments = true
\ No newline at end of file
diff --git a/rust/src/array.rs b/rust/src/array.rs
index 11e732a..251dd35 100644
--- a/rust/src/array.rs
+++ b/rust/src/array.rs
@@ -657,12 +657,14 @@ impl From<Vec<(Field, ArrayRef)>> for StructArray {
 #[cfg(test)]
 mod tests {
     use super::*;
+
+    use std::sync::Arc;
+    use std::thread;
+
     use crate::array_data::ArrayData;
     use crate::buffer::Buffer;
-    use crate::datatypes::{DataType, Field, ToByteSlice};
+    use crate::datatypes::{DataType, Field};
     use crate::memory;
-    use std::sync::Arc;
-    use std::thread;
 
     #[test]
     fn test_primitive_array_from_vec() {
diff --git a/rust/src/array_data.rs b/rust/src/array_data.rs
index 36a817e..9ea01a4 100644
--- a/rust/src/array_data.rs
+++ b/rust/src/array_data.rs
@@ -225,9 +225,10 @@ impl ArrayDataBuilder {
 
 #[cfg(test)]
 mod tests {
+    use super::*;
+
     use std::sync::Arc;
 
-    use super::{ArrayData, DataType};
     use crate::buffer::Buffer;
     use crate::util::bit_util;
 
diff --git a/rust/src/builder.rs b/rust/src/builder.rs
index fc781ff..d5d222d 100644
--- a/rust/src/builder.rs
+++ b/rust/src/builder.rs
@@ -456,10 +456,10 @@ impl BinaryArrayBuilder {
 
 #[cfg(test)]
 mod tests {
-    use crate::array::Array;
-
     use super::*;
 
+    use crate::array::Array;
+
     #[test]
     fn test_builder_i32_empty() {
         let b = Int32BufferBuilder::new(5);
@@ -825,7 +825,6 @@ mod tests {
 
     #[test]
     fn test_binary_array_builder() {
-        use crate::array::BinaryArray;
         let mut builder = BinaryArrayBuilder::new(20);
 
         builder.push(b'h').unwrap();
@@ -860,7 +859,6 @@ mod tests {
 
     #[test]
     fn test_binary_array_builder_push_string() {
-        use crate::array::BinaryArray;
         let mut builder = BinaryArrayBuilder::new(20);
 
         let var = "hello".to_owned();
diff --git a/rust/src/csv/reader.rs b/rust/src/csv/reader.rs
index 956408e..632aa7a 100644
--- a/rust/src/csv/reader.rs
+++ b/rust/src/csv/reader.rs
@@ -29,16 +29,16 @@
 //! use std::sync::Arc;
 //!
 //! let schema = Schema::new(vec![
-//!   Field::new("city", DataType::Utf8, false),
-//!   Field::new("lat", DataType::Float64, false),
-//!   Field::new("lng", DataType::Float64, false),
+//!     Field::new("city", DataType::Utf8, false),
+//!     Field::new("lat", DataType::Float64, false),
+//!     Field::new("lng", DataType::Float64, false),
 //! ]);
 //!
 //! let file = File::open("test/data/uk_cities.csv").unwrap();
 //!
 //! let mut csv = csv::Reader::new(file, Arc::new(schema), false, 1024, None);
 //! let batch = csv.next().unwrap().unwrap();
-//!```
+//! ```
 
 use std::fs::File;
 use std::io::BufReader;
@@ -195,8 +195,8 @@ impl Reader {
 
 #[cfg(test)]
 mod tests {
-
     use super::*;
+
     use crate::array::*;
     use crate::datatypes::Field;
 
diff --git a/rust/src/lib.rs b/rust/src/lib.rs
index f41d08f..d5708b1 100644
--- a/rust/src/lib.rs
+++ b/rust/src/lib.rs
@@ -15,7 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
+#![feature(type_ascription)]
+#![feature(rustc_private)]
 #![feature(specialization)]
+#![feature(try_from)]
+#![allow(dead_code)]
+#![allow(non_camel_case_types)]
 
 pub mod array;
 pub mod array_data;
@@ -27,6 +32,7 @@ pub mod csv;
 pub mod datatypes;
 pub mod error;
 pub mod memory;
+pub mod parquet;
 pub mod record_batch;
 pub mod tensor;
 pub mod util;
diff --git a/rust/src/lib.rs b/rust/src/mod.rs
similarity index 94%
copy from rust/src/lib.rs
copy to rust/src/mod.rs
index f41d08f..b9fa43a 100644
--- a/rust/src/lib.rs
+++ b/rust/src/mod.rs
@@ -15,11 +15,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#![feature(specialization)]
-
 pub mod array;
 pub mod array_data;
-pub mod array_ops;
 pub mod bitmap;
 pub mod buffer;
 pub mod builder;
@@ -29,4 +26,3 @@ pub mod error;
 pub mod memory;
 pub mod record_batch;
 pub mod tensor;
-pub mod util;
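
The new rust/src/parquet/basic.rs below mirrors the Thrift-generated types from the parquet-format crate and implements From conversions in both directions. A minimal usage sketch (crate paths assumed from the module layout this commit introduces):

    extern crate arrow;
    extern crate parquet_format;

    use arrow::parquet::basic::Type;

    fn main() {
        // Thrift-generated type -> mirrored enum, and back again.
        let physical: Type = parquet_format::Type::INT32.into();
        assert_eq!(physical, Type::INT32);
        let thrift: parquet_format::Type = physical.into();
        assert_eq!(thrift, parquet_format::Type::INT32);
    }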
diff --git a/rust/src/parquet/basic.rs b/rust/src/parquet/basic.rs
new file mode 100644
index 0000000..22e1634
--- /dev/null
+++ b/rust/src/parquet/basic.rs
@@ -0,0 +1,1497 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains Rust mappings for the Thrift definition.
+//! Refer to the `parquet.thrift` file for the raw definitions.
+
+use std::{convert, fmt, result, str};
+
+use parquet_format as parquet;
+
+use crate::parquet::errors::ParquetError;
+
+// ----------------------------------------------------------------------
+// Types from the Thrift definition
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::Type`
+
+/// Types supported by Parquet.
+/// These physical types are intended to be used in combination with the encodings to
+/// control the on-disk storage format.
+/// For example, INT16 is not included as a type, since a good encoding of INT32
+/// would handle it.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Type {
+    BOOLEAN,
+    INT32,
+    INT64,
+    INT96,
+    FLOAT,
+    DOUBLE,
+    BYTE_ARRAY,
+    FIXED_LEN_BYTE_ARRAY,
+}
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::ConvertedType`
+
+/// Common types (logical types) used by frameworks when using Parquet.
+/// This helps map between types in those frameworks to the base types in Parquet.
+/// This is only metadata and not needed to read or write the data.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum LogicalType {
+    NONE,
+    /// A BYTE_ARRAY actually contains UTF8-encoded characters.
+    UTF8,
+
+    /// A map is converted as an optional field containing a repeated key/value pair.
+    MAP,
+
+    /// A key/value pair is converted into a group of two fields.
+    MAP_KEY_VALUE,
+
+    /// A list is converted into an optional field containing a repeated field for its
+    /// values.
+    LIST,
+
+    /// An enum is converted into a binary field.
+    ENUM,
+
+    /// A decimal value.
+    /// This may be used to annotate binary or fixed primitive types. The
+    /// underlying byte array stores the unscaled value encoded as two's
+    /// complement using big-endian byte order (the most significant byte is the
+    /// zeroth element).
+    ///
+    /// This must be accompanied by a (maximum) precision and a scale in the
+    /// SchemaElement. The precision specifies the number of digits in the decimal
+    /// and the scale stores the location of the decimal point. For example 1.23
+    /// would have precision 3 (3 total digits) and scale 2 (the decimal point is
+    /// 2 digits over).
+    DECIMAL,
+
+    /// A date stored as days since Unix epoch, encoded as the INT32 physical type.
+    DATE,
+
+    /// The total number of milliseconds since midnight. The value is stored as an INT32
+    /// physical type.
+    TIME_MILLIS,
+
+    /// The total number of microseconds since midnight. The value is stored as an INT64
+    /// physical type.
+    TIME_MICROS,
+
+    /// Date and time recorded as milliseconds since the Unix epoch.
+    /// Recorded as a physical type of INT64.
+    TIMESTAMP_MILLIS,
+
+    /// Date and time recorded as microseconds since the Unix epoch.
+    /// The value is stored as an INT64 physical type.
+    TIMESTAMP_MICROS,
+
+    /// An unsigned 8 bit integer value stored as INT32 physical type.
+    UINT_8,
+
+    /// An unsigned 16 bit integer value stored as INT32 physical type.
+    UINT_16,
+
+    /// An unsigned 32 bit integer value stored as INT32 physical type.
+    UINT_32,
+
+    /// An unsigned 64 bit integer value stored as INT64 physical type.
+    UINT_64,
+
+    /// A signed 8 bit integer value stored as INT32 physical type.
+    INT_8,
+
+    /// A signed 16 bit integer value stored as INT32 physical type.
+    INT_16,
+
+    /// A signed 32 bit integer value stored as INT32 physical type.
+    INT_32,
+
+    /// A signed 64 bit integer value stored as INT64 physical type.
+    INT_64,
+
+    /// A JSON document embedded within a single UTF8 column.
+    JSON,
+
+    /// A BSON document embedded within a single BINARY column.
+    BSON,
+
+    /// An interval of time.
+    ///
+    /// This type annotates data stored as a FIXED_LEN_BYTE_ARRAY of length 12.
+    /// This data is composed of three separate little endian unsigned integers.
+    /// Each stores a component of a duration of time. The first integer identifies
+    /// the number of months associated with the duration, the second identifies
+    /// the number of days associated with the duration and the third identifies
+    /// the number of milliseconds associated with the provided duration.
+    /// This duration of time is independent of any particular timezone or date.
+    INTERVAL,
+}
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::FieldRepetitionType`
+
+/// Representation of field types in schema.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Repetition {
+    /// Field is required (cannot be null) and each record has exactly 1 value.
+    REQUIRED,
+    /// Field is optional (can be null) and each record has 0 or 1 values.
+    OPTIONAL,
+    /// Field is repeated and can contain 0 or more values.
+    REPEATED,
+}
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::Encoding`
+
+/// Encodings supported by Parquet.
+/// Not all encodings are valid for all types. These enums are also used to specify the
+/// encoding of definition and repetition levels.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum Encoding {
+    /// Default byte encoding.
+    /// - BOOLEAN - 1 bit per value, 0 is false; 1 is true.
+    /// - INT32 - 4 bytes per value, stored as little-endian.
+    /// - INT64 - 8 bytes per value, stored as little-endian.
+    /// - FLOAT - 4 bytes per value, stored as little-endian.
+    /// - DOUBLE - 8 bytes per value, stored as little-endian.
+    /// - BYTE_ARRAY - 4 byte length stored as little endian, followed by bytes.
+    /// - FIXED_LEN_BYTE_ARRAY - just the bytes are stored.
+    PLAIN,
+
+    /// **Deprecated** dictionary encoding.
+    ///
+    /// The values in the dictionary are encoded using PLAIN encoding.
+    /// Since it is deprecated, RLE_DICTIONARY encoding is used for data pages, and PLAIN
+    /// encoding is used for the dictionary page.
+    PLAIN_DICTIONARY,
+
+    /// Group packed run length encoding.
+    ///
+    /// Usable for definition/repetition levels encoding and boolean values.
+    RLE,
+
+    /// Bit packed encoding.
+    ///
+    /// This can only be used if the data has a known max width.
+    /// Usable for definition/repetition levels encoding.
+    BIT_PACKED,
+
+    /// Delta encoding for integers, either INT32 or INT64.
+    ///
+    /// Works best on sorted data.
+    DELTA_BINARY_PACKED,
+
+    /// Encoding for byte arrays to separate the length values and the data.
+    ///
+    /// The lengths are encoded using DELTA_BINARY_PACKED encoding.
+    DELTA_LENGTH_BYTE_ARRAY,
+
+    /// Incremental encoding for byte arrays.
+    ///
+    /// Prefix lengths are encoded using DELTA_BINARY_PACKED encoding.
+    /// Suffixes are stored using DELTA_LENGTH_BYTE_ARRAY encoding.
+    DELTA_BYTE_ARRAY,
+
+    /// Dictionary encoding.
+    ///
+    /// The ids are encoded using the RLE encoding.
+    RLE_DICTIONARY,
+}
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::CompressionCodec`
+
+/// Supported compression algorithms.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Compression {
+    UNCOMPRESSED,
+    SNAPPY,
+    GZIP,
+    LZO,
+    BROTLI,
+    LZ4,
+    ZSTD,
+}
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::PageType`
+
+/// Available data pages for Parquet file format.
+/// Note that some of the page types may not be supported.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum PageType {
+    DATA_PAGE,
+    INDEX_PAGE,
+    DICTIONARY_PAGE,
+    DATA_PAGE_V2,
+}
+
+// ----------------------------------------------------------------------
+// Mirrors `parquet::ColumnOrder`
+
+/// Sort order for page and column statistics.
+///
+/// Types are associated with sort orders; column stats are aggregated using a sort
+/// order, and that sort order should be taken into account when comparing values
+/// against the statistics min/max.
+///
+/// See reference in
+/// https://github.com/apache/parquet-cpp/blob/master/src/parquet/types.h
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum SortOrder {
+    /// Signed (either value or legacy byte-wise) comparison.
+    SIGNED,
+    /// Unsigned (depending on physical type either value or byte-wise) comparison.
+    UNSIGNED,
+    /// Comparison is undefined.
+    UNDEFINED,
+}
+
+/// Column order that specifies what method was used to aggregate min/max values for
+/// statistics.
+///
+/// If column order is undefined, then it is the legacy behaviour and all values should
+/// be compared as signed values/bytes.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ColumnOrder {
+    /// Column uses the order defined by its logical or physical type
+    /// (if there is no logical type), parquet-format 2.4.0+.
+    TYPE_DEFINED_ORDER(SortOrder),
+    /// Undefined column order; this is the legacy behaviour before parquet-format 2.4.0.
+    /// Sort order is always SIGNED.
+    UNDEFINED,
+}
+
+impl ColumnOrder {
+    /// Returns sort order for a physical/logical type.
+    pub fn get_sort_order(logical_type: LogicalType, physical_type: Type) -> SortOrder {
+        match logical_type {
+            // Unsigned byte-wise comparison.
+            LogicalType::UTF8 | LogicalType::JSON | LogicalType::BSON | LogicalType::ENUM => {
+                SortOrder::UNSIGNED
+            }
+
+            LogicalType::INT_8
+            | LogicalType::INT_16
+            | LogicalType::INT_32
+            | LogicalType::INT_64 => SortOrder::SIGNED,
+
+            LogicalType::UINT_8
+            | LogicalType::UINT_16
+            | LogicalType::UINT_32
+            | LogicalType::UINT_64 => SortOrder::UNSIGNED,
+
+            // Signed comparison of the represented value.
+            LogicalType::DECIMAL => SortOrder::SIGNED,
+
+            LogicalType::DATE => SortOrder::SIGNED,
+
+            LogicalType::TIME_MILLIS
+            | LogicalType::TIME_MICROS
+            | LogicalType::TIMESTAMP_MILLIS
+            | LogicalType::TIMESTAMP_MICROS => SortOrder::SIGNED,
+
+            LogicalType::INTERVAL => SortOrder::UNSIGNED,
+
+            LogicalType::LIST | LogicalType::MAP | LogicalType::MAP_KEY_VALUE => {
+                SortOrder::UNDEFINED
+            }
+
+            // Fall back to physical type.
+            LogicalType::NONE => Self::get_default_sort_order(physical_type),
+        }
+    }
+
+    /// Returns default sort order based on physical type.
+    fn get_default_sort_order(physical_type: Type) -> SortOrder {
+        match physical_type {
+            // Order: false, true
+            Type::BOOLEAN => SortOrder::UNSIGNED,
+            Type::INT32 | Type::INT64 => SortOrder::SIGNED,
+            Type::INT96 => SortOrder::UNDEFINED,
+            // Notes to remember when comparing float/double values:
+            // If the min is a NaN, it should be ignored.
+            // If the max is a NaN, it should be ignored.
+            // If the min is +0, the row group may contain -0 values as well.
+            // If the max is -0, the row group may contain +0 values as well.
+            // When looking for NaN values, min and max should be ignored.
+            Type::FLOAT | Type::DOUBLE => SortOrder::SIGNED,
+            // unsigned byte-wise comparison
+            Type::BYTE_ARRAY | Type::FIXED_LEN_BYTE_ARRAY => SortOrder::UNSIGNED,
+        }
+    }
+
+    /// Returns sort order associated with this column order.
+    pub fn sort_order(&self) -> SortOrder {
+        match *self {
+            ColumnOrder::TYPE_DEFINED_ORDER(order) => order,
+            ColumnOrder::UNDEFINED => SortOrder::SIGNED,
+        }
+    }
+}
+
+impl fmt::Display for Type {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for LogicalType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for Repetition {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for Encoding {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for Compression {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for PageType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for SortOrder {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Display for ColumnOrder {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+// ----------------------------------------------------------------------
+// parquet::Type <=> Type conversion
+
+impl convert::From<parquet::Type> for Type {
+    fn from(value: parquet::Type) -> Self {
+        match value {
+            parquet::Type::BOOLEAN => Type::BOOLEAN,
+            parquet::Type::INT32 => Type::INT32,
+            parquet::Type::INT64 => Type::INT64,
+            parquet::Type::INT96 => Type::INT96,
+            parquet::Type::FLOAT => Type::FLOAT,
+            parquet::Type::DOUBLE => Type::DOUBLE,
+            parquet::Type::BYTE_ARRAY => Type::BYTE_ARRAY,
+            parquet::Type::FIXED_LEN_BYTE_ARRAY => Type::FIXED_LEN_BYTE_ARRAY,
+        }
+    }
+}
+
+impl convert::From<Type> for parquet::Type {
+    fn from(value: Type) -> Self {
+        match value {
+            Type::BOOLEAN => parquet::Type::BOOLEAN,
+            Type::INT32 => parquet::Type::INT32,
+            Type::INT64 => parquet::Type::INT64,
+            Type::INT96 => parquet::Type::INT96,
+            Type::FLOAT => parquet::Type::FLOAT,
+            Type::DOUBLE => parquet::Type::DOUBLE,
+            Type::BYTE_ARRAY => parquet::Type::BYTE_ARRAY,
+            Type::FIXED_LEN_BYTE_ARRAY => parquet::Type::FIXED_LEN_BYTE_ARRAY,
+        }
+    }
+}
+
+// ----------------------------------------------------------------------
+// parquet::ConvertedType <=> LogicalType conversion
+
+impl convert::From<Option<parquet::ConvertedType>> for LogicalType {
+    fn from(option: Option<parquet::ConvertedType>) -> Self {
+        match option {
+            None => LogicalType::NONE,
+            Some(value) => match value {
+                parquet::ConvertedType::UTF8 => LogicalType::UTF8,
+                parquet::ConvertedType::MAP => LogicalType::MAP,
+                parquet::ConvertedType::MAP_KEY_VALUE => LogicalType::MAP_KEY_VALUE,
+                parquet::ConvertedType::LIST => LogicalType::LIST,
+                parquet::ConvertedType::ENUM => LogicalType::ENUM,
+                parquet::ConvertedType::DECIMAL => LogicalType::DECIMAL,
+                parquet::ConvertedType::DATE => LogicalType::DATE,
+                parquet::ConvertedType::TIME_MILLIS => LogicalType::TIME_MILLIS,
+                parquet::ConvertedType::TIME_MICROS => LogicalType::TIME_MICROS,
+                parquet::ConvertedType::TIMESTAMP_MILLIS => LogicalType::TIMESTAMP_MILLIS,
+                parquet::ConvertedType::TIMESTAMP_MICROS => LogicalType::TIMESTAMP_MICROS,
+                parquet::ConvertedType::UINT_8 => LogicalType::UINT_8,
+                parquet::ConvertedType::UINT_16 => LogicalType::UINT_16,
+                parquet::ConvertedType::UINT_32 => LogicalType::UINT_32,
+                parquet::ConvertedType::UINT_64 => LogicalType::UINT_64,
+                parquet::ConvertedType::INT_8 => LogicalType::INT_8,
+                parquet::ConvertedType::INT_16 => LogicalType::INT_16,
+                parquet::ConvertedType::INT_32 => LogicalType::INT_32,
+                parquet::ConvertedType::INT_64 => LogicalType::INT_64,
+                parquet::ConvertedType::JSON => LogicalType::JSON,
+                parquet::ConvertedType::BSON => LogicalType::BSON,
+                parquet::ConvertedType::INTERVAL => LogicalType::INTERVAL,
+            },
+        }
+    }
+}
+
+impl convert::From<LogicalType> for Option<parquet::ConvertedType> {
+    fn from(value: LogicalType) -> Self {
+        match value {
+            LogicalType::NONE => None,
+            LogicalType::UTF8 => Some(parquet::ConvertedType::UTF8),
+            LogicalType::MAP => Some(parquet::ConvertedType::MAP),
+            LogicalType::MAP_KEY_VALUE => Some(parquet::ConvertedType::MAP_KEY_VALUE),
+            LogicalType::LIST => Some(parquet::ConvertedType::LIST),
+            LogicalType::ENUM => Some(parquet::ConvertedType::ENUM),
+            LogicalType::DECIMAL => Some(parquet::ConvertedType::DECIMAL),
+            LogicalType::DATE => Some(parquet::ConvertedType::DATE),
+            LogicalType::TIME_MILLIS => Some(parquet::ConvertedType::TIME_MILLIS),
+            LogicalType::TIME_MICROS => Some(parquet::ConvertedType::TIME_MICROS),
+            LogicalType::TIMESTAMP_MILLIS => Some(parquet::ConvertedType::TIMESTAMP_MILLIS),
+            LogicalType::TIMESTAMP_MICROS => Some(parquet::ConvertedType::TIMESTAMP_MICROS),
+            LogicalType::UINT_8 => Some(parquet::ConvertedType::UINT_8),
+            LogicalType::UINT_16 => Some(parquet::ConvertedType::UINT_16),
+            LogicalType::UINT_32 => Some(parquet::ConvertedType::UINT_32),
+            LogicalType::UINT_64 => Some(parquet::ConvertedType::UINT_64),
+            LogicalType::INT_8 => Some(parquet::ConvertedType::INT_8),
+            LogicalType::INT_16 => Some(parquet::ConvertedType::INT_16),
+            LogicalType::INT_32 => Some(parquet::ConvertedType::INT_32),
+            LogicalType::INT_64 => Some(parquet::ConvertedType::INT_64),
+            LogicalType::JSON => Some(parquet::ConvertedType::JSON),
+            LogicalType::BSON => Some(parquet::ConvertedType::BSON),
+            LogicalType::INTERVAL => Some(parquet::ConvertedType::INTERVAL),
+        }
+    }
+}
+
+// ----------------------------------------------------------------------
+// parquet::FieldRepetitionType <=> Repetition conversion
+
+impl convert::From<parquet::FieldRepetitionType> for Repetition {
+    fn from(value: parquet::FieldRepetitionType) -> Self {
+        match value {
+            parquet::FieldRepetitionType::REQUIRED => Repetition::REQUIRED,
+            parquet::FieldRepetitionType::OPTIONAL => Repetition::OPTIONAL,
+            parquet::FieldRepetitionType::REPEATED => Repetition::REPEATED,
+        }
+    }
+}
+
+impl convert::From<Repetition> for parquet::FieldRepetitionType {
+    fn from(value: Repetition) -> Self {
+        match value {
+            Repetition::REQUIRED => parquet::FieldRepetitionType::REQUIRED,
+            Repetition::OPTIONAL => parquet::FieldRepetitionType::OPTIONAL,
+            Repetition::REPEATED => parquet::FieldRepetitionType::REPEATED,
+        }
+    }
+}
+
+// ----------------------------------------------------------------------
+// parquet::Encoding <=> Encoding conversion
+
+impl convert::From<parquet::Encoding> for Encoding {
+    fn from(value: parquet::Encoding) -> Self {
+        match value {
+            parquet::Encoding::PLAIN => Encoding::PLAIN,
+            parquet::Encoding::PLAIN_DICTIONARY => Encoding::PLAIN_DICTIONARY,
+            parquet::Encoding::RLE => Encoding::RLE,
+            parquet::Encoding::BIT_PACKED => Encoding::BIT_PACKED,
+            parquet::Encoding::DELTA_BINARY_PACKED => Encoding::DELTA_BINARY_PACKED,
+            parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY => Encoding::DELTA_LENGTH_BYTE_ARRAY,
+            parquet::Encoding::DELTA_BYTE_ARRAY => Encoding::DELTA_BYTE_ARRAY,
+            parquet::Encoding::RLE_DICTIONARY => Encoding::RLE_DICTIONARY,
+        }
+    }
+}
+
+impl convert::From<Encoding> for parquet::Encoding {
+    fn from(value: Encoding) -> Self {
+        match value {
+            Encoding::PLAIN => parquet::Encoding::PLAIN,
+            Encoding::PLAIN_DICTIONARY => parquet::Encoding::PLAIN_DICTIONARY,
+            Encoding::RLE => parquet::Encoding::RLE,
+            Encoding::BIT_PACKED => parquet::Encoding::BIT_PACKED,
+            Encoding::DELTA_BINARY_PACKED => parquet::Encoding::DELTA_BINARY_PACKED,
+            Encoding::DELTA_LENGTH_BYTE_ARRAY => parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY,
+            Encoding::DELTA_BYTE_ARRAY => parquet::Encoding::DELTA_BYTE_ARRAY,
+            Encoding::RLE_DICTIONARY => parquet::Encoding::RLE_DICTIONARY,
+        }
+    }
+}
+
+// ----------------------------------------------------------------------
+// parquet::CompressionCodec <=> Compression conversion
+
+impl convert::From<parquet::CompressionCodec> for Compression {
+    fn from(value: parquet::CompressionCodec) -> Self {
+        match value {
+            parquet::CompressionCodec::UNCOMPRESSED => Compression::UNCOMPRESSED,
+            parquet::CompressionCodec::SNAPPY => Compression::SNAPPY,
+            parquet::CompressionCodec::GZIP => Compression::GZIP,
+            parquet::CompressionCodec::LZO => Compression::LZO,
+            parquet::CompressionCodec::BROTLI => Compression::BROTLI,
+            parquet::CompressionCodec::LZ4 => Compression::LZ4,
+            parquet::CompressionCodec::ZSTD => Compression::ZSTD,
+        }
+    }
+}
+
+impl convert::From<Compression> for parquet::CompressionCodec {
+    fn from(value: Compression) -> Self {
+        match value {
+            Compression::UNCOMPRESSED => parquet::CompressionCodec::UNCOMPRESSED,
+            Compression::SNAPPY => parquet::CompressionCodec::SNAPPY,
+            Compression::GZIP => parquet::CompressionCodec::GZIP,
+            Compression::LZO => parquet::CompressionCodec::LZO,
+            Compression::BROTLI => parquet::CompressionCodec::BROTLI,
+            Compression::LZ4 => parquet::CompressionCodec::LZ4,
+            Compression::ZSTD => parquet::CompressionCodec::ZSTD,
+        }
+    }
+}
+
+// ----------------------------------------------------------------------
+// parquet::PageType <=> PageType conversion
+
+impl convert::From<parquet::PageType> for PageType {
+    fn from(value: parquet::PageType) -> Self {
+        match value {
+            parquet::PageType::DATA_PAGE => PageType::DATA_PAGE,
+            parquet::PageType::INDEX_PAGE => PageType::INDEX_PAGE,
+            parquet::PageType::DICTIONARY_PAGE => PageType::DICTIONARY_PAGE,
+            parquet::PageType::DATA_PAGE_V2 => PageType::DATA_PAGE_V2,
+        }
+    }
+}
+
+impl convert::From<PageType> for parquet::PageType {
+    fn from(value: PageType) -> Self {
+        match value {
+            PageType::DATA_PAGE => parquet::PageType::DATA_PAGE,
+            PageType::INDEX_PAGE => parquet::PageType::INDEX_PAGE,
+            PageType::DICTIONARY_PAGE => parquet::PageType::DICTIONARY_PAGE,
+            PageType::DATA_PAGE_V2 => parquet::PageType::DATA_PAGE_V2,
+        }
+    }
+}
+
+// ----------------------------------------------------------------------
+// String conversions for schema parsing.
+
+impl str::FromStr for Repetition {
+    type Err = ParquetError;
+
+    fn from_str(s: &str) -> result::Result<Self, Self::Err> {
+        match s {
+            "REQUIRED" => Ok(Repetition::REQUIRED),
+            "OPTIONAL" => Ok(Repetition::OPTIONAL),
+            "REPEATED" => Ok(Repetition::REPEATED),
+            other => Err(general_err!("Invalid repetition {}", other)),
+        }
+    }
+}
+
+impl str::FromStr for Type {
+    type Err = ParquetError;
+
+    fn from_str(s: &str) -> result::Result<Self, Self::Err> {
+        match s {
+            "BOOLEAN" => Ok(Type::BOOLEAN),
+            "INT32" => Ok(Type::INT32),
+            "INT64" => Ok(Type::INT64),
+            "INT96" => Ok(Type::INT96),
+            "FLOAT" => Ok(Type::FLOAT),
+            "DOUBLE" => Ok(Type::DOUBLE),
+            "BYTE_ARRAY" | "BINARY" => Ok(Type::BYTE_ARRAY),
+            "FIXED_LEN_BYTE_ARRAY" => Ok(Type::FIXED_LEN_BYTE_ARRAY),
+            other => Err(general_err!("Invalid type {}", other)),
+        }
+    }
+}
+
+impl str::FromStr for LogicalType {
+    type Err = ParquetError;
+
+    fn from_str(s: &str) -> result::Result<Self, Self::Err> {
+        match s {
+            "NONE" => Ok(LogicalType::NONE),
+            "UTF8" => Ok(LogicalType::UTF8),
+            "MAP" => Ok(LogicalType::MAP),
+            "MAP_KEY_VALUE" => Ok(LogicalType::MAP_KEY_VALUE),
+            "LIST" => Ok(LogicalType::LIST),
+            "ENUM" => Ok(LogicalType::ENUM),
+            "DECIMAL" => Ok(LogicalType::DECIMAL),
+            "DATE" => Ok(LogicalType::DATE),
+            "TIME_MILLIS" => Ok(LogicalType::TIME_MILLIS),
+            "TIME_MICROS" => Ok(LogicalType::TIME_MICROS),
+            "TIMESTAMP_MILLIS" => Ok(LogicalType::TIMESTAMP_MILLIS),
+            "TIMESTAMP_MICROS" => Ok(LogicalType::TIMESTAMP_MICROS),
+            "UINT_8" => Ok(LogicalType::UINT_8),
+            "UINT_16" => Ok(LogicalType::UINT_16),
+            "UINT_32" => Ok(LogicalType::UINT_32),
+            "UINT_64" => Ok(LogicalType::UINT_64),
+            "INT_8" => Ok(LogicalType::INT_8),
+            "INT_16" => Ok(LogicalType::INT_16),
+            "INT_32" => Ok(LogicalType::INT_32),
+            "INT_64" => Ok(LogicalType::INT_64),
+            "JSON" => Ok(LogicalType::JSON),
+            "BSON" => Ok(LogicalType::BSON),
+            "INTERVAL" => Ok(LogicalType::INTERVAL),
+            other => Err(general_err!("Invalid logical type {}", other)),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_display_type() {
+        assert_eq!(Type::BOOLEAN.to_string(), "BOOLEAN");
+        assert_eq!(Type::INT32.to_string(), "INT32");
+        assert_eq!(Type::INT64.to_string(), "INT64");
+        assert_eq!(Type::INT96.to_string(), "INT96");
+        assert_eq!(Type::FLOAT.to_string(), "FLOAT");
+        assert_eq!(Type::DOUBLE.to_string(), "DOUBLE");
+        assert_eq!(Type::BYTE_ARRAY.to_string(), "BYTE_ARRAY");
+        assert_eq!(
+            Type::FIXED_LEN_BYTE_ARRAY.to_string(),
+            "FIXED_LEN_BYTE_ARRAY"
+        );
+    }
+
+    #[test]
+    fn test_from_type() {
+        assert_eq!(Type::from(parquet::Type::BOOLEAN), Type::BOOLEAN);
+        assert_eq!(Type::from(parquet::Type::INT32), Type::INT32);
+        assert_eq!(Type::from(parquet::Type::INT64), Type::INT64);
+        assert_eq!(Type::from(parquet::Type::INT96), Type::INT96);
+        assert_eq!(Type::from(parquet::Type::FLOAT), Type::FLOAT);
+        assert_eq!(Type::from(parquet::Type::DOUBLE), Type::DOUBLE);
+        assert_eq!(Type::from(parquet::Type::BYTE_ARRAY), Type::BYTE_ARRAY);
+        assert_eq!(
+            Type::from(parquet::Type::FIXED_LEN_BYTE_ARRAY),
+            Type::FIXED_LEN_BYTE_ARRAY
+        );
+    }
+
+    #[test]
+    fn test_into_type() {
+        assert_eq!(parquet::Type::BOOLEAN, Type::BOOLEAN.into());
+        assert_eq!(parquet::Type::INT32, Type::INT32.into());
+        assert_eq!(parquet::Type::INT64, Type::INT64.into());
+        assert_eq!(parquet::Type::INT96, Type::INT96.into());
+        assert_eq!(parquet::Type::FLOAT, Type::FLOAT.into());
+        assert_eq!(parquet::Type::DOUBLE, Type::DOUBLE.into());
+        assert_eq!(parquet::Type::BYTE_ARRAY, Type::BYTE_ARRAY.into());
+        assert_eq!(
+            parquet::Type::FIXED_LEN_BYTE_ARRAY,
+            Type::FIXED_LEN_BYTE_ARRAY.into()
+        );
+    }
+
+    #[test]
+    fn test_from_string_into_type() {
+        assert_eq!(
+            Type::BOOLEAN.to_string().parse::<Type>().unwrap(),
+            Type::BOOLEAN
+        );
+        assert_eq!(
+            Type::INT32.to_string().parse::<Type>().unwrap(),
+            Type::INT32
+        );
+        assert_eq!(
+            Type::INT64.to_string().parse::<Type>().unwrap(),
+            Type::INT64
+        );
+        assert_eq!(
+            Type::INT96.to_string().parse::<Type>().unwrap(),
+            Type::INT96
+        );
+        assert_eq!(
+            Type::FLOAT.to_string().parse::<Type>().unwrap(),
+            Type::FLOAT
+        );
+        assert_eq!(
+            Type::DOUBLE.to_string().parse::<Type>().unwrap(),
+            Type::DOUBLE
+        );
+        assert_eq!(
+            Type::BYTE_ARRAY.to_string().parse::<Type>().unwrap(),
+            Type::BYTE_ARRAY
+        );
+        assert_eq!("BINARY".parse::<Type>().unwrap(), Type::BYTE_ARRAY);
+        assert_eq!(
+            Type::FIXED_LEN_BYTE_ARRAY
+                .to_string()
+                .parse::<Type>()
+                .unwrap(),
+            Type::FIXED_LEN_BYTE_ARRAY
+        );
+    }
+
+    #[test]
+    fn test_display_logical_type() {
+        assert_eq!(LogicalType::NONE.to_string(), "NONE");
+        assert_eq!(LogicalType::UTF8.to_string(), "UTF8");
+        assert_eq!(LogicalType::MAP.to_string(), "MAP");
+        assert_eq!(LogicalType::MAP_KEY_VALUE.to_string(), "MAP_KEY_VALUE");
+        assert_eq!(LogicalType::LIST.to_string(), "LIST");
+        assert_eq!(LogicalType::ENUM.to_string(), "ENUM");
+        assert_eq!(LogicalType::DECIMAL.to_string(), "DECIMAL");
+        assert_eq!(LogicalType::DATE.to_string(), "DATE");
+        assert_eq!(LogicalType::TIME_MILLIS.to_string(), "TIME_MILLIS");
+        assert_eq!(LogicalType::DATE.to_string(), "DATE");
+        assert_eq!(LogicalType::TIME_MICROS.to_string(), "TIME_MICROS");
+        assert_eq!(
+            LogicalType::TIMESTAMP_MILLIS.to_string(),
+            "TIMESTAMP_MILLIS"
+        );
+        assert_eq!(
+            LogicalType::TIMESTAMP_MICROS.to_string(),
+            "TIMESTAMP_MICROS"
+        );
+        assert_eq!(LogicalType::UINT_8.to_string(), "UINT_8");
+        assert_eq!(LogicalType::UINT_16.to_string(), "UINT_16");
+        assert_eq!(LogicalType::UINT_32.to_string(), "UINT_32");
+        assert_eq!(LogicalType::UINT_64.to_string(), "UINT_64");
+        assert_eq!(LogicalType::INT_8.to_string(), "INT_8");
+        assert_eq!(LogicalType::INT_16.to_string(), "INT_16");
+        assert_eq!(LogicalType::INT_32.to_string(), "INT_32");
+        assert_eq!(LogicalType::INT_64.to_string(), "INT_64");
+        assert_eq!(LogicalType::JSON.to_string(), "JSON");
+        assert_eq!(LogicalType::BSON.to_string(), "BSON");
+        assert_eq!(LogicalType::INTERVAL.to_string(), "INTERVAL");
+    }
+
+    #[test]
+    fn test_from_logical_type() {
+        assert_eq!(LogicalType::from(None), LogicalType::NONE);
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::UTF8)),
+            LogicalType::UTF8
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::MAP)),
+            LogicalType::MAP
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::MAP_KEY_VALUE)),
+            LogicalType::MAP_KEY_VALUE
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::LIST)),
+            LogicalType::LIST
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::ENUM)),
+            LogicalType::ENUM
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::DECIMAL)),
+            LogicalType::DECIMAL
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::DATE)),
+            LogicalType::DATE
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::TIME_MILLIS)),
+            LogicalType::TIME_MILLIS
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::TIME_MICROS)),
+            LogicalType::TIME_MICROS
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::TIMESTAMP_MILLIS)),
+            LogicalType::TIMESTAMP_MILLIS
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::TIMESTAMP_MICROS)),
+            LogicalType::TIMESTAMP_MICROS
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::UINT_8)),
+            LogicalType::UINT_8
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::UINT_16)),
+            LogicalType::UINT_16
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::UINT_32)),
+            LogicalType::UINT_32
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::UINT_64)),
+            LogicalType::UINT_64
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::INT_8)),
+            LogicalType::INT_8
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::INT_16)),
+            LogicalType::INT_16
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::INT_32)),
+            LogicalType::INT_32
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::INT_64)),
+            LogicalType::INT_64
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::JSON)),
+            LogicalType::JSON
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::BSON)),
+            LogicalType::BSON
+        );
+        assert_eq!(
+            LogicalType::from(Some(parquet::ConvertedType::INTERVAL)),
+            LogicalType::INTERVAL
+        );
+    }
+
+    #[test]
+    fn test_into_logical_type() {
+        let converted_type: Option<parquet::ConvertedType> = None;
+        assert_eq!(converted_type, LogicalType::NONE.into());
+        assert_eq!(Some(parquet::ConvertedType::UTF8), LogicalType::UTF8.into());
+        assert_eq!(Some(parquet::ConvertedType::MAP), LogicalType::MAP.into());
+        assert_eq!(
+            Some(parquet::ConvertedType::MAP_KEY_VALUE),
+            LogicalType::MAP_KEY_VALUE.into()
+        );
+        assert_eq!(Some(parquet::ConvertedType::LIST), LogicalType::LIST.into());
+        assert_eq!(Some(parquet::ConvertedType::ENUM), LogicalType::ENUM.into());
+        assert_eq!(
+            Some(parquet::ConvertedType::DECIMAL),
+            LogicalType::DECIMAL.into()
+        );
+        assert_eq!(Some(parquet::ConvertedType::DATE), LogicalType::DATE.into());
+        assert_eq!(
+            Some(parquet::ConvertedType::TIME_MILLIS),
+            LogicalType::TIME_MILLIS.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::TIME_MICROS),
+            LogicalType::TIME_MICROS.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::TIMESTAMP_MILLIS),
+            LogicalType::TIMESTAMP_MILLIS.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::TIMESTAMP_MICROS),
+            LogicalType::TIMESTAMP_MICROS.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::UINT_8),
+            LogicalType::UINT_8.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::UINT_16),
+            LogicalType::UINT_16.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::UINT_32),
+            LogicalType::UINT_32.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::UINT_64),
+            LogicalType::UINT_64.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::INT_8),
+            LogicalType::INT_8.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::INT_16),
+            LogicalType::INT_16.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::INT_32),
+            LogicalType::INT_32.into()
+        );
+        assert_eq!(
+            Some(parquet::ConvertedType::INT_64),
+            LogicalType::INT_64.into()
+        );
+        assert_eq!(Some(parquet::ConvertedType::JSON), LogicalType::JSON.into());
+        assert_eq!(Some(parquet::ConvertedType::BSON), LogicalType::BSON.into());
+        assert_eq!(
+            Some(parquet::ConvertedType::INTERVAL),
+            LogicalType::INTERVAL.into()
+        );
+    }
+
+    #[test]
+    fn test_from_string_into_logical_type() {
+        assert_eq!(
+            LogicalType::NONE
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::NONE
+        );
+        assert_eq!(
+            LogicalType::UTF8
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::UTF8
+        );
+        assert_eq!(
+            LogicalType::MAP.to_string().parse::<LogicalType>().unwrap(),
+            LogicalType::MAP
+        );
+        assert_eq!(
+            LogicalType::MAP_KEY_VALUE
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::MAP_KEY_VALUE
+        );
+        assert_eq!(
+            LogicalType::LIST
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::LIST
+        );
+        assert_eq!(
+            LogicalType::ENUM
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::ENUM
+        );
+        assert_eq!(
+            LogicalType::DECIMAL
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::DECIMAL
+        );
+        assert_eq!(
+            LogicalType::DATE
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::DATE
+        );
+        assert_eq!(
+            LogicalType::TIME_MILLIS
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::TIME_MILLIS
+        );
+        assert_eq!(
+            LogicalType::TIME_MICROS
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::TIME_MICROS
+        );
+        assert_eq!(
+            LogicalType::TIMESTAMP_MILLIS
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::TIMESTAMP_MILLIS
+        );
+        assert_eq!(
+            LogicalType::TIMESTAMP_MICROS
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::TIMESTAMP_MICROS
+        );
+        assert_eq!(
+            LogicalType::UINT_8
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::UINT_8
+        );
+        assert_eq!(
+            LogicalType::UINT_16
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::UINT_16
+        );
+        assert_eq!(
+            LogicalType::UINT_32
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::UINT_32
+        );
+        assert_eq!(
+            LogicalType::UINT_64
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::UINT_64
+        );
+        assert_eq!(
+            LogicalType::INT_8
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::INT_8
+        );
+        assert_eq!(
+            LogicalType::INT_16
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::INT_16
+        );
+        assert_eq!(
+            LogicalType::INT_32
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::INT_32
+        );
+        assert_eq!(
+            LogicalType::INT_64
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::INT_64
+        );
+        assert_eq!(
+            LogicalType::JSON
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::JSON
+        );
+        assert_eq!(
+            LogicalType::BSON
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::BSON
+        );
+        assert_eq!(
+            LogicalType::INTERVAL
+                .to_string()
+                .parse::<LogicalType>()
+                .unwrap(),
+            LogicalType::INTERVAL
+        );
+    }
+
+    #[test]
+    fn test_display_repetition() {
+        assert_eq!(Repetition::REQUIRED.to_string(), "REQUIRED");
+        assert_eq!(Repetition::OPTIONAL.to_string(), "OPTIONAL");
+        assert_eq!(Repetition::REPEATED.to_string(), "REPEATED");
+    }
+
+    #[test]
+    fn test_from_repetition() {
+        assert_eq!(
+            Repetition::from(parquet::FieldRepetitionType::REQUIRED),
+            Repetition::REQUIRED
+        );
+        assert_eq!(
+            Repetition::from(parquet::FieldRepetitionType::OPTIONAL),
+            Repetition::OPTIONAL
+        );
+        assert_eq!(
+            Repetition::from(parquet::FieldRepetitionType::REPEATED),
+            Repetition::REPEATED
+        );
+    }
+
+    #[test]
+    fn test_into_repetition() {
+        assert_eq!(
+            parquet::FieldRepetitionType::REQUIRED,
+            Repetition::REQUIRED.into()
+        );
+        assert_eq!(
+            parquet::FieldRepetitionType::OPTIONAL,
+            Repetition::OPTIONAL.into()
+        );
+        assert_eq!(
+            parquet::FieldRepetitionType::REPEATED,
+            Repetition::REPEATED.into()
+        );
+    }
+
+    #[test]
+    fn test_from_string_into_repetition() {
+        assert_eq!(
+            Repetition::REQUIRED
+                .to_string()
+                .parse::<Repetition>()
+                .unwrap(),
+            Repetition::REQUIRED
+        );
+        assert_eq!(
+            Repetition::OPTIONAL
+                .to_string()
+                .parse::<Repetition>()
+                .unwrap(),
+            Repetition::OPTIONAL
+        );
+        assert_eq!(
+            Repetition::REPEATED
+                .to_string()
+                .parse::<Repetition>()
+                .unwrap(),
+            Repetition::REPEATED
+        );
+    }
+
+    #[test]
+    fn test_display_encoding() {
+        assert_eq!(Encoding::PLAIN.to_string(), "PLAIN");
+        assert_eq!(Encoding::PLAIN_DICTIONARY.to_string(), "PLAIN_DICTIONARY");
+        assert_eq!(Encoding::RLE.to_string(), "RLE");
+        assert_eq!(Encoding::BIT_PACKED.to_string(), "BIT_PACKED");
+        assert_eq!(
+            Encoding::DELTA_BINARY_PACKED.to_string(),
+            "DELTA_BINARY_PACKED"
+        );
+        assert_eq!(
+            Encoding::DELTA_LENGTH_BYTE_ARRAY.to_string(),
+            "DELTA_LENGTH_BYTE_ARRAY"
+        );
+        assert_eq!(Encoding::DELTA_BYTE_ARRAY.to_string(), "DELTA_BYTE_ARRAY");
+        assert_eq!(Encoding::RLE_DICTIONARY.to_string(), "RLE_DICTIONARY");
+    }
+
+    #[test]
+    fn test_from_encoding() {
+        assert_eq!(Encoding::from(parquet::Encoding::PLAIN), Encoding::PLAIN);
+        assert_eq!(
+            Encoding::from(parquet::Encoding::PLAIN_DICTIONARY),
+            Encoding::PLAIN_DICTIONARY
+        );
+        assert_eq!(Encoding::from(parquet::Encoding::RLE), Encoding::RLE);
+        assert_eq!(
+            Encoding::from(parquet::Encoding::BIT_PACKED),
+            Encoding::BIT_PACKED
+        );
+        assert_eq!(
+            Encoding::from(parquet::Encoding::DELTA_BINARY_PACKED),
+            Encoding::DELTA_BINARY_PACKED
+        );
+        assert_eq!(
+            Encoding::from(parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY),
+            Encoding::DELTA_LENGTH_BYTE_ARRAY
+        );
+        assert_eq!(
+            Encoding::from(parquet::Encoding::DELTA_BYTE_ARRAY),
+            Encoding::DELTA_BYTE_ARRAY
+        );
+    }
+
+    #[test]
+    fn test_into_encoding() {
+        assert_eq!(parquet::Encoding::PLAIN, Encoding::PLAIN.into());
+        assert_eq!(
+            parquet::Encoding::PLAIN_DICTIONARY,
+            Encoding::PLAIN_DICTIONARY.into()
+        );
+        assert_eq!(parquet::Encoding::RLE, Encoding::RLE.into());
+        assert_eq!(parquet::Encoding::BIT_PACKED, Encoding::BIT_PACKED.into());
+        assert_eq!(
+            parquet::Encoding::DELTA_BINARY_PACKED,
+            Encoding::DELTA_BINARY_PACKED.into()
+        );
+        assert_eq!(
+            parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY,
+            Encoding::DELTA_LENGTH_BYTE_ARRAY.into()
+        );
+        assert_eq!(
+            parquet::Encoding::DELTA_BYTE_ARRAY,
+            Encoding::DELTA_BYTE_ARRAY.into()
+        );
+    }
+
+    #[test]
+    fn test_display_compression() {
+        assert_eq!(Compression::UNCOMPRESSED.to_string(), "UNCOMPRESSED");
+        assert_eq!(Compression::SNAPPY.to_string(), "SNAPPY");
+        assert_eq!(Compression::GZIP.to_string(), "GZIP");
+        assert_eq!(Compression::LZO.to_string(), "LZO");
+        assert_eq!(Compression::BROTLI.to_string(), "BROTLI");
+        assert_eq!(Compression::LZ4.to_string(), "LZ4");
+        assert_eq!(Compression::ZSTD.to_string(), "ZSTD");
+    }
+
+    #[test]
+    fn test_from_compression() {
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::UNCOMPRESSED),
+            Compression::UNCOMPRESSED
+        );
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::SNAPPY),
+            Compression::SNAPPY
+        );
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::GZIP),
+            Compression::GZIP
+        );
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::LZO),
+            Compression::LZO
+        );
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::BROTLI),
+            Compression::BROTLI
+        );
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::LZ4),
+            Compression::LZ4
+        );
+        assert_eq!(
+            Compression::from(parquet::CompressionCodec::ZSTD),
+            Compression::ZSTD
+        );
+    }
+
+    #[test]
+    fn test_into_compression() {
+        assert_eq!(
+            parquet::CompressionCodec::UNCOMPRESSED,
+            Compression::UNCOMPRESSED.into()
+        );
+        assert_eq!(
+            parquet::CompressionCodec::SNAPPY,
+            Compression::SNAPPY.into()
+        );
+        assert_eq!(parquet::CompressionCodec::GZIP, Compression::GZIP.into());
+        assert_eq!(parquet::CompressionCodec::LZO, Compression::LZO.into());
+        assert_eq!(
+            parquet::CompressionCodec::BROTLI,
+            Compression::BROTLI.into()
+        );
+        assert_eq!(parquet::CompressionCodec::LZ4, Compression::LZ4.into());
+        assert_eq!(parquet::CompressionCodec::ZSTD, Compression::ZSTD.into());
+    }
+
+    #[test]
+    fn test_display_page_type() {
+        assert_eq!(PageType::DATA_PAGE.to_string(), "DATA_PAGE");
+        assert_eq!(PageType::INDEX_PAGE.to_string(), "INDEX_PAGE");
+        assert_eq!(PageType::DICTIONARY_PAGE.to_string(), "DICTIONARY_PAGE");
+        assert_eq!(PageType::DATA_PAGE_V2.to_string(), "DATA_PAGE_V2");
+    }
+
+    #[test]
+    fn test_from_page_type() {
+        assert_eq!(
+            PageType::from(parquet::PageType::DATA_PAGE),
+            PageType::DATA_PAGE
+        );
+        assert_eq!(
+            PageType::from(parquet::PageType::INDEX_PAGE),
+            PageType::INDEX_PAGE
+        );
+        assert_eq!(
+            PageType::from(parquet::PageType::DICTIONARY_PAGE),
+            PageType::DICTIONARY_PAGE
+        );
+        assert_eq!(
+            PageType::from(parquet::PageType::DATA_PAGE_V2),
+            PageType::DATA_PAGE_V2
+        );
+    }
+
+    #[test]
+    fn test_into_page_type() {
+        assert_eq!(parquet::PageType::DATA_PAGE, PageType::DATA_PAGE.into());
+        assert_eq!(parquet::PageType::INDEX_PAGE, PageType::INDEX_PAGE.into());
+        assert_eq!(
+            parquet::PageType::DICTIONARY_PAGE,
+            PageType::DICTIONARY_PAGE.into()
+        );
+        assert_eq!(
+            parquet::PageType::DATA_PAGE_V2,
+            PageType::DATA_PAGE_V2.into()
+        );
+    }
+
+    #[test]
+    fn test_display_sort_order() {
+        assert_eq!(SortOrder::SIGNED.to_string(), "SIGNED");
+        assert_eq!(SortOrder::UNSIGNED.to_string(), "UNSIGNED");
+        assert_eq!(SortOrder::UNDEFINED.to_string(), "UNDEFINED");
+    }
+
+    #[test]
+    fn test_display_column_order() {
+        assert_eq!(
+            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::SIGNED).to_string(),
+            "TYPE_DEFINED_ORDER(SIGNED)"
+        );
+        assert_eq!(
+            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::UNSIGNED).to_string(),
+            "TYPE_DEFINED_ORDER(UNSIGNED)"
+        );
+        assert_eq!(
+            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::UNDEFINED).to_string(),
+            "TYPE_DEFINED_ORDER(UNDEFINED)"
+        );
+        assert_eq!(ColumnOrder::UNDEFINED.to_string(), "UNDEFINED");
+    }
+
+    #[test]
+    fn test_column_order_get_sort_order() {
+        // Helper to check the order in a list of values.
+        // Only logical type is checked.
+        fn check_sort_order(types: Vec<LogicalType>, expected_order: SortOrder) {
+            for tpe in types {
+                assert_eq!(
+                    ColumnOrder::get_sort_order(tpe, Type::BYTE_ARRAY),
+                    expected_order
+                );
+            }
+        }
+
+        // Unsigned comparison (physical type does not matter)
+        let unsigned = vec![
+            LogicalType::UTF8,
+            LogicalType::JSON,
+            LogicalType::BSON,
+            LogicalType::ENUM,
+            LogicalType::UINT_8,
+            LogicalType::UINT_16,
+            LogicalType::UINT_32,
+            LogicalType::UINT_64,
+            LogicalType::INTERVAL,
+        ];
+        check_sort_order(unsigned, SortOrder::UNSIGNED);
+
+        // Signed comparison (physical type does not matter)
+        let signed = vec![
+            LogicalType::INT_8,
+            LogicalType::INT_16,
+            LogicalType::INT_32,
+            LogicalType::INT_64,
+            LogicalType::DECIMAL,
+            LogicalType::DATE,
+            LogicalType::TIME_MILLIS,
+            LogicalType::TIME_MICROS,
+            LogicalType::TIMESTAMP_MILLIS,
+            LogicalType::TIMESTAMP_MICROS,
+        ];
+        check_sort_order(signed, SortOrder::SIGNED);
+
+        // Undefined comparison
+        let undefined = vec![
+            LogicalType::LIST,
+            LogicalType::MAP,
+            LogicalType::MAP_KEY_VALUE,
+        ];
+        check_sort_order(undefined, SortOrder::UNDEFINED);
+
+        // Check None logical type
+        // This should return a sort order for byte array type.
+        check_sort_order(vec![LogicalType::NONE], SortOrder::UNSIGNED);
+    }
+
+    #[test]
+    fn test_column_order_get_default_sort_order() {
+        // Comparison based on physical type
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::BOOLEAN),
+            SortOrder::UNSIGNED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::INT32),
+            SortOrder::SIGNED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::INT64),
+            SortOrder::SIGNED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::INT96),
+            SortOrder::UNDEFINED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::FLOAT),
+            SortOrder::SIGNED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::DOUBLE),
+            SortOrder::SIGNED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::BYTE_ARRAY),
+            SortOrder::UNSIGNED
+        );
+        assert_eq!(
+            ColumnOrder::get_default_sort_order(Type::FIXED_LEN_BYTE_ARRAY),
+            SortOrder::UNSIGNED
+        );
+    }
+
+    #[test]
+    fn test_column_order_sort_order() {
+        assert_eq!(
+            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::SIGNED).sort_order(),
+            SortOrder::SIGNED
+        );
+        assert_eq!(
+            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::UNSIGNED).sort_order(),
+            SortOrder::UNSIGNED
+        );
+        assert_eq!(
+            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::UNDEFINED).sort_order(),
+            SortOrder::UNDEFINED
+        );
+        assert_eq!(ColumnOrder::UNDEFINED.sort_order(), SortOrder::SIGNED);
+    }
+}
diff --git a/rust/src/parquet/column/mod.rs b/rust/src/parquet/column/mod.rs
new file mode 100644
index 0000000..09c4bde
--- /dev/null
+++ b/rust/src/parquet/column/mod.rs
@@ -0,0 +1,124 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Low level column reader and writer APIs.
+//!
+//! This API is designed for reading and writing column values, definition and repetition
+//! levels directly.
+//!
+//! # Example of writing and reading data
+//!
+//! Data has the following format:
+//! ```text
+//! +---------------+
+//! |         values|
+//! +---------------+
+//! |[1, 2]         |
+//! |[3, null, null]|
+//! +---------------+
+//! ```
+//!
+//! The example uses the column writer and reader APIs to write raw values, definition
+//! and repetition levels, and then read them back to verify write/read correctness.
+//!
+//! ```rust
+//! use std::{fs, path::Path, rc::Rc};
+//!
+//! use arrow::parquet::{
+//!     column::{reader::ColumnReader, writer::ColumnWriter},
+//!     file::{
+//!         properties::WriterProperties,
+//!         reader::{FileReader, SerializedFileReader},
+//!         writer::{FileWriter, SerializedFileWriter},
+//!     },
+//!     schema::parser::parse_message_type,
+//! };
+//!
+//! let path = Path::new("target/debug/examples/column_sample.parquet");
+//!
+//! // Writing data using column writer API.
+//!
+//! let message_type = "
+//!   message schema {
+//!     optional group values (LIST) {
+//!       repeated group list {
+//!         optional INT32 element;
+//!       }
+//!     }
+//!   }
+//! ";
+//! let schema = Rc::new(parse_message_type(message_type).unwrap());
+//! let props = Rc::new(WriterProperties::builder().build());
+//! let file = fs::File::create(path).unwrap();
+//! let mut writer = SerializedFileWriter::new(file, schema, props).unwrap();
+//! let mut row_group_writer = writer.next_row_group().unwrap();
+//! while let Some(mut col_writer) = row_group_writer.next_column().unwrap() {
+//!     match col_writer {
+//!         // You can also use the `get_typed_column_writer` method to extract a typed writer.
+//!         ColumnWriter::Int32ColumnWriter(ref mut typed_writer) => {
+//!             typed_writer
+//!                 .write_batch(&[1, 2, 3], Some(&[3, 3, 3, 2, 2]), Some(&[0, 1, 0, 1, 1]))
+//!                 .unwrap();
+//!         }
+//!         _ => {}
+//!     }
+//!     row_group_writer.close_column(col_writer).unwrap();
+//! }
+//! writer.close_row_group(row_group_writer).unwrap();
+//! writer.close().unwrap();
+//!
+//! // Reading data using column reader API.
+//!
+//! let file = fs::File::open(path).unwrap();
+//! let reader = SerializedFileReader::new(file).unwrap();
+//! let metadata = reader.metadata();
+//!
+//! let mut res = Ok((0, 0));
+//! let mut values = vec![0; 8];
+//! let mut def_levels = vec![0; 8];
+//! let mut rep_levels = vec![0; 8];
+//!
+//! for i in 0..metadata.num_row_groups() {
+//!     let row_group_reader = reader.get_row_group(i).unwrap();
+//!     let row_group_metadata = metadata.row_group(i);
+//!
+//!     for j in 0..row_group_metadata.num_columns() {
+//!         let mut column_reader = row_group_reader.get_column_reader(j).unwrap();
+//!         match column_reader {
+//!             // You can also use the `get_typed_column_reader` method to extract a typed reader.
+//!             ColumnReader::Int32ColumnReader(ref mut typed_reader) => {
+//!                 res = typed_reader.read_batch(
+//!                     8, // batch size
+//!                     Some(&mut def_levels),
+//!                     Some(&mut rep_levels),
+//!                     &mut values,
+//!                 );
+//!             }
+//!             _ => {}
+//!         }
+//!     }
+//! }
+//!
+//! assert_eq!(res, Ok((3, 5)));
+//! assert_eq!(values, vec![1, 2, 3, 0, 0, 0, 0, 0]);
+//! assert_eq!(def_levels, vec![3, 3, 3, 2, 2, 0, 0, 0]);
+//! assert_eq!(rep_levels, vec![0, 1, 0, 1, 1, 0, 0, 0]);
+//! ```
+
+pub mod page;
+pub mod reader;
+pub mod writer;
diff --git a/rust/src/parquet/column/page.rs b/rust/src/parquet/column/page.rs
new file mode 100644
index 0000000..115037c
--- /dev/null
+++ b/rust/src/parquet/column/page.rs
@@ -0,0 +1,296 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains Parquet Page definitions and page reader interface.
+
+use crate::parquet::basic::{Encoding, PageType};
+use crate::parquet::errors::Result;
+use crate::parquet::file::{metadata::ColumnChunkMetaData, statistics::Statistics};
+use crate::parquet::util::memory::ByteBufferPtr;
+
+/// Parquet Page definition.
+///
+/// List of supported pages.
+/// These are 1-to-1 mapped from the equivalent Thrift definitions, except `buf`, which
+/// is used to store the uncompressed bytes of the page.
+pub enum Page {
+    DataPage {
+        buf: ByteBufferPtr,
+        num_values: u32,
+        encoding: Encoding,
+        def_level_encoding: Encoding,
+        rep_level_encoding: Encoding,
+        statistics: Option<Statistics>,
+    },
+    DataPageV2 {
+        buf: ByteBufferPtr,
+        num_values: u32,
+        encoding: Encoding,
+        num_nulls: u32,
+        num_rows: u32,
+        def_levels_byte_len: u32,
+        rep_levels_byte_len: u32,
+        is_compressed: bool,
+        statistics: Option<Statistics>,
+    },
+    DictionaryPage {
+        buf: ByteBufferPtr,
+        num_values: u32,
+        encoding: Encoding,
+        is_sorted: bool,
+    },
+}
+
+impl Page {
+    /// Returns [`PageType`](`::basic::PageType`) for this page.
+    pub fn page_type(&self) -> PageType {
+        match self {
+            &Page::DataPage { .. } => PageType::DATA_PAGE,
+            &Page::DataPageV2 { .. } => PageType::DATA_PAGE_V2,
+            &Page::DictionaryPage { .. } => PageType::DICTIONARY_PAGE,
+        }
+    }
+
+    /// Returns internal byte buffer reference for this page.
+    pub fn buffer(&self) -> &ByteBufferPtr {
+        match self {
+            &Page::DataPage { ref buf, .. } => &buf,
+            &Page::DataPageV2 { ref buf, .. } => &buf,
+            &Page::DictionaryPage { ref buf, .. } => &buf,
+        }
+    }
+
+    /// Returns number of values in this page.
+    pub fn num_values(&self) -> u32 {
+        match self {
+            &Page::DataPage { num_values, .. } => num_values,
+            &Page::DataPageV2 { num_values, .. } => num_values,
+            &Page::DictionaryPage { num_values, .. } => num_values,
+        }
+    }
+
+    /// Returns this page's [`Encoding`](`::basic::Encoding`).
+    pub fn encoding(&self) -> Encoding {
+        match self {
+            &Page::DataPage { encoding, .. } => encoding,
+            &Page::DataPageV2 { encoding, .. } => encoding,
+            &Page::DictionaryPage { encoding, .. } => encoding,
+        }
+    }
+
+    /// Returns optional [`Statistics`](`::file::statistics::Statistics`).
+    pub fn statistics(&self) -> Option<&Statistics> {
+        match self {
+            &Page::DataPage { ref statistics, .. } => statistics.as_ref(),
+            &Page::DataPageV2 { ref statistics, .. } => statistics.as_ref(),
+            &Page::DictionaryPage { .. } => None,
+        }
+    }
+}
+
+/// Helper struct to represent pages with potentially compressed buffer (data page v1) or
+/// compressed and concatenated buffer (def levels + rep levels + compressed values for
+/// data page v2).
+///
+/// The difference from `Page` is that a `Page` buffer is always uncompressed.
+pub struct CompressedPage {
+    compressed_page: Page,
+    uncompressed_size: usize,
+}
+
+impl CompressedPage {
+    /// Creates `CompressedPage` from a page with potentially compressed buffer and
+    /// uncompressed size.
+    pub fn new(compressed_page: Page, uncompressed_size: usize) -> Self {
+        Self {
+            compressed_page,
+            uncompressed_size,
+        }
+    }
+
+    /// Returns page type.
+    pub fn page_type(&self) -> PageType {
+        self.compressed_page.page_type()
+    }
+
+    /// Returns underlying page with potentially compressed buffer.
+    pub fn compressed_page(&self) -> &Page {
+        &self.compressed_page
+    }
+
+    /// Returns uncompressed size in bytes.
+    pub fn uncompressed_size(&self) -> usize {
+        self.uncompressed_size
+    }
+
+    /// Returns compressed size in bytes.
+    ///
+    /// Note that the buffer is assumed to be compressed, but it may not be. In that
+    /// case the compressed size will equal the uncompressed size.
+    pub fn compressed_size(&self) -> usize {
+        self.compressed_page.buffer().len()
+    }
+
+    /// Number of values in page.
+    pub fn num_values(&self) -> u32 {
+        self.compressed_page.num_values()
+    }
+
+    /// Returns encoding for values in page.
+    pub fn encoding(&self) -> Encoding {
+        self.compressed_page.encoding()
+    }
+
+    /// Returns slice of compressed buffer in the page.
+    pub fn data(&self) -> &[u8] {
+        self.compressed_page.buffer().data()
+    }
+}
+
+/// Contains page write metrics.
+pub struct PageWriteSpec {
+    pub page_type: PageType,
+    pub uncompressed_size: usize,
+    pub compressed_size: usize,
+    pub num_values: u32,
+    pub offset: u64,
+    pub bytes_written: u64,
+}
+
+impl PageWriteSpec {
+    /// Creates new spec with default page write metrics.
+    pub fn new() -> Self {
+        Self {
+            page_type: PageType::DATA_PAGE,
+            uncompressed_size: 0,
+            compressed_size: 0,
+            num_values: 0,
+            offset: 0,
+            bytes_written: 0,
+        }
+    }
+}
+
+/// API for reading pages from a column chunk.
+/// This offers an iterator-like API to get the next page.
+pub trait PageReader {
+    /// Gets the next page in the column chunk associated with this reader.
+    /// Returns `None` if there are no pages left.
+    fn get_next_page(&mut self) -> Result<Option<Page>>;
+}
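
[Editor's note] Since `get_next_page` returns `Result<Option<Page>>`, a page reader is typically drained with a `while let` loop until `Ok(None)` comes back. A minimal sketch, not part of this commit, of a hypothetical helper that counts values across all pages of a column chunk:

```rust
use arrow::parquet::column::page::PageReader;
use arrow::parquet::errors::Result;

// Hypothetical helper: `page_reader` would come from a row group reader.
fn count_page_values(mut page_reader: Box<PageReader>) -> Result<u64> {
    let mut total = 0u64;
    // `Ok(None)` signals that the column chunk has no pages left.
    while let Some(page) = page_reader.get_next_page()? {
        total += u64::from(page.num_values());
    }
    Ok(total)
}
```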
+
+/// API for writing pages in a column chunk.
+///
+/// Implementations may assume that all pages are written in the correct order, e.g.
+/// a dictionary page followed by data pages, or a set of data pages, etc.
+pub trait PageWriter {
+    /// Writes a page into the output stream/sink.
+    /// Returns `PageWriteSpec` that contains information about written page metrics,
+    /// including number of bytes, size, number of values, offset, etc.
+    ///
+    /// This method is called for every compressed page we write into the underlying
+    /// buffer, whether a data page or a dictionary page.
+    fn write_page(&mut self, page: CompressedPage) -> Result<PageWriteSpec>;
+
+    /// Writes column chunk metadata into the output stream/sink.
+    ///
+    /// This method is called once, before the page writer is closed, normally when
+    /// writes are finalized in the column writer.
+    fn write_metadata(&mut self, metadata: &ColumnChunkMetaData) -> Result<()>;
+
+    /// Closes resources and flushes underlying sink.
+    /// Page writer should not be used after this method is called.
+    fn close(&mut self) -> Result<()>;
+}
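
[Editor's note] The ordering contract described above can be made concrete with a sketch (a hypothetical helper, not part of this commit): the dictionary page, if any, is written before the data pages, metadata is written once, and `close` is called last.

```rust
use arrow::parquet::column::page::{CompressedPage, PageWriter};
use arrow::parquet::errors::Result;
use arrow::parquet::file::metadata::ColumnChunkMetaData;

// Hypothetical helper illustrating the expected call order on a PageWriter.
fn flush_column_chunk<W: PageWriter>(
    writer: &mut W,
    dictionary_page: Option<CompressedPage>,
    data_pages: Vec<CompressedPage>,
    metadata: &ColumnChunkMetaData,
) -> Result<()> {
    if let Some(dict_page) = dictionary_page {
        // A dictionary page, when present, precedes the data pages.
        writer.write_page(dict_page)?;
    }
    for page in data_pages {
        writer.write_page(page)?;
    }
    // Metadata is written once, before the writer is closed.
    writer.write_metadata(metadata)?;
    writer.close()
}
```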
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_page() {
+        let data_page = Page::DataPage {
+            buf: ByteBufferPtr::new(vec![0, 1, 2]),
+            num_values: 10,
+            encoding: Encoding::PLAIN,
+            def_level_encoding: Encoding::RLE,
+            rep_level_encoding: Encoding::RLE,
+            statistics: Some(Statistics::int32(Some(1), Some(2), None, 1, true)),
+        };
+        assert_eq!(data_page.page_type(), PageType::DATA_PAGE);
+        assert_eq!(data_page.buffer().data(), vec![0, 1, 2].as_slice());
+        assert_eq!(data_page.num_values(), 10);
+        assert_eq!(data_page.encoding(), Encoding::PLAIN);
+        assert_eq!(
+            data_page.statistics(),
+            Some(&Statistics::int32(Some(1), Some(2), None, 1, true))
+        );
+
+        let data_page_v2 = Page::DataPageV2 {
+            buf: ByteBufferPtr::new(vec![0, 1, 2]),
+            num_values: 10,
+            encoding: Encoding::PLAIN,
+            num_nulls: 5,
+            num_rows: 20,
+            def_levels_byte_len: 30,
+            rep_levels_byte_len: 40,
+            is_compressed: false,
+            statistics: Some(Statistics::int32(Some(1), Some(2), None, 1, true)),
+        };
+        assert_eq!(data_page_v2.page_type(), PageType::DATA_PAGE_V2);
+        assert_eq!(data_page_v2.buffer().data(), vec![0, 1, 2].as_slice());
+        assert_eq!(data_page_v2.num_values(), 10);
+        assert_eq!(data_page_v2.encoding(), Encoding::PLAIN);
+        assert_eq!(
+            data_page_v2.statistics(),
+            Some(&Statistics::int32(Some(1), Some(2), None, 1, true))
+        );
+
+        let dict_page = Page::DictionaryPage {
+            buf: ByteBufferPtr::new(vec![0, 1, 2]),
+            num_values: 10,
+            encoding: Encoding::PLAIN,
+            is_sorted: false,
+        };
+        assert_eq!(dict_page.page_type(), PageType::DICTIONARY_PAGE);
+        assert_eq!(dict_page.buffer().data(), vec![0, 1, 2].as_slice());
+        assert_eq!(dict_page.num_values(), 10);
+        assert_eq!(dict_page.encoding(), Encoding::PLAIN);
+        assert_eq!(dict_page.statistics(), None);
+    }
+
+    #[test]
+    fn test_compressed_page() {
+        let data_page = Page::DataPage {
+            buf: ByteBufferPtr::new(vec![0, 1, 2]),
+            num_values: 10,
+            encoding: Encoding::PLAIN,
+            def_level_encoding: Encoding::RLE,
+            rep_level_encoding: Encoding::RLE,
+            statistics: Some(Statistics::int32(Some(1), Some(2), None, 1, true)),
+        };
+
+        let cpage = CompressedPage::new(data_page, 5);
+
+        assert_eq!(cpage.page_type(), PageType::DATA_PAGE);
+        assert_eq!(cpage.uncompressed_size(), 5);
+        assert_eq!(cpage.compressed_size(), 3);
+        assert_eq!(cpage.num_values(), 10);
+        assert_eq!(cpage.encoding(), Encoding::PLAIN);
+        assert_eq!(cpage.data(), &[0, 1, 2]);
+    }
+}
diff --git a/rust/src/parquet/column/reader.rs b/rust/src/parquet/column/reader.rs
new file mode 100644
index 0000000..f3dde31
--- /dev/null
+++ b/rust/src/parquet/column/reader.rs
@@ -0,0 +1,1576 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains column reader API.
+
+use std::{
+    cmp::{max, min},
+    collections::HashMap,
+    mem,
+};
+
+use super::page::{Page, PageReader};
+use crate::parquet::basic::*;
+use crate::parquet::data_type::*;
+use crate::parquet::encodings::{
+    decoding::{get_decoder, Decoder, DictDecoder, PlainDecoder},
+    levels::LevelDecoder,
+};
+use crate::parquet::errors::{ParquetError, Result};
+use crate::parquet::schema::types::ColumnDescPtr;
+use crate::parquet::util::memory::ByteBufferPtr;
+
+/// Column reader for a Parquet type.
+pub enum ColumnReader {
+    BoolColumnReader(ColumnReaderImpl<BoolType>),
+    Int32ColumnReader(ColumnReaderImpl<Int32Type>),
+    Int64ColumnReader(ColumnReaderImpl<Int64Type>),
+    Int96ColumnReader(ColumnReaderImpl<Int96Type>),
+    FloatColumnReader(ColumnReaderImpl<FloatType>),
+    DoubleColumnReader(ColumnReaderImpl<DoubleType>),
+    ByteArrayColumnReader(ColumnReaderImpl<ByteArrayType>),
+    FixedLenByteArrayColumnReader(ColumnReaderImpl<FixedLenByteArrayType>),
+}
+
+/// Gets a specific column reader corresponding to column descriptor `col_descr`. The
+/// column reader will read from pages in `col_page_reader`.
+pub fn get_column_reader(
+    col_descr: ColumnDescPtr,
+    col_page_reader: Box<PageReader>,
+) -> ColumnReader {
+    match col_descr.physical_type() {
+        Type::BOOLEAN => {
+            ColumnReader::BoolColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::INT32 => {
+            ColumnReader::Int32ColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::INT64 => {
+            ColumnReader::Int64ColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::INT96 => {
+            ColumnReader::Int96ColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::FLOAT => {
+            ColumnReader::FloatColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::DOUBLE => {
+            ColumnReader::DoubleColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::BYTE_ARRAY => {
+            ColumnReader::ByteArrayColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
+        }
+        Type::FIXED_LEN_BYTE_ARRAY => ColumnReader::FixedLenByteArrayColumnReader(
+            ColumnReaderImpl::new(col_descr, col_page_reader),
+        ),
+    }
+}
+
+/// Gets a typed column reader for the specific type `T`, by "up-casting" `col_reader` of
+/// non-generic type to a generic column reader type `ColumnReaderImpl`.
+///
+/// NOTE: the caller MUST guarantee that the actual enum value for `col_reader` matches
+/// the type `T`. Otherwise, disastrous consequences could follow.
+pub fn get_typed_column_reader<T: DataType>(col_reader: ColumnReader) -> ColumnReaderImpl<T> {
+    match col_reader {
+        ColumnReader::BoolColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::Int32ColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::Int64ColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::Int96ColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::FloatColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::DoubleColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::ByteArrayColumnReader(r) => unsafe { mem::transmute(r) },
+        ColumnReader::FixedLenByteArrayColumnReader(r) => unsafe { mem::transmute(r) },
+    }
+}
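
[Editor's note] A short sketch of how the two functions compose (assumed context, not part of this commit: `descr` is the `ColumnDescPtr` of an INT32 column and `pages` is its `Box<PageReader>`; per the note above, the requested type must match the column's physical type):

```rust
// Dispatch on the physical type, then extract the typed reader.
// Safe here only because we know the column is INT32.
let reader = get_column_reader(descr, pages);
let mut typed_reader = get_typed_column_reader::<Int32Type>(reader);
```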
+
+/// Typed value reader for a particular primitive column.
+pub struct ColumnReaderImpl<T: DataType> {
+    descr: ColumnDescPtr,
+    def_level_decoder: Option<LevelDecoder>,
+    rep_level_decoder: Option<LevelDecoder>,
+    page_reader: Box<PageReader>,
+    current_encoding: Option<Encoding>,
+
+    // The total number of values stored in the data page.
+    num_buffered_values: u32,
+
+    // The number of values from the current data page that have been decoded into memory
+    // so far.
+    num_decoded_values: u32,
+
+    // Cache of decoders for existing encodings
+    decoders: HashMap<Encoding, Box<Decoder<T>>>,
+}
+
+impl<T: DataType> ColumnReaderImpl<T> {
+    /// Creates new column reader based on column descriptor and page reader.
+    pub fn new(descr: ColumnDescPtr, page_reader: Box<PageReader>) -> Self {
+        Self {
+            descr,
+            def_level_decoder: None,
+            rep_level_decoder: None,
+            page_reader,
+            current_encoding: None,
+            num_buffered_values: 0,
+            num_decoded_values: 0,
+            decoders: HashMap::new(),
+        }
+    }
+
+    /// Reads a batch of at most `batch_size` values.
+    ///
+    /// This will try to read from the row group, filling at most `batch_size` values
+    /// into `def_levels`, `rep_levels` and `values`. It will stop when the row group
+    /// is depleted, `batch_size` values have been read, or there is no space in the input
+    /// slices (values/definition levels/repetition levels).
+    ///
+    /// Note that in case the field being read is not required, `values` could contain
+    /// fewer values than `def_levels`. Also note that this will skip reading def / rep
+    /// levels if the field is required / not repeated, respectively.
+    ///
+    /// If `def_levels` or `rep_levels` is `None`, this will also skip reading the
+    /// respective levels. This is useful when the caller of this function knows in advance
+    /// that the field is required and non-repeated, and can therefore avoid allocating
+    /// memory for the levels data. Note that if the field has definition levels, but the
+    /// caller provides `None`, there might be an inconsistency between levels and values
+    /// (see comments below).
+    ///
+    /// Returns a tuple where the first element is the actual number of values read,
+    /// and the second element is the actual number of levels read.
+    #[inline]
+    pub fn read_batch(
+        &mut self,
+        batch_size: usize,
+        mut def_levels: Option<&mut [i16]>,
+        mut rep_levels: Option<&mut [i16]>,
+        values: &mut [T::T],
+    ) -> Result<(usize, usize)> {
+        let mut values_read = 0;
+        let mut levels_read = 0;
+
+        // Compute the smallest batch size we can read based on provided slices
+        let mut batch_size = min(batch_size, values.len());
+        if let Some(ref levels) = def_levels {
+            batch_size = min(batch_size, levels.len());
+        }
+        if let Some(ref levels) = rep_levels {
+            batch_size = min(batch_size, levels.len());
+        }
+
+        // Read exhaustively all pages until we read all batch_size values/levels
+        // or there are no more values/levels to read.
+        while max(values_read, levels_read) < batch_size {
+            if !self.has_next()? {
+                break;
+            }
+
+            // Batch size for the current iteration
+            let iter_batch_size = {
+                // Compute approximate value based on values decoded so far
+                let mut adjusted_size = min(
+                    batch_size,
+                    (self.num_buffered_values - self.num_decoded_values) as usize,
+                );
+
+                // Adjust batch size by taking into account how much space is left in values
+                // slice or levels slices (if available)
+                adjusted_size = min(adjusted_size, values.len() - values_read);
+                if let Some(ref levels) = def_levels {
+                    adjusted_size = min(adjusted_size, levels.len() - levels_read);
+                }
+                if let Some(ref levels) = rep_levels {
+                    adjusted_size = min(adjusted_size, levels.len() - levels_read);
+                }
+
+                adjusted_size
+            };
+
+            let mut values_to_read = 0;
+            let mut num_def_levels = 0;
+            let mut num_rep_levels = 0;
+
+            // If the field is required and non-repeated, there are no definition levels
+            if self.descr.max_def_level() > 0 && def_levels.as_ref().is_some() {
+                if let Some(ref mut levels) = def_levels {
+                    num_def_levels = self
+                        .read_def_levels(&mut levels[levels_read..levels_read + iter_batch_size])?;
+                    for i in levels_read..levels_read + num_def_levels {
+                        if levels[i] == self.descr.max_def_level() {
+                            values_to_read += 1;
+                        }
+                    }
+                }
+            } else {
+                // If max definition level == 0, then it is a REQUIRED field; read all values.
+                // If definition levels are not provided, we still read all values.
+                values_to_read = iter_batch_size;
+            }
+
+            if self.descr.max_rep_level() > 0 && rep_levels.is_some() {
+                if let Some(ref mut levels) = rep_levels {
+                    num_rep_levels = self
+                        .read_rep_levels(&mut levels[levels_read..levels_read + iter_batch_size])?;
+
+                    // If definition levels are defined, check that rep levels == def levels
+                    if def_levels.is_some() {
+                        assert_eq!(
+                            num_def_levels, num_rep_levels,
+                            "Number of decoded rep / def levels did not match"
+                        );
+                    }
+                }
+            }
+
+            // At this point we have read values, definition and repetition levels.
+            // If both definition and repetition levels are defined, their counts
+            // should be equal. Values count is always less or equal to definition levels.
+            //
+            // Note that if the field is not required but no definition levels are
+            // provided, we would read up to batch size values and (if provided) up to
+            // batch size repetition levels; [!] they will not be synced, because only
+            // definition levels enforce the number of non-null values to read.
+
+            let curr_values_read =
+                self.read_values(&mut values[values_read..values_read + values_to_read])?;
+
+            // Update all "return" counters and internal state.
+
+            // This is to account for when def or rep levels are not provided
+            let curr_levels_read = max(num_def_levels, num_rep_levels);
+            self.num_decoded_values += max(curr_levels_read, curr_values_read) as u32;
+            levels_read += curr_levels_read;
+            values_read += curr_values_read;
+        }
+
+        Ok((values_read, levels_read))
+    }
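
[Editor's note] Because a single call can return fewer than `batch_size` values, callers usually loop until both counters come back as zero. A minimal sketch (assuming `typed_reader` is a `ColumnReaderImpl<Int32Type>` for an optional, repeated column):

```rust
let mut values = vec![0i32; 1024];
let mut def_levels = vec![0i16; 1024];
let mut rep_levels = vec![0i16; 1024];
loop {
    let (num_values, num_levels) = typed_reader
        .read_batch(
            1024,
            Some(&mut def_levels),
            Some(&mut rep_levels),
            &mut values,
        )
        .unwrap();
    if num_values == 0 && num_levels == 0 {
        break; // row group exhausted
    }
    // Only `values[..num_values]` and `*_levels[..num_levels]` hold fresh data.
}
```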
+
+    /// Reads a new page and sets up the decoders for levels, values or dictionary.
+    /// Returns `false` if there are no pages left.
+    fn read_new_page(&mut self) -> Result<bool> {
+        #[allow(while_true)]
+        while true {
+            match self.page_reader.get_next_page()? {
+                // No more pages to read
+                None => return Ok(false),
+                Some(current_page) => {
+                    match current_page {
+                        // 1. Dictionary page: configure dictionary for this page.
+                        p @ Page::DictionaryPage { .. } => {
+                            self.configure_dictionary(p)?;
+                            continue;
+                        }
+                        // 2. Data page v1
+                        Page::DataPage {
+                            buf,
+                            num_values,
+                            encoding,
+                            def_level_encoding,
+                            rep_level_encoding,
+                            statistics: _,
+                        } => {
+                            self.num_buffered_values = num_values;
+                            self.num_decoded_values = 0;
+
+                            let mut buffer_ptr = buf;
+
+                            if self.descr.max_rep_level() > 0 {
+                                let mut rep_decoder = LevelDecoder::v1(
+                                    rep_level_encoding,
+                                    self.descr.max_rep_level(),
+                                );
+                                let total_bytes = rep_decoder
+                                    .set_data(self.num_buffered_values as usize, buffer_ptr.all());
+                                buffer_ptr = buffer_ptr.start_from(total_bytes);
+                                self.rep_level_decoder = Some(rep_decoder);
+                            }
+
+                            if self.descr.max_def_level() > 0 {
+                                let mut def_decoder = LevelDecoder::v1(
+                                    def_level_encoding,
+                                    self.descr.max_def_level(),
+                                );
+                                let total_bytes = def_decoder
+                                    .set_data(self.num_buffered_values as usize, buffer_ptr.all());
+                                buffer_ptr = buffer_ptr.start_from(total_bytes);
+                                self.def_level_decoder = Some(def_decoder);
+                            }
+
+                            // Data page v1 does not have an offset; the entire buffer content is passed
+                            self.set_current_page_encoding(
+                                encoding,
+                                &buffer_ptr,
+                                0,
+                                num_values as usize,
+                            )?;
+                            return Ok(true);
+                        }
+                        // 3. Data page v2
+                        Page::DataPageV2 {
+                            buf,
+                            num_values,
+                            encoding,
+                            num_nulls: _,
+                            num_rows: _,
+                            def_levels_byte_len,
+                            rep_levels_byte_len,
+                            is_compressed: _,
+                            statistics: _,
+                        } => {
+                            self.num_buffered_values = num_values;
+                            self.num_decoded_values = 0;
+
+                            let mut offset = 0;
+
+                            // DataPage v2 only supports RLE encoding for repetition levels
+                            if self.descr.max_rep_level() > 0 {
+                                let mut rep_decoder = LevelDecoder::v2(self.descr.max_rep_level());
+                                let bytes_read = rep_decoder.set_data_range(
+                                    self.num_buffered_values as usize,
+                                    &buf,
+                                    offset,
+                                    rep_levels_byte_len as usize,
+                                );
+                                offset += bytes_read;
+                                self.rep_level_decoder = Some(rep_decoder);
+                            }
+
+                            // DataPage v2 only supports RLE encoding for definition levels
+                            if self.descr.max_def_level() > 0 {
+                                let mut def_decoder = LevelDecoder::v2(self.descr.max_def_level());
+                                let bytes_read = def_decoder.set_data_range(
+                                    self.num_buffered_values as usize,
+                                    &buf,
+                                    offset,
+                                    def_levels_byte_len as usize,
+                                );
+                                offset += bytes_read;
+                                self.def_level_decoder = Some(def_decoder);
+                            }
+
+                            self.set_current_page_encoding(
+                                encoding,
+                                &buf,
+                                offset,
+                                num_values as usize,
+                            )?;
+                            return Ok(true);
+                        }
+                    };
+                }
+            }
+        }
+
+        Ok(true)
+    }
+
+    /// Resolves and updates the encoding and sets the decoder for the current page
+    fn set_current_page_encoding(
+        &mut self,
+        mut encoding: Encoding,
+        buffer_ptr: &ByteBufferPtr,
+        offset: usize,
+        len: usize,
+    ) -> Result<()> {
+        if encoding == Encoding::PLAIN_DICTIONARY {
+            encoding = Encoding::RLE_DICTIONARY;
+        }
+
+        let decoder = if encoding == Encoding::RLE_DICTIONARY {
+            self.decoders
+                .get_mut(&encoding)
+                .expect("Decoder for dict should have been set")
+        } else {
+            // Search cache for data page decoder
+            if !self.decoders.contains_key(&encoding) {
+                // Initialize decoder for this page
+                let data_decoder = get_decoder::<T>(self.descr.clone(), encoding)?;
+                self.decoders.insert(encoding, data_decoder);
+            }
+            self.decoders.get_mut(&encoding).unwrap()
+        };
+
+        decoder.set_data(buffer_ptr.start_from(offset), len as usize)?;
+        self.current_encoding = Some(encoding);
+        Ok(())
+    }
+
+    #[inline]
+    fn has_next(&mut self) -> Result<bool> {
+        if self.num_buffered_values == 0 || self.num_buffered_values == self.num_decoded_values {
+            // TODO: should we return false if read_new_page() = true and
+            // num_buffered_values = 0?
+            if !self.read_new_page()? {
+                Ok(false)
+            } else {
+                Ok(self.num_buffered_values != 0)
+            }
+        } else {
+            Ok(true)
+        }
+    }
+
+    #[inline]
+    fn read_rep_levels(&mut self, buffer: &mut [i16]) -> Result<usize> {
+        let level_decoder = self
+            .rep_level_decoder
+            .as_mut()
+            .expect("rep_level_decoder be set");
+        level_decoder.get(buffer)
+    }
+
+    #[inline]
+    fn read_def_levels(&mut self, buffer: &mut [i16]) -> Result<usize> {
+        let level_decoder = self
+            .def_level_decoder
+            .as_mut()
+            .expect("def_level_decoder be set");
+        level_decoder.get(buffer)
+    }
+
+    #[inline]
+    fn read_values(&mut self, buffer: &mut [T::T]) -> Result<usize> {
+        let encoding = self
+            .current_encoding
+            .expect("current_encoding should be set");
+        let current_decoder = self
+            .decoders
+            .get_mut(&encoding)
+            .expect(format!("decoder for encoding {} should be set", encoding).as_str());
+        current_decoder.get(buffer)
+    }
+
+    #[inline]
+    fn configure_dictionary(&mut self, page: Page) -> Result<bool> {
+        let mut encoding = page.encoding();
+        if encoding == Encoding::PLAIN || encoding == Encoding::PLAIN_DICTIONARY {
+            encoding = Encoding::RLE_DICTIONARY
+        }
+
+        if self.decoders.contains_key(&encoding) {
+            return Err(general_err!("Column cannot have more than one dictionary"));
+        }
+
+        if encoding == Encoding::RLE_DICTIONARY {
+            let mut dictionary = PlainDecoder::<T>::new(self.descr.type_length());
+            let num_values = page.num_values();
+            dictionary.set_data(page.buffer().clone(), num_values as usize)?;
+
+            let mut decoder = DictDecoder::new();
+            decoder.set_dict(Box::new(dictionary))?;
+            self.decoders.insert(encoding, Box::new(decoder));
+            Ok(true)
+        } else {
+            Err(nyi_err!(
+                "Invalid/Unsupported encoding type for dictionary: {}",
+                encoding
+            ))
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use rand::distributions::range::SampleRange;
+    use std::{collections::VecDeque, rc::Rc, vec::IntoIter};
+
+    use crate::parquet::basic::Type as PhysicalType;
+    use crate::parquet::column::page::Page;
+    use crate::parquet::encodings::{
+        encoding::{get_encoder, DictEncoder, Encoder},
+        levels::{max_buffer_size, LevelEncoder},
+    };
+    use crate::parquet::schema::types::{ColumnDescriptor, ColumnPath, Type as SchemaType};
+    use crate::parquet::util::{
+        memory::{ByteBufferPtr, MemTracker, MemTrackerPtr},
+        test_common::random_numbers_range,
+    };
+
+    const NUM_LEVELS: usize = 128;
+    const NUM_PAGES: usize = 2;
+    const MAX_DEF_LEVEL: i16 = 5;
+    const MAX_REP_LEVEL: i16 = 5;
+
+    // Macro to generate test cases
+    macro_rules! test {
+        // branch for generating i32 cases
+        ($test_func:ident, i32, $func:ident, $def_level:expr, $rep_level:expr,
+     $num_pages:expr, $num_levels:expr, $batch_size:expr, $min:expr, $max:expr) => {
+            test_internal!(
+                $test_func,
+                Int32Type,
+                get_test_int32_type,
+                $func,
+                $def_level,
+                $rep_level,
+                $num_pages,
+                $num_levels,
+                $batch_size,
+                $min,
+                $max
+            );
+        };
+        // branch for generating i64 cases
+        ($test_func:ident, i64, $func:ident, $def_level:expr, $rep_level:expr,
+     $num_pages:expr, $num_levels:expr, $batch_size:expr, $min:expr, $max:expr) => {
+            test_internal!(
+                $test_func,
+                Int64Type,
+                get_test_int64_type,
+                $func,
+                $def_level,
+                $rep_level,
+                $num_pages,
+                $num_levels,
+                $batch_size,
+                $min,
+                $max
+            );
+        };
+    }
+
+    macro_rules! test_internal {
+        ($test_func:ident, $ty:ident, $pty:ident, $func:ident, $def_level:expr,
+     $rep_level:expr, $num_pages:expr, $num_levels:expr, $batch_size:expr,
+     $min:expr, $max:expr) => {
+            #[test]
+            fn $test_func() {
+                let desc = Rc::new(ColumnDescriptor::new(
+                    Rc::new($pty()),
+                    None,
+                    $def_level,
+                    $rep_level,
+                    ColumnPath::new(Vec::new()),
+                ));
+                let mut tester = ColumnReaderTester::<$ty>::new();
+                tester.$func(desc, $num_pages, $num_levels, $batch_size, $min, $max);
+            }
+        };
+    }
+
+    test!(
+        test_read_plain_v1_int32,
+        i32,
+        plain_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+    test!(
+        test_read_plain_v2_int32,
+        i32,
+        plain_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+
+    test!(
+        test_read_plain_v1_int32_uneven,
+        i32,
+        plain_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        17,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+    test!(
+        test_read_plain_v2_int32_uneven,
+        i32,
+        plain_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        17,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+
+    test!(
+        test_read_plain_v1_int32_multi_page,
+        i32,
+        plain_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        512,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+    test!(
+        test_read_plain_v2_int32_multi_page,
+        i32,
+        plain_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        512,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+
+    // test cases when column descriptor has MAX_DEF_LEVEL = 0 and MAX_REP_LEVEL = 0
+    test!(
+        test_read_plain_v1_int32_required_non_repeated,
+        i32,
+        plain_v1,
+        0,
+        0,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+    test!(
+        test_read_plain_v2_int32_required_non_repeated,
+        i32,
+        plain_v2,
+        0,
+        0,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i32::MIN,
+        ::std::i32::MAX
+    );
+
+    test!(
+        test_read_plain_v1_int64,
+        i64,
+        plain_v1,
+        1,
+        1,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+    test!(
+        test_read_plain_v2_int64,
+        i64,
+        plain_v2,
+        1,
+        1,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+
+    test!(
+        test_read_plain_v1_int64_uneven,
+        i64,
+        plain_v1,
+        1,
+        1,
+        NUM_PAGES,
+        NUM_LEVELS,
+        17,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+    test!(
+        test_read_plain_v2_int64_uneven,
+        i64,
+        plain_v2,
+        1,
+        1,
+        NUM_PAGES,
+        NUM_LEVELS,
+        17,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+
+    test!(
+        test_read_plain_v1_int64_multi_page,
+        i64,
+        plain_v1,
+        1,
+        1,
+        NUM_PAGES,
+        NUM_LEVELS,
+        512,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+    test!(
+        test_read_plain_v2_int64_multi_page,
+        i64,
+        plain_v2,
+        1,
+        1,
+        NUM_PAGES,
+        NUM_LEVELS,
+        512,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+
+    // test cases when column descriptor has MAX_DEF_LEVEL = 0 and MAX_REP_LEVEL = 0
+    test!(
+        test_read_plain_v1_int64_required_non_repeated,
+        i64,
+        plain_v1,
+        0,
+        0,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+    test!(
+        test_read_plain_v2_int64_required_non_repeated,
+        i64,
+        plain_v2,
+        0,
+        0,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        ::std::i64::MIN,
+        ::std::i64::MAX
+    );
+
+    test!(
+        test_read_dict_v1_int32_small,
+        i32,
+        dict_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        2,
+        2,
+        16,
+        0,
+        3
+    );
+    test!(
+        test_read_dict_v2_int32_small,
+        i32,
+        dict_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        2,
+        2,
+        16,
+        0,
+        3
+    );
+
+    test!(
+        test_read_dict_v1_int32,
+        i32,
+        dict_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        0,
+        3
+    );
+    test!(
+        test_read_dict_v2_int32,
+        i32,
+        dict_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        0,
+        3
+    );
+
+    test!(
+        test_read_dict_v1_int32_uneven,
+        i32,
+        dict_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        17,
+        0,
+        3
+    );
+    test!(
+        test_read_dict_v2_int32_uneven,
+        i32,
+        dict_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        17,
+        0,
+        3
+    );
+
+    test!(
+        test_read_dict_v1_int32_multi_page,
+        i32,
+        dict_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        512,
+        0,
+        3
+    );
+    test!(
+        test_read_dict_v2_int32_multi_page,
+        i32,
+        dict_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        512,
+        0,
+        3
+    );
+
+    test!(
+        test_read_dict_v1_int64,
+        i64,
+        dict_v1,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        0,
+        3
+    );
+    test!(
+        test_read_dict_v2_int64,
+        i64,
+        dict_v2,
+        MAX_DEF_LEVEL,
+        MAX_REP_LEVEL,
+        NUM_PAGES,
+        NUM_LEVELS,
+        16,
+        0,
+        3
+    );
+
+    #[test]
+    fn test_read_batch_values_only() {
+        test_read_batch_int32(16, &mut vec![0; 10], None, None); // < batch_size
+        test_read_batch_int32(16, &mut vec![0; 16], None, None); // == batch_size
+        test_read_batch_int32(16, &mut vec![0; 51], None, None); // > batch_size
+    }
+
+    #[test]
+    fn test_read_batch_values_def_levels() {
+        test_read_batch_int32(16, &mut vec![0; 10], Some(&mut vec![0; 10]), None);
+        test_read_batch_int32(16, &mut vec![0; 16], Some(&mut vec![0; 16]), None);
+        test_read_batch_int32(16, &mut vec![0; 51], Some(&mut vec![0; 51]), None);
+    }
+
+    #[test]
+    fn test_read_batch_values_rep_levels() {
+        test_read_batch_int32(16, &mut vec![0; 10], None, Some(&mut vec![0; 10]));
+        test_read_batch_int32(16, &mut vec![0; 16], None, Some(&mut vec![0; 16]));
+        test_read_batch_int32(16, &mut vec![0; 51], None, Some(&mut vec![0; 51]));
+    }
+
+    #[test]
+    fn test_read_batch_different_buf_sizes() {
+        test_read_batch_int32(
+            16,
+            &mut vec![0; 8],
+            Some(&mut vec![0; 9]),
+            Some(&mut vec![0; 7]),
+        );
+        test_read_batch_int32(
+            16,
+            &mut vec![0; 1],
+            Some(&mut vec![0; 9]),
+            Some(&mut vec![0; 3]),
+        );
+    }
+
+    #[test]
+    fn test_read_batch_values_def_rep_levels() {
+        test_read_batch_int32(
+            128,
+            &mut vec![0; 128],
+            Some(&mut vec![0; 128]),
+            Some(&mut vec![0; 128]),
+        );
+    }
+
+    #[test]
+    fn test_read_batch_adjust_after_buffering_page() {
+        // This test covers the scenario where buffering a new page resets the number
+        // of decoded values to 0, so we try to read `batch_size` values, but that is
+        // more than we can insert into the provided slice (affects values and levels).
+        //
+        // Note: the values below are chosen to reproduce the issue.
+        //
+        let primitive_type = get_test_int32_type();
+        let desc = Rc::new(ColumnDescriptor::new(
+            Rc::new(primitive_type),
+            None,
+            1,
+            1,
+            ColumnPath::new(Vec::new()),
+        ));
+
+        let num_pages = 2;
+        let num_levels = 4;
+        let batch_size = 5;
+        let values = &mut vec![0; 7];
+        let def_levels = &mut vec![0; 7];
+        let rep_levels = &mut vec![0; 7];
+
+        let mut tester = ColumnReaderTester::<Int32Type>::new();
+        tester.test_read_batch(
+            desc,
+            Encoding::RLE_DICTIONARY,
+            num_pages,
+            num_levels,
+            batch_size,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            values,
+            Some(def_levels),
+            Some(rep_levels),
+            false,
+        );
+    }
+
+    // ----------------------------------------------------------------------
+    // Helper methods to make pages and test
+    //
+    // # Overview
+    //
+    // Most of the test functionality is implemented in `ColumnReaderTester`, which
+    // provides some general data page test methods:
+    // - `test_read_batch_general`
+    // - `test_read_batch`
+    //
+    // There are also some high level wrappers that are part of `ColumnReaderTester`:
+    // - `plain_v1` -> call `test_read_batch_general` with data page v1 and plain encoding
+    // - `plain_v2` -> call `test_read_batch_general` with data page v2 and plain encoding
+    // - `dict_v1` -> call `test_read_batch_general` with data page v1 + dictionary page
+    // - `dict_v2` -> call `test_read_batch_general` with data page v2 + dictionary page
+    //
+    // There are also higher level wrappers that simplify writing near-identical test
+    // cases:
+    // - `get_test_int32_type`, provides a dummy schema type
+    // - `get_test_int64_type`, provides a dummy schema type
+    // - `test_read_batch_int32`, a wrapper for `read_batch` tests, which are essentially
+    //   the same apart from def/rep levels and batch size.
+    //
+    // # Page assembly
+    //
+    // Page construction and the generation of values, definition and repetition levels
+    // happen in the `make_pages` function.
+    // All values are randomly generated from the provided min/max; levels are derived
+    // from the max level of the column descriptor (in these tests the column is either
+    // INT32 or INT64) and the `levels_per_page` variable.
+    //
+    // We use `DataPageBuilder` and its implementation `DataPageBuilderImpl` to actually
+    // turn values, definition and repetition levels into data pages (either v1 or v2).
+    //
+    // Those data pages are then stored in a `TestPageReader` (we pass the vector of
+    // generated pages directly), which implements the `PageReader` interface.
+    //
+    // # Comparison
+    //
+    // This allows us to pass the test page reader into a column reader, so we can
+    // exercise the column reader itself - see `test_read_batch`, where we create a
+    // column reader, convert it to a typed column reader, buffer values via the
+    // `read_batch` method, and compare the output with the generated data.
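+    //
+    // For instance (illustrative only), a minimal end-to-end flow for one test case
+    // looks like:
+    //
+    //     let desc = Rc::new(ColumnDescriptor::new(
+    //         Rc::new(get_test_int32_type()),
+    //         None,
+    //         MAX_DEF_LEVEL,
+    //         MAX_REP_LEVEL,
+    //         ColumnPath::new(Vec::new()),
+    //     ));
+    //     let mut tester = ColumnReaderTester::<Int32Type>::new();
+    //     tester.plain_v1(desc, NUM_PAGES, NUM_LEVELS, 16, ::std::i32::MIN, ::std::i32::MAX);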
+
+    // Returns a dummy Parquet `Type` for a primitive field, since most of our tests use
+    // the INT32 physical type.
+    fn get_test_int32_type() -> SchemaType {
+        SchemaType::primitive_type_builder("a", PhysicalType::INT32)
+            .with_repetition(Repetition::REQUIRED)
+            .with_logical_type(LogicalType::INT_32)
+            .with_length(-1)
+            .build()
+            .expect("build() should be OK")
+    }
+
+    // Returns a dummy Parquet `Type` for the INT64 physical type.
+    fn get_test_int64_type() -> SchemaType {
+        SchemaType::primitive_type_builder("a", PhysicalType::INT64)
+            .with_repetition(Repetition::REQUIRED)
+            .with_logical_type(LogicalType::INT_64)
+            .with_length(-1)
+            .build()
+            .expect("build() should be OK")
+    }
+
+    // Tests `read_batch()` functionality for INT32.
+    //
+    // This is a high level wrapper on `ColumnReaderTester` that handles the boilerplate
+    // of setting up definition/repetition levels and the column descriptor.
+    fn test_read_batch_int32(
+        batch_size: usize,
+        values: &mut [i32],
+        def_levels: Option<&mut [i16]>,
+        rep_levels: Option<&mut [i16]>,
+    ) {
+        let primitive_type = get_test_int32_type();
+        // make the field required or optional depending on which level slices are provided
+        let max_def_level = if def_levels.is_some() {
+            MAX_DEF_LEVEL
+        } else {
+            0
+        };
+        let max_rep_level = if rep_levels.is_some() {
+            MAX_REP_LEVEL
+        } else {
+            0
+        };
+
+        let desc = Rc::new(ColumnDescriptor::new(
+            Rc::new(primitive_type),
+            None,
+            max_def_level,
+            max_rep_level,
+            ColumnPath::new(Vec::new()),
+        ));
+        let mut tester = ColumnReaderTester::<Int32Type>::new();
+        tester.test_read_batch(
+            desc,
+            Encoding::RLE_DICTIONARY,
+            NUM_PAGES,
+            NUM_LEVELS,
+            batch_size,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            values,
+            def_levels,
+            rep_levels,
+            false,
+        );
+    }
+
+    struct ColumnReaderTester<T: DataType>
+    where
+        T::T: PartialOrd + SampleRange + Copy,
+    {
+        rep_levels: Vec<i16>,
+        def_levels: Vec<i16>,
+        values: Vec<T::T>,
+    }
+
+    impl<T: DataType> ColumnReaderTester<T>
+    where
+        T::T: PartialOrd + SampleRange + Copy,
+    {
+        pub fn new() -> Self {
+            Self {
+                rep_levels: Vec::new(),
+                def_levels: Vec::new(),
+                values: Vec::new(),
+            }
+        }
+
+        // Method to generate and test data pages v1
+        fn plain_v1(
+            &mut self,
+            desc: ColumnDescPtr,
+            num_pages: usize,
+            num_levels: usize,
+            batch_size: usize,
+            min: T::T,
+            max: T::T,
+        ) {
+            self.test_read_batch_general(
+                desc,
+                Encoding::PLAIN,
+                num_pages,
+                num_levels,
+                batch_size,
+                min,
+                max,
+                false,
+            );
+        }
+
+        // Method to generate and test data pages v2
+        fn plain_v2(
+            &mut self,
+            desc: ColumnDescPtr,
+            num_pages: usize,
+            num_levels: usize,
+            batch_size: usize,
+            min: T::T,
+            max: T::T,
+        ) {
+            self.test_read_batch_general(
+                desc,
+                Encoding::PLAIN,
+                num_pages,
+                num_levels,
+                batch_size,
+                min,
+                max,
+                true,
+            );
+        }
+
+        // Method to generate and test dictionary page + data pages v1
+        fn dict_v1(
+            &mut self,
+            desc: ColumnDescPtr,
+            num_pages: usize,
+            num_levels: usize,
+            batch_size: usize,
+            min: T::T,
+            max: T::T,
+        ) {
+            self.test_read_batch_general(
+                desc,
+                Encoding::RLE_DICTIONARY,
+                num_pages,
+                num_levels,
+                batch_size,
+                min,
+                max,
+                false,
+            );
+        }
+
+        // Method to generate and test dictionary page + data pages v2
+        fn dict_v2(
+            &mut self,
+            desc: ColumnDescPtr,
+            num_pages: usize,
+            num_levels: usize,
+            batch_size: usize,
+            min: T::T,
+            max: T::T,
+        ) {
+            self.test_read_batch_general(
+                desc,
+                Encoding::RLE_DICTIONARY,
+                num_pages,
+                num_levels,
+                batch_size,
+                min,
+                max,
+                true,
+            );
+        }
+
+        // Helper function for the general case of `read_batch()` where `values`,
+        // `def_levels` and `rep_levels` are always provided with enough space.
+        fn test_read_batch_general(
+            &mut self,
+            desc: ColumnDescPtr,
+            encoding: Encoding,
+            num_pages: usize,
+            num_levels: usize,
+            batch_size: usize,
+            min: T::T,
+            max: T::T,
+            use_v2: bool,
+        ) {
+            let mut def_levels = vec![0; num_levels * num_pages];
+            let mut rep_levels = vec![0; num_levels * num_pages];
+            let mut values = vec![T::T::default(); num_levels * num_pages];
+            self.test_read_batch(
+                desc,
+                encoding,
+                num_pages,
+                num_levels,
+                batch_size,
+                min,
+                max,
+                &mut values,
+                Some(&mut def_levels),
+                Some(&mut rep_levels),
+                use_v2,
+            );
+        }
+
+        // Helper function to test `read_batch()` method with custom buffers for values,
+        // definition and repetition levels.
+        fn test_read_batch(
+            &mut self,
+            desc: ColumnDescPtr,
+            encoding: Encoding,
+            num_pages: usize,
+            num_levels: usize,
+            batch_size: usize,
+            min: T::T,
+            max: T::T,
+            values: &mut [T::T],
+            mut def_levels: Option<&mut [i16]>,
+            mut rep_levels: Option<&mut [i16]>,
+            use_v2: bool,
+        ) {
+            let mut pages = VecDeque::new();
+            make_pages::<T>(
+                desc.clone(),
+                encoding,
+                num_pages,
+                num_levels,
+                min,
+                max,
+                &mut self.def_levels,
+                &mut self.rep_levels,
+                &mut self.values,
+                &mut pages,
+                use_v2,
+            );
+            let max_def_level = desc.max_def_level();
+            let page_reader = TestPageReader::new(Vec::from(pages));
+            let column_reader: ColumnReader = get_column_reader(desc, Box::new(page_reader));
+            let mut typed_column_reader = get_typed_column_reader::<T>(column_reader);
+
+            let mut curr_values_read = 0;
+            let mut curr_levels_read = 0;
+            let mut done = false;
+            while !done {
+                let actual_def_levels = match &mut def_levels {
+                    Some(ref mut vec) => Some(&mut vec[curr_levels_read..]),
+                    None => None,
+                };
+                let actual_rep_levels = match rep_levels {
+                    Some(ref mut vec) => Some(&mut vec[curr_levels_read..]),
+                    None => None,
+                };
+
+                let (values_read, levels_read) = typed_column_reader
+                    .read_batch(
+                        batch_size,
+                        actual_def_levels,
+                        actual_rep_levels,
+                        &mut values[curr_values_read..],
+                    )
+                    .expect("read_batch() should be OK");
+
+                if values_read == 0 && levels_read == 0 {
+                    done = true;
+                }
+
+                curr_values_read += values_read;
+                curr_levels_read += levels_read;
+            }
+
+            assert!(
+                values.len() >= curr_values_read,
+                "values.len() >= values_read"
+            );
+            assert_eq!(
+                &values[0..curr_values_read],
+                &self.values[0..curr_values_read],
+                "values content doesn't match"
+            );
+
+            if let Some(ref levels) = def_levels {
+                assert!(
+                    levels.len() >= curr_levels_read,
+                    "def_levels.len() >= levels_read"
+                );
+                assert_eq!(
+                    &levels[0..curr_levels_read],
+                    &self.def_levels[0..curr_levels_read],
+                    "definition levels content doesn't match"
+                );
+            }
+
+            if let Some(ref levels) = rep_levels {
+                assert!(
+                    levels.len() >= curr_levels_read,
+                    "rep_levels.len() >= levels_read"
+                );
+                assert_eq!(
+                    &levels[0..curr_levels_read],
+                    &self.rep_levels[0..curr_levels_read],
+                    "repetition levels content doesn't match"
+                );
+            }
+
+            if def_levels.is_none() && rep_levels.is_none() {
+                assert!(
+                    curr_levels_read == 0,
+                    "expected to read 0 levels, found {}",
+                    curr_levels_read
+                );
+            } else if def_levels.is_some() && max_def_level > 0 {
+                assert!(
+                    curr_levels_read >= curr_values_read,
+                    "expected levels read to be greater than values read"
+                );
+            }
+        }
+    }
+
+    struct TestPageReader {
+        pages: IntoIter<Page>,
+    }
+
+    impl TestPageReader {
+        pub fn new(pages: Vec<Page>) -> Self {
+            Self {
+                pages: pages.into_iter(),
+            }
+        }
+    }
+
+    impl PageReader for TestPageReader {
+        fn get_next_page(&mut self) -> Result<Option<Page>> {
+            Ok(self.pages.next())
+        }
+    }
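+
+    // Note: `get_next_page` returns `Ok(None)` once the vector is exhausted; this is
+    // what terminates the read loop in `test_read_batch` above, which stops when a
+    // `read_batch` call reads zero values and zero levels.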
+
+    // ----------------------------------------------------------------------
+    // Utility functions for generating testing pages
+
+    trait DataPageBuilder {
+        fn add_rep_levels(&mut self, max_level: i16, rep_levels: &[i16]);
+        fn add_def_levels(&mut self, max_level: i16, def_levels: &[i16]);
+        fn add_values<T: DataType>(&mut self, encoding: Encoding, values: &[T::T]);
+        fn add_indices(&mut self, indices: ByteBufferPtr);
+        fn consume(self) -> Page;
+    }
+
+    /// A utility struct for building data pages (v1 or v2). Callers must call:
+    ///   - add_rep_levels()
+    ///   - add_def_levels()
+    ///   - add_values() for a normal data page / add_indices() for a dictionary-encoded data page
+    ///   - consume()
+    /// in order to populate and obtain a data page.
+    struct DataPageBuilderImpl {
+        desc: ColumnDescPtr,
+        encoding: Option<Encoding>,
+        mem_tracker: MemTrackerPtr,
+        num_values: u32,
+        buffer: Vec<u8>,
+        rep_levels_byte_len: u32,
+        def_levels_byte_len: u32,
+        datapage_v2: bool,
+    }
+
+    impl DataPageBuilderImpl {
+        // `num_values` is the number of non-null values to put in the data page.
+        // The `datapage_v2` flag indicates whether the generated data page should use
+        // the V2 format.
+        fn new(desc: ColumnDescPtr, num_values: u32, datapage_v2: bool) -> Self {
+            DataPageBuilderImpl {
+                desc,
+                encoding: None,
+                mem_tracker: Rc::new(MemTracker::new()),
+                num_values,
+                buffer: vec![],
+                rep_levels_byte_len: 0,
+                def_levels_byte_len: 0,
+                datapage_v2,
+            }
+        }
+
+        // Adds levels to the buffer and returns the number of encoded bytes.
+        fn add_levels(&mut self, max_level: i16, levels: &[i16]) -> u32 {
+            let size = max_buffer_size(Encoding::RLE, max_level, levels.len());
+            let mut level_encoder = LevelEncoder::v1(Encoding::RLE, max_level, vec![0; size]);
+            level_encoder.put(levels).expect("put() should be OK");
+            let encoded_levels = level_encoder.consume().expect("consume() should be OK");
+            // Actual encoded bytes (without length offset)
+            let encoded_bytes = &encoded_levels[mem::size_of::<i32>()..];
+            if self.datapage_v2 {
+                // The level encoder always reserves an i32 prefix where it stores the
+                // length of the encoded data; for data page v2 the length is stored
+                // explicitly in the page header, so we skip those i32 bytes here.
+                self.buffer.extend_from_slice(encoded_bytes);
+            } else {
+                self.buffer.extend_from_slice(encoded_levels.as_slice());
+            }
+            encoded_bytes.len() as u32
+        }
+    }
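+
+    // A typical call sequence (illustrative; it mirrors what `make_pages` below does)
+    // is:
+    //
+    //     let mut pb = DataPageBuilderImpl::new(desc.clone(), num_values as u32, false);
+    //     pb.add_rep_levels(max_rep_level, &rep_levels);
+    //     pb.add_def_levels(max_def_level, &def_levels);
+    //     pb.add_values::<Int32Type>(Encoding::PLAIN, &values);
+    //     let page = pb.consume();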
+
+    impl DataPageBuilder for DataPageBuilderImpl {
+        fn add_rep_levels(&mut self, max_levels: i16, rep_levels: &[i16]) {
+            self.num_values = rep_levels.len() as u32;
+            self.rep_levels_byte_len = self.add_levels(max_levels, rep_levels);
+        }
+
+        fn add_def_levels(&mut self, max_levels: i16, def_levels: &[i16]) {
+            assert!(
+                self.num_values == def_levels.len() as u32,
+                "Must call `add_rep_levels() first!`"
+            );
+
+            self.def_levels_byte_len = self.add_levels(max_levels, def_levels);
+        }
+
+        fn add_values<T: DataType>(&mut self, encoding: Encoding, values: &[T::T]) {
+            assert!(
+                self.num_values >= values.len() as u32,
+                "num_values: {}, values.len(): {}",
+                self.num_values,
+                values.len()
+            );
+            self.encoding = Some(encoding);
+            let mut encoder: Box<Encoder<T>> =
+                get_encoder::<T>(self.desc.clone(), encoding, self.mem_tracker.clone())
+                    .expect("get_encoder() should be OK");
+            encoder.put(values).expect("put() should be OK");
+            let encoded_values = encoder
+                .flush_buffer()
+                .expect("consume_buffer() should be OK");
+            self.buffer.extend_from_slice(encoded_values.data());
+        }
+
+        fn add_indices(&mut self, indices: ByteBufferPtr) {
+            self.encoding = Some(Encoding::RLE_DICTIONARY);
+            self.buffer.extend_from_slice(indices.data());
+        }
+
+        fn consume(self) -> Page {
+            if self.datapage_v2 {
+                Page::DataPageV2 {
+                    buf: ByteBufferPtr::new(self.buffer),
+                    num_values: self.num_values,
+                    encoding: self.encoding.unwrap(),
+                    num_nulls: 0, // set to dummy value - don't need this when reading data page
+                    num_rows: self.num_values, // also don't need this when reading data page
+                    def_levels_byte_len: self.def_levels_byte_len,
+                    rep_levels_byte_len: self.rep_levels_byte_len,
+                    is_compressed: false,
+                    statistics: None, // set to None, we do not need statistics for tests
+                }
+            } else {
+                Page::DataPage {
+                    buf: ByteBufferPtr::new(self.buffer),
+                    num_values: self.num_values,
+                    encoding: self.encoding.unwrap(),
+                    def_level_encoding: Encoding::RLE,
+                    rep_level_encoding: Encoding::RLE,
+                    statistics: None, // set to None, we do not need statistics for tests
+                }
+            }
+        }
+    }
+
+    fn make_pages<T: DataType>(
+        desc: ColumnDescPtr,
+        encoding: Encoding,
+        num_pages: usize,
+        levels_per_page: usize,
+        min: T::T,
+        max: T::T,
+        def_levels: &mut Vec<i16>,
+        rep_levels: &mut Vec<i16>,
+        values: &mut Vec<T::T>,
+        pages: &mut VecDeque<Page>,
+        use_v2: bool,
+    ) where
+        T::T: PartialOrd + SampleRange + Copy,
+    {
+        let mut num_values = 0;
+        let max_def_level = desc.max_def_level();
+        let max_rep_level = desc.max_rep_level();
+
+        let mem_tracker = Rc::new(MemTracker::new());
+        let mut dict_encoder = DictEncoder::<T>::new(desc.clone(), mem_tracker);
+
+        for i in 0..num_pages {
+            let mut num_values_cur_page = 0;
+            let level_range = i * levels_per_page..(i + 1) * levels_per_page;
+
+            if max_def_level > 0 {
+                random_numbers_range(levels_per_page, 0, max_def_level + 1, def_levels);
+                for dl in &def_levels[level_range.clone()] {
+                    if *dl == max_def_level {
+                        num_values_cur_page += 1;
+                    }
+                }
+            } else {
+                num_values_cur_page = levels_per_page;
+            }
+            if max_rep_level > 0 {
+                random_numbers_range(levels_per_page, 0, max_rep_level + 1, rep_levels);
+            }
+            random_numbers_range(num_values_cur_page, min, max, values);
+
+            // Generate the current page
+
+            let mut pb = DataPageBuilderImpl::new(desc.clone(), num_values_cur_page as u32, use_v2);
+            if max_rep_level > 0 {
+                pb.add_rep_levels(max_rep_level, &rep_levels[level_range.clone()]);
+            }
+            if max_def_level > 0 {
+                pb.add_def_levels(max_def_level, &def_levels[level_range]);
+            }
+
+            let value_range = num_values..num_values + num_values_cur_page;
+            match encoding {
+                Encoding::PLAIN_DICTIONARY | Encoding::RLE_DICTIONARY => {
+                    let _ = dict_encoder.put(&values[value_range.clone()]);
+                    let indices = dict_encoder
+                        .write_indices()
+                        .expect("write_indices() should be OK");
+                    pb.add_indices(indices);
+                }
+                Encoding::PLAIN => {
+                    pb.add_values::<T>(encoding, &values[value_range]);
+                }
+                enc => panic!("Unexpected encoding {}", enc),
+            }
+
+            let data_page = pb.consume();
+            pages.push_back(data_page);
+            num_values += num_values_cur_page;
+        }
+
+        if encoding == Encoding::PLAIN_DICTIONARY || encoding == Encoding::RLE_DICTIONARY {
+            let dict = dict_encoder
+                .write_dict()
+                .expect("write_dict() should be OK");
+            let dict_page = Page::DictionaryPage {
+                buf: dict,
+                num_values: dict_encoder.num_entries() as u32,
+                encoding: Encoding::RLE_DICTIONARY,
+                is_sorted: false,
+            };
+            pages.push_front(dict_page);
+        }
+    }
+}
diff --git a/rust/src/parquet/column/writer.rs b/rust/src/parquet/column/writer.rs
new file mode 100644
index 0000000..4798d9a
--- /dev/null
+++ b/rust/src/parquet/column/writer.rs
@@ -0,0 +1,1617 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains column writer API.
+
+use std::{cmp, collections::VecDeque, mem, rc::Rc};
+
+use crate::parquet::basic::{Compression, Encoding, PageType, Type};
+use crate::parquet::column::page::{CompressedPage, Page, PageWriteSpec, PageWriter};
+use crate::parquet::compression::{create_codec, Codec};
+use crate::parquet::data_type::*;
+use crate::parquet::encodings::{
+    encoding::{get_encoder, DictEncoder, Encoder},
+    levels::{max_buffer_size, LevelEncoder},
+};
+use crate::parquet::errors::{ParquetError, Result};
+use crate::parquet::file::{
+    metadata::ColumnChunkMetaData,
+    properties::{WriterProperties, WriterPropertiesPtr, WriterVersion},
+};
+use crate::parquet::schema::types::ColumnDescPtr;
+use crate::parquet::util::memory::{ByteBufferPtr, MemTracker};
+
+/// Column writer for a Parquet type.
+pub enum ColumnWriter {
+    BoolColumnWriter(ColumnWriterImpl<BoolType>),
+    Int32ColumnWriter(ColumnWriterImpl<Int32Type>),
+    Int64ColumnWriter(ColumnWriterImpl<Int64Type>),
+    Int96ColumnWriter(ColumnWriterImpl<Int96Type>),
+    FloatColumnWriter(ColumnWriterImpl<FloatType>),
+    DoubleColumnWriter(ColumnWriterImpl<DoubleType>),
+    ByteArrayColumnWriter(ColumnWriterImpl<ByteArrayType>),
+    FixedLenByteArrayColumnWriter(ColumnWriterImpl<FixedLenByteArrayType>),
+}
+
+/// Gets a specific column writer corresponding to column descriptor `descr`.
+pub fn get_column_writer(
+    descr: ColumnDescPtr,
+    props: WriterPropertiesPtr,
+    page_writer: Box<PageWriter>,
+) -> ColumnWriter {
+    match descr.physical_type() {
+        Type::BOOLEAN => {
+            ColumnWriter::BoolColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::INT32 => {
+            ColumnWriter::Int32ColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::INT64 => {
+            ColumnWriter::Int64ColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::INT96 => {
+            ColumnWriter::Int96ColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::FLOAT => {
+            ColumnWriter::FloatColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::DOUBLE => {
+            ColumnWriter::DoubleColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::BYTE_ARRAY => {
+            ColumnWriter::ByteArrayColumnWriter(ColumnWriterImpl::new(descr, props, page_writer))
+        }
+        Type::FIXED_LEN_BYTE_ARRAY => ColumnWriter::FixedLenByteArrayColumnWriter(
+            ColumnWriterImpl::new(descr, props, page_writer),
+        ),
+    }
+}
+
+/// Gets a typed column writer for the specific type `T`, by "up-casting" `col_writer` of
+/// non-generic type to a generic column writer type `ColumnWriterImpl`.
+///
+/// NOTE: the caller MUST guarantee that the actual enum variant of `col_writer` matches
+/// the type `T`. Otherwise, disastrous consequences could follow.
+pub fn get_typed_column_writer<T: DataType>(col_writer: ColumnWriter) -> ColumnWriterImpl<T> {
+    match col_writer {
+        ColumnWriter::BoolColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::Int32ColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::Int64ColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::Int96ColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::FloatColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::DoubleColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::ByteArrayColumnWriter(r) => unsafe { mem::transmute(r) },
+        ColumnWriter::FixedLenByteArrayColumnWriter(r) => unsafe { mem::transmute(r) },
+    }
+}
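+
+// For example (illustrative), the invariant is easiest to keep by deriving the type
+// parameter from the column's physical type:
+//
+//     let writer = get_column_writer(descr.clone(), props, page_writer);
+//     // Safe only because `descr.physical_type()` is Type::INT32 here:
+//     let mut typed_writer = get_typed_column_writer::<Int32Type>(writer);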
+
+/// Typed column writer for a primitive column.
+pub struct ColumnWriterImpl<T: DataType> {
+    // Column writer properties
+    descr: ColumnDescPtr,
+    props: WriterPropertiesPtr,
+    page_writer: Box<PageWriter>,
+    has_dictionary: bool,
+    dict_encoder: Option<DictEncoder<T>>,
+    encoder: Box<Encoder<T>>,
+    codec: Compression,
+    compressor: Option<Box<Codec>>,
+    // Metrics per page
+    num_buffered_values: u32,
+    num_buffered_encoded_values: u32,
+    num_buffered_rows: u32,
+    // Metrics per column writer
+    total_bytes_written: u64,
+    total_rows_written: u64,
+    total_uncompressed_size: u64,
+    total_compressed_size: u64,
+    total_num_values: u64,
+    dictionary_page_offset: Option<u64>,
+    data_page_offset: Option<u64>,
+    // Reused buffers
+    def_levels_sink: Vec<i16>,
+    rep_levels_sink: Vec<i16>,
+    data_pages: VecDeque<CompressedPage>,
+}
+
+impl<T: DataType> ColumnWriterImpl<T> {
+    pub fn new(
+        descr: ColumnDescPtr,
+        props: WriterPropertiesPtr,
+        page_writer: Box<PageWriter>,
+    ) -> Self {
+        let codec = props.compression(descr.path());
+        let compressor = create_codec(codec).unwrap();
+
+        // Optionally set dictionary encoder.
+        let dict_encoder =
+            if props.dictionary_enabled(descr.path()) && Self::has_dictionary_support(&props) {
+                Some(DictEncoder::new(descr.clone(), Rc::new(MemTracker::new())))
+            } else {
+                None
+            };
+
+        // Whether or not this column writer has a dictionary encoding.
+        let has_dictionary = dict_encoder.is_some();
+
+        // Set either main encoder or fallback encoder.
+        let fallback_encoder = get_encoder(
+            descr.clone(),
+            props
+                .encoding(descr.path())
+                .unwrap_or(Self::fallback_encoding(&props)),
+            Rc::new(MemTracker::new()),
+        )
+        .unwrap();
+
+        Self {
+            descr,
+            props,
+            page_writer,
+            has_dictionary,
+            dict_encoder,
+            encoder: fallback_encoder,
+            codec,
+            compressor,
+            num_buffered_values: 0,
+            num_buffered_encoded_values: 0,
+            num_buffered_rows: 0,
+            total_bytes_written: 0,
+            total_rows_written: 0,
+            total_uncompressed_size: 0,
+            total_compressed_size: 0,
+            total_num_values: 0,
+            dictionary_page_offset: None,
+            data_page_offset: None,
+            def_levels_sink: vec![],
+            rep_levels_sink: vec![],
+            data_pages: VecDeque::new(),
+        }
+    }
+
+    /// Writes batch of values, definition levels and repetition levels.
+    /// Returns number of values processed (written).
+    ///
+    /// If definition and repetition levels are provided, we write those levels fully
+    /// and select how many values to write (this number is returned), since the
+    /// number of actually written values may be smaller than the number of provided
+    /// values.
+    ///
+    /// If only values are provided, then all values are written and the length of
+    /// the values buffer is returned.
+    ///
+    /// Definition and/or repetition levels can be omitted, if values are
+    /// non-nullable and/or non-repeated.
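+    ///
+    /// For example (illustrative), for a column with max definition level 1, writing
+    /// `values = [1, 2, 3]` with `def_levels = Some(&[1, 1, 0, 1, 0])` writes all five
+    /// levels but only the three non-null values, and returns `Ok(3)`.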
+    pub fn write_batch(
+        &mut self,
+        values: &[T::T],
+        def_levels: Option<&[i16]>,
+        rep_levels: Option<&[i16]>,
+    ) -> Result<usize> {
+        // We check for DataPage limits only after we have inserted the values. If a user
+        // writes a large number of values, the DataPage size can be well above the limit.
+        //
+        // The purpose of this chunking is to bound that overshoot: even if a user
+        // writes a large number of values, the chunking ensures that we add data pages
+        // at a reasonable page size limit.
+
+        // TODO: find out why we don't account for size of levels when we estimate page size.
+
+        // Find out the minimal length to prevent index out of bound errors.
+        let mut min_len = values.len();
+        if let Some(levels) = def_levels {
+            min_len = cmp::min(min_len, levels.len());
+        }
+        if let Some(levels) = rep_levels {
+            min_len = cmp::min(min_len, levels.len());
+        }
+
+        // Find out number of batches to process.
+        let write_batch_size = self.props.write_batch_size();
+        let num_batches = min_len / write_batch_size;
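+        // E.g. (illustrative) with write_batch_size = 1024 and min_len = 2500 we get
+        // num_batches = 2: two full mini batches are written in the loop below, and the
+        // remaining 452 values/levels are written by the final call after the loop.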
+
+        let mut values_offset = 0;
+        let mut levels_offset = 0;
+
+        for _ in 0..num_batches {
+            values_offset += self.write_mini_batch(
+                &values[values_offset..values_offset + write_batch_size],
+                def_levels.map(|lv| &lv[levels_offset..levels_offset + write_batch_size]),
+                rep_levels.map(|lv| &lv[levels_offset..levels_offset + write_batch_size]),
+            )?;
+            levels_offset += write_batch_size;
+        }
+
+        values_offset += self.write_mini_batch(
+            &values[values_offset..],
+            def_levels.map(|lv| &lv[levels_offset..]),
+            rep_levels.map(|lv| &lv[levels_offset..]),
+        )?;
+
+        // Return total number of values processed.
+        Ok(values_offset)
+    }
+
+    /// Returns total number of bytes written by this column writer so far.
+    /// This value is also returned when column writer is closed.
+    pub fn get_total_bytes_written(&self) -> u64 {
+        self.total_bytes_written
+    }
+
+    /// Returns total number of rows written by this column writer so far.
+    /// This value is also returned when column writer is closed.
+    pub fn get_total_rows_written(&self) -> u64 {
+        self.total_rows_written
+    }
+
+    /// Finalises writes and closes the column writer.
+    /// Returns total bytes written, total rows written and column chunk metadata.
+    pub fn close(mut self) -> Result<(u64, u64, ColumnChunkMetaData)> {
+        if self.dict_encoder.is_some() {
+            self.write_dictionary_page()?;
+        }
+        self.flush_data_pages()?;
+        let metadata = self.write_column_metadata()?;
+        self.dict_encoder = None;
+        self.page_writer.close()?;
+
+        Ok((self.total_bytes_written, self.total_rows_written, metadata))
+    }
+
+    /// Writes a mini batch of values, definition and repetition levels.
+    /// This allows fine-grained processing of values while maintaining a reasonable
+    /// page size.
+    fn write_mini_batch(
+        &mut self,
+        values: &[T::T],
+        def_levels: Option<&[i16]>,
+        rep_levels: Option<&[i16]>,
+    ) -> Result<usize> {
+        let num_values;
+        let mut values_to_write = 0;
+
+        // Check if number of definition levels is the same as number of repetition levels.
+        if def_levels.is_some() && rep_levels.is_some() {
+            let def = def_levels.unwrap();
+            let rep = rep_levels.unwrap();
+            if def.len() != rep.len() {
+                return Err(general_err!(
+                    "Inconsistent length of definition and repetition levels: {} != {}",
+                    def.len(),
+                    rep.len()
+                ));
+            }
+        }
+
+        // Process definition levels and determine how many values to write.
+        if self.descr.max_def_level() > 0 {
+            if def_levels.is_none() {
+                return Err(general_err!(
+                    "Definition levels are required, because max definition level = {}",
+                    self.descr.max_def_level()
+                ));
+            }
+
+            let levels = def_levels.unwrap();
+            num_values = levels.len();
+            for &level in levels {
+                values_to_write += (level == self.descr.max_def_level()) as usize;
+            }
+
+            self.write_definition_levels(levels);
+        } else {
+            values_to_write = values.len();
+            num_values = values_to_write;
+        }
+
+        // Process repetition levels and determine how many rows we are about to process.
+        if self.descr.max_rep_level() > 0 {
+            // A row could contain more than one value.
+            if rep_levels.is_none() {
+                return Err(general_err!(
+                    "Repetition levels are required, because max repetition level = {}",
+                    self.descr.max_rep_level()
+                ));
+            }
+
+            // Count the number of times we start a new row (repetition level = 0).
+            let levels = rep_levels.unwrap();
+            for &level in levels {
+                self.num_buffered_rows += (level == 0) as u32;
+            }
+
+            self.write_repetition_levels(levels);
+        } else {
+            // Each value is exactly one row, so the row count equals the number of
+            // values; nulls are counted as well.
+            self.num_buffered_rows += num_values as u32;
+        }
+
+        // Check that we have enough values to write.
+        if values.len() < values_to_write {
+            return Err(general_err!(
+                "Expected to write {} values, but have only {}",
+                values_to_write,
+                values.len()
+            ));
+        }
+
+        // TODO: update page statistics
+
+        self.write_values(&values[0..values_to_write])?;
+
+        self.num_buffered_values += num_values as u32;
+        self.num_buffered_encoded_values += values_to_write as u32;
+
+        if self.should_add_data_page() {
+            self.add_data_page()?;
+        }
+
+        if self.should_dict_fallback() {
+            self.dict_fallback()?;
+        }
+
+        Ok(values_to_write)
+    }
+
+    #[inline]
+    fn write_definition_levels(&mut self, def_levels: &[i16]) {
+        self.def_levels_sink.extend_from_slice(def_levels);
+    }
+
+    #[inline]
+    fn write_repetition_levels(&mut self, rep_levels: &[i16]) {
+        self.rep_levels_sink.extend_from_slice(rep_levels);
+    }
+
+    #[inline]
+    fn write_values(&mut self, values: &[T::T]) -> Result<()> {
+        match self.dict_encoder {
+            Some(ref mut encoder) => encoder.put(values),
+            None => self.encoder.put(values),
+        }
+    }
+
+    /// Returns true if we need to fall back to non-dictionary encoding.
+    ///
+    /// We can only fall back if dictionary encoder is set and we have exceeded dictionary
+    /// size.
+    #[inline]
+    fn should_dict_fallback(&self) -> bool {
+        match self.dict_encoder {
+            Some(ref encoder) => {
+                encoder.dict_encoded_size() >= self.props.dictionary_pagesize_limit()
+            }
+            None => false,
+        }
+    }
+
+    /// Returns true if there is enough data for a data page, false otherwise.
+    #[inline]
+    fn should_add_data_page(&self) -> bool {
+        self.encoder.estimated_data_encoded_size() >= self.props.data_pagesize_limit()
+    }
+
+    /// Performs dictionary fallback.
+    /// Prepares and writes dictionary and all data pages into page writer.
+    fn dict_fallback(&mut self) -> Result<()> {
+        // At this point we know that we need to fall back.
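+        // The dictionary page must be written before the buffered data pages are
+        // flushed, since a Parquet column chunk places its dictionary page ahead of all
+        // data pages that reference it.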
+        self.write_dictionary_page()?;
+        self.flush_data_pages()?;
+        self.dict_encoder = None;
+        Ok(())
+    }
+
+    /// Adds a data page.
+    /// The page is either buffered (when dictionary encoding is in use) or written
+    /// directly to the sink.
+    fn add_data_page(&mut self) -> Result<()> {
+        // Extract encoded values
+        let value_bytes = match self.dict_encoder {
+            Some(ref mut encoder) => encoder.write_indices()?,
+            None => self.encoder.flush_buffer()?,
+        };
+
+        // Select encoding based on current encoder and writer version (v1 or v2).
+        let encoding = if self.dict_encoder.is_some() {
+            self.props.dictionary_data_page_encoding()
+        } else {
+            self.encoder.encoding()
+        };
+
+        let max_def_level = self.descr.max_def_level();
+        let max_rep_level = self.descr.max_rep_level();
+
+        let compressed_page = match self.props.writer_version() {
+            WriterVersion::PARQUET_1_0 => {
+                let mut buffer = vec![];
+
+                if max_rep_level > 0 {
+                    buffer.extend_from_slice(
+                        &self.encode_levels_v1(
+                            Encoding::RLE,
+                            &self.rep_levels_sink[..],
+                            max_rep_level,
+                        )?[..],
+                    );
+                }
+
+                if max_def_level > 0 {
+                    buffer.extend_from_slice(
+                        &self.encode_levels_v1(
+                            Encoding::RLE,
+                            &self.def_levels_sink[..],
+                            max_def_level,
+                        )?[..],
+                    );
+                }
+
+                buffer.extend_from_slice(value_bytes.data());
+                let uncompressed_size = buffer.len();
+
+                if let Some(ref mut cmpr) = self.compressor {
+                    let mut compressed_buf = Vec::with_capacity(value_bytes.data().len());
+                    cmpr.compress(&buffer[..], &mut compressed_buf)?;
+                    buffer = compressed_buf;
+                }
+
+                let data_page = Page::DataPage {
+                    buf: ByteBufferPtr::new(buffer),
+                    num_values: self.num_buffered_values,
+                    encoding,
+                    def_level_encoding: Encoding::RLE,
+                    rep_level_encoding: Encoding::RLE,
+                    // TODO: process statistics
+                    statistics: None,
+                };
+
+                CompressedPage::new(data_page, uncompressed_size)
+            }
+            WriterVersion::PARQUET_2_0 => {
+                let mut rep_levels_byte_len = 0;
+                let mut def_levels_byte_len = 0;
+                let mut buffer = vec![];
+
+                if max_rep_level > 0 {
+                    let levels = self.encode_levels_v2(&self.rep_levels_sink[..], max_rep_level)?;
+                    rep_levels_byte_len = levels.len();
+                    buffer.extend_from_slice(&levels[..]);
+                }
+
+                if max_def_level > 0 {
+                    let levels = self.encode_levels_v2(&self.def_levels_sink[..], max_def_level)?;
+                    def_levels_byte_len = levels.len();
+                    buffer.extend_from_slice(&levels[..]);
+                }
+
+                let uncompressed_size =
+                    rep_levels_byte_len + def_levels_byte_len + value_bytes.len();
+
+                // Data Page v2 compresses values only.
+                match self.compressor {
+                    Some(ref mut cmpr) => {
+                        let mut compressed_buf = Vec::with_capacity(value_bytes.data().len());
+                        cmpr.compress(value_bytes.data(), &mut compressed_buf)?;
+                        buffer.extend_from_slice(&compressed_buf[..]);
+                    }
+                    None => {
+                        buffer.extend_from_slice(value_bytes.data());
+                    }
+                }
+
+                let data_page = Page::DataPageV2 {
+                    buf: ByteBufferPtr::new(buffer),
+                    num_values: self.num_buffered_values,
+                    encoding,
+                    num_nulls: self.num_buffered_values - self.num_buffered_encoded_values,
+                    num_rows: self.num_buffered_rows,
+                    def_levels_byte_len: def_levels_byte_len as u32,
+                    rep_levels_byte_len: rep_levels_byte_len as u32,
+                    is_compressed: self.compressor.is_some(),
+                    // TODO: process statistics
+                    statistics: None,
+                };
+
+                CompressedPage::new(data_page, uncompressed_size)
+            }
+        };
+
+        // Check if we need to buffer data page or flush it to the sink directly.
+        if self.dict_encoder.is_some() {
+            self.data_pages.push_back(compressed_page);
+        } else {
+            self.write_data_page(compressed_page)?;
+        }
+
+        // Update total number of rows.
+        self.total_rows_written += self.num_buffered_rows as u64;
+
+        // Reset state.
+        self.rep_levels_sink.clear();
+        self.def_levels_sink.clear();
+        self.num_buffered_values = 0;
+        self.num_buffered_encoded_values = 0;
+        self.num_buffered_rows = 0;
+
+        Ok(())
+    }
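+
+    // Note on the page layouts assembled above: for v1 data pages the buffer is
+    //     [RLE rep levels][RLE def levels][values]
+    // and the whole buffer is compressed, while for v2 data pages it is
+    //     [RLE rep levels][RLE def levels][compressed values]
+    // with levels left uncompressed and their byte lengths recorded in the page header.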
+
+    /// Writes any outstanding buffered values to a new data page and flushes all data
+    /// pages buffered during dictionary encoding into the underlying sink.
+    #[inline]
+    fn flush_data_pages(&mut self) -> Result<()> {
+        // Write all outstanding data to a new page.
+        if self.num_buffered_values > 0 {
+            self.add_data_page()?;
+        }
+
+        while let Some(page) = self.data_pages.pop_front() {
+            self.write_data_page(page)?;
+        }
+
+        Ok(())
+    }
+
+    /// Assembles and writes column chunk metadata.
+    fn write_column_metadata(&mut self) -> Result<ColumnChunkMetaData> {
+        let total_compressed_size = self.total_compressed_size as i64;
+        let total_uncompressed_size = self.total_uncompressed_size as i64;
+        let num_values = self.total_num_values as i64;
+        let dict_page_offset = self.dictionary_page_offset.map(|v| v as i64);
+        // If data page offset is not set, then no pages have been written
+        let data_page_offset = self.data_page_offset.unwrap_or(0) as i64;
+
+        let file_offset;
+        let mut encodings = Vec::new();
+
+        if self.has_dictionary {
+            assert!(dict_page_offset.is_some(), "Dictionary offset is not set");
+            file_offset = dict_page_offset.unwrap() + total_compressed_size;
+            // NOTE: This should be in sync with writing dictionary pages.
+            encodings.push(self.props.dictionary_page_encoding());
+            encodings.push(self.props.dictionary_data_page_encoding());
+            // If we fell back to the alternative encoding, add it to the list.
+            if self.dict_encoder.is_none() {
+                encodings.push(self.encoder.encoding());
+            }
+        } else {
+            file_offset = data_page_offset + total_compressed_size;
+            encodings.push(self.encoder.encoding());
+        }
+        // We use only RLE level encoding for data page v1 and data page v2.
+        encodings.push(Encoding::RLE);
+
+        let metadata = ColumnChunkMetaData::builder(self.descr.clone())
+            .set_compression(self.codec)
+            .set_encodings(encodings)
+            .set_file_offset(file_offset)
+            .set_total_compressed_size(total_compressed_size)
+            .set_total_uncompressed_size(total_uncompressed_size)
+            .set_num_values(num_values)
+            .set_data_page_offset(data_page_offset)
+            .set_dictionary_page_offset(dict_page_offset)
+            .build()?;
+
+        self.page_writer.write_metadata(&metadata)?;
+
+        Ok(metadata)
+    }
+
+    /// Encodes definition or repetition levels for Data Page v1.
+    #[inline]
+    fn encode_levels_v1(
+        &self,
+        encoding: Encoding,
+        levels: &[i16],
+        max_level: i16,
+    ) -> Result<Vec<u8>> {
+        let size = max_buffer_size(encoding, max_level, levels.len());
+        let mut encoder = LevelEncoder::v1(encoding, max_level, vec![0; size]);
+        encoder.put(&levels)?;
+        encoder.consume()
+    }
+
+    /// Encodes definition or repetition levels for Data Page v2.
+    /// Encoding is always RLE.
+    #[inline]
+    fn encode_levels_v2(&self, levels: &[i16], max_level: i16) -> Result<Vec<u8>> {
+        let size = max_buffer_size(Encoding::RLE, max_level, levels.len());
+        let mut encoder = LevelEncoder::v2(max_level, vec![0; size]);
+        encoder.put(&levels)?;
+        encoder.consume()
+    }
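+
+    // The two variants differ only in framing: for v1, the RLE-encoded levels are
+    // prefixed with their byte length as a little-endian i32, while for v2 the raw RLE
+    // bytes are written as-is, since the length is carried in the data page v2 header.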
+
+    /// Writes compressed data page into underlying sink and updates global metrics.
+    #[inline]
+    fn write_data_page(&mut self, page: CompressedPage) -> Result<()> {
+        let page_spec = self.page_writer.write_page(page)?;
+        self.update_metrics_for_page(page_spec);
+        Ok(())
+    }
+
+    /// Writes dictionary page into underlying sink.
+    #[inline]
+    fn write_dictionary_page(&mut self) -> Result<()> {
+        if self.dict_encoder.is_none() {
+            return Err(general_err!("Dictionary encoder is not set"));
+        }
+
+        let compressed_page = {
+            let encoder = self.dict_encoder.as_ref().unwrap();
+            let is_sorted = encoder.is_sorted();
+            let num_values = encoder.num_entries();
+            let mut values_buf = encoder.write_dict()?;
+            let uncompressed_size = values_buf.len();
+
+            if let Some(ref mut cmpr) = self.compressor {
+                let mut output_buf = Vec::with_capacity(uncompressed_size);
+                cmpr.compress(values_buf.data(), &mut output_buf)?;
+                values_buf = ByteBufferPtr::new(output_buf);
+            }
+
+            let dict_page = Page::DictionaryPage {
+                buf: values_buf,
+                num_values: num_values as u32,
+                encoding: self.props.dictionary_page_encoding(),
+                is_sorted,
+            };
+            CompressedPage::new(dict_page, uncompressed_size)
+        };
+
+        let page_spec = self.page_writer.write_page(compressed_page)?;
+        self.update_metrics_for_page(page_spec);
+        Ok(())
+    }
+
+    /// Updates column writer metrics with each page metadata.
+    #[inline]
+    fn update_metrics_for_page(&mut self, page_spec: PageWriteSpec) {
+        self.total_uncompressed_size += page_spec.uncompressed_size as u64;
+        self.total_compressed_size += page_spec.compressed_size as u64;
+        self.total_num_values += page_spec.num_values as u64;
+        self.total_bytes_written += page_spec.bytes_written;
+
+        match page_spec.page_type {
+            PageType::DATA_PAGE | PageType::DATA_PAGE_V2 => {
+                if self.data_page_offset.is_none() {
+                    self.data_page_offset = Some(page_spec.offset);
+                }
+            }
+            PageType::DICTIONARY_PAGE => {
+                assert!(
+                    self.dictionary_page_offset.is_none(),
+                    "Dictionary offset is already set"
+                );
+                self.dictionary_page_offset = Some(page_spec.offset);
+            }
+            _ => {}
+        }
+    }
+
+    /// Returns a reference to the underlying page writer.
+    /// This method is intended for use in tests only.
+    fn get_page_writer_ref(&self) -> &Box<PageWriter> {
+        &self.page_writer
+    }
+}
+
+// ----------------------------------------------------------------------
+// Encoding support for column writer.
+// This mirrors parquet-mr default encodings for writes. See:
+// https://github.com/apache/parquet-mr/blob/master/parquet-column/src/main/java/org/apache/parquet/column/values/factory/DefaultV1ValuesWriterFactory.java
+// https://github.com/apache/parquet-mr/blob/master/parquet-column/src/main/java/org/apache/parquet/column/values/factory/DefaultV2ValuesWriterFactory.java
+
+/// Trait to define default encoding for types, including whether or not the type
+/// supports dictionary encoding.
+trait EncodingWriteSupport {
+    /// Returns encoding for a column when no other encoding is provided in writer
+    /// properties.
+    fn fallback_encoding(props: &WriterProperties) -> Encoding;
+
+    /// Returns true if dictionary is supported for column writer, false otherwise.
+    fn has_dictionary_support(props: &WriterProperties) -> bool;
+}
+
+// Basic implementation, always falls back to PLAIN and supports dictionary.
+impl<T: DataType> EncodingWriteSupport for ColumnWriterImpl<T> {
+    default fn fallback_encoding(_props: &WriterProperties) -> Encoding {
+        Encoding::PLAIN
+    }
+
+    default fn has_dictionary_support(_props: &WriterProperties) -> bool {
+        true
+    }
+}
+
+impl EncodingWriteSupport for ColumnWriterImpl<BoolType> {
+    fn fallback_encoding(props: &WriterProperties) -> Encoding {
+        match props.writer_version() {
+            WriterVersion::PARQUET_1_0 => Encoding::PLAIN,
+            WriterVersion::PARQUET_2_0 => Encoding::RLE,
+        }
+    }
+
+    // Boolean column does not support dictionary encoding and should fall back to
+    // whatever fallback encoding is defined.
+    fn has_dictionary_support(_props: &WriterProperties) -> bool {
+        false
+    }
+}
+
+impl EncodingWriteSupport for ColumnWriterImpl<Int32Type> {
+    fn fallback_encoding(props: &WriterProperties) -> Encoding {
+        match props.writer_version() {
+            WriterVersion::PARQUET_1_0 => Encoding::PLAIN,
+            WriterVersion::PARQUET_2_0 => Encoding::DELTA_BINARY_PACKED,
+        }
+    }
+}
+
+impl EncodingWriteSupport for ColumnWriterImpl<Int64Type> {
+    fn fallback_encoding(props: &WriterProperties) -> Encoding {
+        match props.writer_version() {
+            WriterVersion::PARQUET_1_0 => Encoding::PLAIN,
+            WriterVersion::PARQUET_2_0 => Encoding::DELTA_BINARY_PACKED,
+        }
+    }
+}
+
+impl EncodingWriteSupport for ColumnWriterImpl<ByteArrayType> {
+    fn fallback_encoding(props: &WriterProperties) -> Encoding {
+        match props.writer_version() {
+            WriterVersion::PARQUET_1_0 => Encoding::PLAIN,
+            WriterVersion::PARQUET_2_0 => Encoding::DELTA_BYTE_ARRAY,
+        }
+    }
+}
+
+impl EncodingWriteSupport for ColumnWriterImpl<FixedLenByteArrayType> {
+    fn fallback_encoding(props: &WriterProperties) -> Encoding {
+        match props.writer_version() {
+            WriterVersion::PARQUET_1_0 => Encoding::PLAIN,
+            WriterVersion::PARQUET_2_0 => Encoding::DELTA_BYTE_ARRAY,
+        }
+    }
+
+    fn has_dictionary_support(props: &WriterProperties) -> bool {
+        match props.writer_version() {
+            // Dictionary encoding was not enabled in PARQUET 1.0
+            WriterVersion::PARQUET_1_0 => false,
+            WriterVersion::PARQUET_2_0 => true,
+        }
+    }
+}
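+
+// As a hedged sketch (the helper below is hypothetical, not part of this module), a
+// writer would consult these two hooks roughly like this: prefer dictionary encoding
+// when the type supports it, otherwise use the per-type fallback encoding:
+//
+// fn resolve_encoding<T: DataType>(props: &WriterProperties) -> Encoding {
+//     if ColumnWriterImpl::<T>::has_dictionary_support(props) {
+//         Encoding::RLE_DICTIONARY
+//     } else {
+//         ColumnWriterImpl::<T>::fallback_encoding(props)
+//     }
+// }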
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use std::error::Error;
+
+    use rand::distributions::range::SampleRange;
+
+    use crate::parquet::column::{
+        page::PageReader,
+        reader::{get_column_reader, get_typed_column_reader, ColumnReaderImpl},
+    };
+    use crate::parquet::file::{
+        properties::WriterProperties, reader::SerializedPageReader, writer::SerializedPageWriter,
+    };
+    use crate::parquet::schema::types::{ColumnDescriptor, ColumnPath, Type as SchemaType};
+    use crate::parquet::util::{
+        io::{FileSink, FileSource},
+        test_common::{get_temp_file, random_numbers_range},
+    };
+
+    #[test]
+    fn test_column_writer_inconsistent_def_rep_length() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(WriterProperties::builder().build());
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 1, 1, props);
+        let res = writer.write_batch(&[1, 2, 3, 4], Some(&[1, 1, 1]), Some(&[0, 0]));
+        assert!(res.is_err());
+        if let Err(err) = res {
+            assert_eq!(
+                err.description(),
+                "Inconsistent length of definition and repetition levels: 3 != 2"
+            );
+        }
+    }
+
+    #[test]
+    fn test_column_writer_invalid_def_levels() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(WriterProperties::builder().build());
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 1, 0, props);
+        let res = writer.write_batch(&[1, 2, 3, 4], None, None);
+        assert!(res.is_err());
+        if let Err(err) = res {
+            assert_eq!(
+                err.description(),
+                "Definition levels are required, because max definition level = 1"
+            );
+        }
+    }
+
+    #[test]
+    fn test_column_writer_invalid_rep_levels() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(WriterProperties::builder().build());
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 0, 1, props);
+        let res = writer.write_batch(&[1, 2, 3, 4], None, None);
+        assert!(res.is_err());
+        if let Err(err) = res {
+            assert_eq!(
+                err.description(),
+                "Repetition levels are required, because max repetition level = 1"
+            );
+        }
+    }
+
+    #[test]
+    fn test_column_writer_not_enough_values_to_write() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(WriterProperties::builder().build());
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 1, 0, props);
+        let res = writer.write_batch(&[1, 2], Some(&[1, 1, 1, 1]), None);
+        assert!(res.is_err());
+        if let Err(err) = res {
+            assert_eq!(
+                err.description(),
+                "Expected to write 4 values, but have only 2"
+            );
+        }
+    }
+
+    #[test]
+    #[should_panic(expected = "Dictionary offset is already set")]
+    fn test_column_writer_write_only_one_dictionary_page() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(WriterProperties::builder().build());
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 0, 0, props);
+        writer.write_batch(&[1, 2, 3, 4], None, None).unwrap();
+        // The first dictionary page should be written successfully.
+        let res = writer.write_dictionary_page();
+        assert!(res.is_ok());
+        writer.write_dictionary_page().unwrap();
+    }
+
+    #[test]
+    fn test_column_writer_error_when_writing_disabled_dictionary() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(
+            WriterProperties::builder()
+                .set_dictionary_enabled(false)
+                .build(),
+        );
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 0, 0, props);
+        writer.write_batch(&[1, 2, 3, 4], None, None).unwrap();
+        let res = writer.write_dictionary_page();
+        assert!(res.is_err());
+        if let Err(err) = res {
+            assert_eq!(err.description(), "Dictionary encoder is not set");
+        }
+    }
+
+    #[test]
+    fn test_column_writer_boolean_type_does_not_support_dictionary() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(
+            WriterProperties::builder()
+                .set_dictionary_enabled(true)
+                .build(),
+        );
+        let mut writer = get_test_column_writer::<BoolType>(page_writer, 0, 0, props);
+        writer
+            .write_batch(&[true, false, true, false], None, None)
+            .unwrap();
+
+        let (bytes_written, rows_written, metadata) = writer.close().unwrap();
+        // PlainEncoder uses a bit writer for boolean values, so all four values fit into 1 byte.
+        assert_eq!(bytes_written, 1);
+        assert_eq!(rows_written, 4);
+        assert_eq!(metadata.encodings(), &vec![Encoding::PLAIN, Encoding::RLE]);
+        assert_eq!(metadata.num_values(), 4); // just values
+        assert_eq!(metadata.dictionary_page_offset(), None);
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_bool() {
+        check_encoding_write_support::<BoolType>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[true, false],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<BoolType>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[true, false],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<BoolType>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[true, false],
+            None,
+            &[Encoding::RLE, Encoding::RLE],
+        );
+        check_encoding_write_support::<BoolType>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[true, false],
+            None,
+            &[Encoding::RLE, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_int32() {
+        check_encoding_write_support::<Int32Type>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[1, 2],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int32Type>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[1, 2],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int32Type>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[1, 2],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int32Type>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[1, 2],
+            None,
+            &[Encoding::DELTA_BINARY_PACKED, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_int64() {
+        check_encoding_write_support::<Int64Type>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[1, 2],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int64Type>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[1, 2],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int64Type>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[1, 2],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int64Type>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[1, 2],
+            None,
+            &[Encoding::DELTA_BINARY_PACKED, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_int96() {
+        check_encoding_write_support::<Int96Type>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[Int96::from(vec![1, 2, 3])],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int96Type>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[Int96::from(vec![1, 2, 3])],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int96Type>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[Int96::from(vec![1, 2, 3])],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<Int96Type>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[Int96::from(vec![1, 2, 3])],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_float() {
+        check_encoding_write_support::<FloatType>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[1.0, 2.0],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<FloatType>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[1.0, 2.0],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<FloatType>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[1.0, 2.0],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<FloatType>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[1.0, 2.0],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_double() {
+        check_encoding_write_support::<DoubleType>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[1.0, 2.0],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<DoubleType>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[1.0, 2.0],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<DoubleType>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[1.0, 2.0],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<DoubleType>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[1.0, 2.0],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_byte_array() {
+        check_encoding_write_support::<ByteArrayType>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[ByteArray::from(vec![1u8])],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<ByteArrayType>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[ByteArray::from(vec![1u8])],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<ByteArrayType>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[ByteArray::from(vec![1u8])],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<ByteArrayType>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[ByteArray::from(vec![1u8])],
+            None,
+            &[Encoding::DELTA_BYTE_ARRAY, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_default_encoding_support_fixed_len_byte_array() {
+        check_encoding_write_support::<FixedLenByteArrayType>(
+            WriterVersion::PARQUET_1_0,
+            true,
+            &[ByteArray::from(vec![1u8])],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<FixedLenByteArrayType>(
+            WriterVersion::PARQUET_1_0,
+            false,
+            &[ByteArray::from(vec![1u8])],
+            None,
+            &[Encoding::PLAIN, Encoding::RLE],
+        );
+        check_encoding_write_support::<FixedLenByteArrayType>(
+            WriterVersion::PARQUET_2_0,
+            true,
+            &[ByteArray::from(vec![1u8])],
+            Some(0),
+            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+        );
+        check_encoding_write_support::<FixedLenByteArrayType>(
+            WriterVersion::PARQUET_2_0,
+            false,
+            &[ByteArray::from(vec![1u8])],
+            None,
+            &[Encoding::DELTA_BYTE_ARRAY, Encoding::RLE],
+        );
+    }
+
+    #[test]
+    fn test_column_writer_check_metadata() {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(WriterProperties::builder().build());
+        let mut writer = get_test_column_writer::<Int32Type>(page_writer, 0, 0, props);
+        writer.write_batch(&[1, 2, 3, 4], None, None).unwrap();
+
+        let (bytes_written, rows_written, metadata) = writer.close().unwrap();
+        assert_eq!(bytes_written, 20);
+        assert_eq!(rows_written, 4);
+        assert_eq!(
+            metadata.encodings(),
+            &vec![Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE]
+        );
+        assert_eq!(metadata.num_values(), 8); // dictionary + value indexes
+        assert_eq!(metadata.compressed_size(), 20);
+        assert_eq!(metadata.uncompressed_size(), 20);
+        assert_eq!(metadata.data_page_offset(), 0);
+        assert_eq!(metadata.dictionary_page_offset(), Some(0));
+    }
+
+    #[test]
+    fn test_column_writer_empty_column_roundtrip() {
+        let props = WriterProperties::builder().build();
+        column_roundtrip::<Int32Type>("test_col_writer_rnd_1", props, &[], None, None);
+    }
+
+    #[test]
+    fn test_column_writer_non_nullable_values_roundtrip() {
+        let props = WriterProperties::builder().build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_2",
+            props,
+            1024,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            0,
+            0,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_nullable_non_repeated_values_roundtrip() {
+        let props = WriterProperties::builder().build();
+        column_roundtrip_random::<Int32Type>(
+            "test_column_writer_nullable_non_repeated_values_roundtrip",
+            props,
+            1024,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            0,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_nullable_repeated_values_roundtrip() {
+        let props = WriterProperties::builder().build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_3",
+            props,
+            1024,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            10,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_dictionary_fallback_small_data_page() {
+        let props = WriterProperties::builder()
+            .set_dictionary_pagesize_limit(32)
+            .set_data_pagesize_limit(32)
+            .build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_4",
+            props,
+            1024,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            10,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_small_write_batch_size() {
+        for i in vec![1, 2, 5, 10, 11, 1023] {
+            let props = WriterProperties::builder().set_write_batch_size(i).build();
+
+            column_roundtrip_random::<Int32Type>(
+                "test_col_writer_rnd_5",
+                props,
+                1024,
+                ::std::i32::MIN,
+                ::std::i32::MAX,
+                10,
+                10,
+            );
+        }
+    }
+
+    #[test]
+    fn test_column_writer_dictionary_disabled_v1() {
+        let props = WriterProperties::builder()
+            .set_writer_version(WriterVersion::PARQUET_1_0)
+            .set_dictionary_enabled(false)
+            .build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_6",
+            props,
+            1024,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            10,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_dictionary_disabled_v2() {
+        let props = WriterProperties::builder()
+            .set_writer_version(WriterVersion::PARQUET_2_0)
+            .set_dictionary_enabled(false)
+            .build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_7",
+            props,
+            1024,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            10,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_compression_v1() {
+        let props = WriterProperties::builder()
+            .set_writer_version(WriterVersion::PARQUET_1_0)
+            .set_compression(Compression::SNAPPY)
+            .build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_8",
+            props,
+            2048,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            10,
+        );
+    }
+
+    #[test]
+    fn test_column_writer_compression_v2() {
+        let props = WriterProperties::builder()
+            .set_writer_version(WriterVersion::PARQUET_2_0)
+            .set_compression(Compression::SNAPPY)
+            .build();
+        column_roundtrip_random::<Int32Type>(
+            "test_col_writer_rnd_9",
+            props,
+            2048,
+            ::std::i32::MIN,
+            ::std::i32::MAX,
+            10,
+            10,
+        );
+    }
+
+    /// Performs a write-read roundtrip with randomly generated values and levels.
+    /// `max_size` is the maximum number of values or levels (if `max_def_level` > 0)
+    /// to write for a column.
+    fn column_roundtrip_random<'a, T: DataType>(
+        file_name: &'a str,
+        props: WriterProperties,
+        max_size: usize,
+        min_value: T::T,
+        max_value: T::T,
+        max_def_level: i16,
+        max_rep_level: i16,
+    ) where
+        T::T: PartialOrd + SampleRange + Copy,
+    {
+        let mut num_values: usize = 0;
+
+        let mut buf: Vec<i16> = Vec::new();
+        let def_levels = if max_def_level > 0 {
+            random_numbers_range(max_size, 0, max_def_level + 1, &mut buf);
+            for &dl in &buf[..] {
+                if dl == max_def_level {
+                    num_values += 1;
+                }
+            }
+            Some(&buf[..])
+        } else {
+            num_values = max_size;
+            None
+        };
+
+        let mut buf: Vec<i16> = Vec::new();
+        let rep_levels = if max_rep_level > 0 {
+            random_numbers_range(max_size, 0, max_rep_level + 1, &mut buf);
+            Some(&buf[..])
+        } else {
+            None
+        };
+
+        let mut values: Vec<T::T> = Vec::new();
+        random_numbers_range(num_values, min_value, max_value, &mut values);
+
+        column_roundtrip::<T>(file_name, props, &values[..], def_levels, rep_levels);
+    }
+
+    /// Performs a write-read roundtrip and asserts the written values and levels.
+    fn column_roundtrip<'a, T: DataType>(
+        file_name: &'a str,
+        props: WriterProperties,
+        values: &[T::T],
+        def_levels: Option<&[i16]>,
+        rep_levels: Option<&[i16]>,
+    ) {
+        let file = get_temp_file(file_name, &[]);
+        let sink = FileSink::new(&file);
+        let page_writer = Box::new(SerializedPageWriter::new(sink));
+
+        let max_def_level = match def_levels {
+            Some(buf) => *buf.iter().max().unwrap_or(&0i16),
+            None => 0i16,
+        };
+
+        let max_rep_level = match rep_levels {
+            Some(buf) => *buf.iter().max().unwrap_or(&0i16),
+            None => 0i16,
+        };
+
+        let mut max_batch_size = values.len();
+        if let Some(levels) = def_levels {
+            max_batch_size = cmp::max(max_batch_size, levels.len());
+        }
+        if let Some(levels) = rep_levels {
+            max_batch_size = cmp::max(max_batch_size, levels.len());
+        }
+
+        let mut writer =
+            get_test_column_writer::<T>(page_writer, max_def_level, max_rep_level, Rc::new(props));
+
+        let values_written = writer.write_batch(values, def_levels, rep_levels).unwrap();
+        assert_eq!(values_written, values.len());
+        let (bytes_written, rows_written, column_metadata) = writer.close().unwrap();
+
+        let source = FileSource::new(&file, 0, bytes_written as usize);
+        let page_reader = Box::new(
+            SerializedPageReader::new(
+                source,
+                column_metadata.num_values(),
+                column_metadata.compression(),
+                T::get_physical_type(),
+            )
+            .unwrap(),
+        );
+        let reader = get_test_column_reader::<T>(page_reader, max_def_level, max_rep_level);
+
+        let mut actual_values = vec![T::T::default(); max_batch_size];
+        let mut actual_def_levels = match def_levels {
+            Some(_) => Some(vec![0i16; max_batch_size]),
+            None => None,
+        };
+        let mut actual_rep_levels = match rep_levels {
+            Some(_) => Some(vec![0i16; max_batch_size]),
+            None => None,
+        };
+
+        let (values_read, levels_read) = read_fully(
+            reader,
+            max_batch_size,
+            actual_def_levels.as_mut(),
+            actual_rep_levels.as_mut(),
+            actual_values.as_mut_slice(),
+        );
+
+        // Assert values, definition and repetition levels.
+
+        assert_eq!(&actual_values[..values_read], values);
+        match actual_def_levels {
+            Some(ref vec) => assert_eq!(Some(&vec[..levels_read]), def_levels),
+            None => assert_eq!(None, def_levels),
+        }
+        match actual_rep_levels {
+            Some(ref vec) => assert_eq!(Some(&vec[..levels_read]), rep_levels),
+            None => assert_eq!(None, rep_levels),
+        }
+
+        // Assert written rows.
+
+        if let Some(levels) = actual_rep_levels {
+            let mut actual_rows_written = 0;
+            for l in levels {
+                if l == 0 {
+                    actual_rows_written += 1;
+                }
+            }
+            assert_eq!(actual_rows_written, rows_written);
+        } else if actual_def_levels.is_some() {
+            assert_eq!(levels_read as u64, rows_written);
+        } else {
+            assert_eq!(values_read as u64, rows_written);
+        }
+    }
+
+    /// Writes the provided values and returns the column metadata for those values.
+    /// Used to test encoding support for the column writer.
+    fn column_write_and_get_metadata<T: DataType>(
+        props: WriterProperties,
+        values: &[T::T],
+    ) -> ColumnChunkMetaData {
+        let page_writer = get_test_page_writer();
+        let props = Rc::new(props);
+        let mut writer = get_test_column_writer::<T>(page_writer, 0, 0, props);
+        writer.write_batch(values, None, None).unwrap();
+        let (_, _, metadata) = writer.close().unwrap();
+        metadata
+    }
+
+    // Helper function for `EncodingWriteSupport` tests. Checks the dictionary page
+    // offset and the encodings of the resulting column metadata to make sure that
+    // the column writer uses the encodings provided by the trait.
+    fn check_encoding_write_support<T: DataType>(
+        version: WriterVersion,
+        dict_enabled: bool,
+        data: &[T::T],
+        dictionary_page_offset: Option<i64>,
+        encodings: &[Encoding],
+    ) {
+        let props = WriterProperties::builder()
+            .set_writer_version(version)
+            .set_dictionary_enabled(dict_enabled)
+            .build();
+        let meta = column_write_and_get_metadata::<T>(props, data);
+        assert_eq!(meta.dictionary_page_offset(), dictionary_page_offset);
+        assert_eq!(meta.encodings(), &encodings);
+    }
+
+    /// Reads one batch of data, assuming that the batch is large enough to capture all
+    /// of the values and levels.
+    fn read_fully<T: DataType>(
+        mut reader: ColumnReaderImpl<T>,
+        batch_size: usize,
+        mut def_levels: Option<&mut Vec<i16>>,
+        mut rep_levels: Option<&mut Vec<i16>>,
+        values: &mut [T::T],
+    ) -> (usize, usize) {
+        let actual_def_levels = match &mut def_levels {
+            Some(ref mut vec) => Some(&mut vec[..]),
+            None => None,
+        };
+        let actual_rep_levels = match rep_levels {
+            Some(ref mut vec) => Some(&mut vec[..]),
+            None => None,
+        };
+        reader
+            .read_batch(batch_size, actual_def_levels, actual_rep_levels, values)
+            .unwrap()
+    }
+
+    /// Returns column writer.
+    fn get_test_column_writer<T: DataType>(
+        page_writer: Box<PageWriter>,
+        max_def_level: i16,
+        max_rep_level: i16,
+        props: WriterPropertiesPtr,
+    ) -> ColumnWriterImpl<T> {
+        let descr = Rc::new(get_test_column_descr::<T>(max_def_level, max_rep_level));
+        let column_writer = get_column_writer(descr, props, page_writer);
+        get_typed_column_writer::<T>(column_writer)
+    }
+
+    /// Returns column reader.
+    fn get_test_column_reader<T: DataType>(
+        page_reader: Box<PageReader>,
+        max_def_level: i16,
+        max_rep_level: i16,
+    ) -> ColumnReaderImpl<T> {
+        let descr = Rc::new(get_test_column_descr::<T>(max_def_level, max_rep_level));
+        let column_reader = get_column_reader(descr, page_reader);
+        get_typed_column_reader::<T>(column_reader)
+    }
+
+    /// Returns descriptor for primitive column.
+    fn get_test_column_descr<T: DataType>(
+        max_def_level: i16,
+        max_rep_level: i16,
+    ) -> ColumnDescriptor {
+        let path = ColumnPath::from("col");
+        let tpe = SchemaType::primitive_type_builder("col", T::get_physical_type())
+            // Length is set for the "encoding support" tests of the FIXED_LEN_BYTE_ARRAY
+            // type; it is a no-op for other types.
+            .with_length(1)
+            .build()
+            .unwrap();
+        ColumnDescriptor::new(Rc::new(tpe), None, max_def_level, max_rep_level, path)
+    }
+
+    /// Returns a page writer that records page specs without serializing page data.
+    fn get_test_page_writer() -> Box<PageWriter> {
+        Box::new(TestPageWriter {})
+    }
+
+    struct TestPageWriter {}
+
+    impl PageWriter for TestPageWriter {
+        fn write_page(&mut self, page: CompressedPage) -> Result<PageWriteSpec> {
+            let mut res = PageWriteSpec::new();
+            res.page_type = page.page_type();
+            res.uncompressed_size = page.uncompressed_size();
+            res.compressed_size = page.compressed_size();
+            res.num_values = page.num_values();
+            res.offset = 0;
+            res.bytes_written = page.data().len() as u64;
+            Ok(res)
+        }
+
+        fn write_metadata(&mut self, _metadata: &ColumnChunkMetaData) -> Result<()> {
+            Ok(())
+        }
+
+        fn close(&mut self) -> Result<()> {
+            Ok(())
+        }
+    }
+}
diff --git a/rust/src/parquet/compression.rs b/rust/src/parquet/compression.rs
new file mode 100644
index 0000000..3690cca
--- /dev/null
+++ b/rust/src/parquet/compression.rs
@@ -0,0 +1,321 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains codec interface and supported codec implementations.
+//!
+//! See the [`Compression`](`::basic::Compression`) enum for all available compression
+//! algorithms.
+//!
+//! # Example
+//!
+//! ```rust
+//! use arrow::parquet::{basic::Compression, compression::create_codec};
+//!
+//! let mut codec = match create_codec(Compression::SNAPPY) {
+//!     Ok(Some(codec)) => codec,
+//!     _ => panic!(),
+//! };
+//!
+//! let data = vec![b'p', b'a', b'r', b'q', b'u', b'e', b't'];
+//! let mut compressed = vec![];
+//! codec.compress(&data[..], &mut compressed).unwrap();
+//!
+//! let mut output = vec![];
+//! codec.decompress(&compressed[..], &mut output).unwrap();
+//!
+//! assert_eq!(output, data);
+//! ```
+
+use std::io::{self, Read, Write};
+
+use brotli;
+use flate2::{read, write, Compression};
+use lz4;
+use snap::{decompress_len, max_compress_len, Decoder, Encoder};
+use zstd;
+
+use crate::parquet::basic::Compression as CodecType;
+use crate::parquet::errors::{ParquetError, Result};
+
+/// Parquet compression codec interface.
+pub trait Codec {
+    /// Compresses data stored in slice `input_buf` and writes the compressed result
+    /// to `output_buf`.
+    /// Note that you'll need to call `clear()` before reusing the same `output_buf` across
+    /// different `compress` calls.
+    fn compress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<()>;
+
+    /// Decompresses data stored in slice `input_buf` and writes output to `output_buf`.
+    /// Returns the total number of bytes written.
+    fn decompress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<usize>;
+}
+
+/// Given the compression type `codec`, returns a codec that can be used to compress
+/// and decompress bytes for that compression type.
+/// This returns `None` if the codec type is `UNCOMPRESSED`.
+pub fn create_codec(codec: CodecType) -> Result<Option<Box<Codec>>> {
+    match codec {
+        CodecType::BROTLI => Ok(Some(Box::new(BrotliCodec::new()))),
+        CodecType::GZIP => Ok(Some(Box::new(GZipCodec::new()))),
+        CodecType::SNAPPY => Ok(Some(Box::new(SnappyCodec::new()))),
+        CodecType::LZ4 => Ok(Some(Box::new(LZ4Codec::new()))),
+        CodecType::ZSTD => Ok(Some(Box::new(ZSTDCodec::new()))),
+        CodecType::UNCOMPRESSED => Ok(None),
+        _ => Err(nyi_err!("The codec type {} is not supported yet", codec)),
+    }
+}
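+
+// A hedged usage sketch for buffer reuse, following the note on `Codec::compress`
+// above: the same `output_buf` must be cleared before it is reused.
+//
+// let mut codec = create_codec(CodecType::SNAPPY).unwrap().unwrap();
+// let mut out = Vec::new();
+// codec.compress(b"first block", &mut out).unwrap();
+// out.clear(); // required before reusing `out` for the next compress call
+// codec.compress(b"second block", &mut out).unwrap();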
+
+/// Codec for Snappy compression format.
+pub struct SnappyCodec {
+    decoder: Decoder,
+    encoder: Encoder,
+}
+
+impl SnappyCodec {
+    /// Creates new Snappy compression codec.
+    fn new() -> Self {
+        Self {
+            decoder: Decoder::new(),
+            encoder: Encoder::new(),
+        }
+    }
+}
+
+impl Codec for SnappyCodec {
+    fn decompress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<usize> {
+        let len = decompress_len(input_buf)?;
+        output_buf.resize(len, 0);
+        self.decoder
+            .decompress(input_buf, output_buf)
+            .map_err(|e| e.into())
+    }
+
+    fn compress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<()> {
+        let required_len = max_compress_len(input_buf.len());
+        if output_buf.len() < required_len {
+            output_buf.resize(required_len, 0);
+        }
+        let n = self.encoder.compress(input_buf, &mut output_buf[..])?;
+        output_buf.truncate(n);
+        Ok(())
+    }
+}
+
+/// Codec for GZIP compression algorithm.
+pub struct GZipCodec {}
+
+impl GZipCodec {
+    /// Creates new GZIP compression codec.
+    fn new() -> Self {
+        Self {}
+    }
+}
+
+impl Codec for GZipCodec {
+    fn decompress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<usize> {
+        let mut decoder = read::GzDecoder::new(input_buf);
+        decoder.read_to_end(output_buf).map_err(|e| e.into())
+    }
+
+    fn compress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<()> {
+        let mut encoder = write::GzEncoder::new(output_buf, Compression::default());
+        encoder.write_all(input_buf)?;
+        encoder.try_finish().map_err(|e| e.into())
+    }
+}
+
+const BROTLI_DEFAULT_BUFFER_SIZE: usize = 4096;
+const BROTLI_DEFAULT_COMPRESSION_QUALITY: u32 = 1; // supported levels 0-11
+const BROTLI_DEFAULT_LG_WINDOW_SIZE: u32 = 22; // recommended between 20-22
+
+/// Codec for Brotli compression algorithm.
+pub struct BrotliCodec {}
+
+impl BrotliCodec {
+    /// Creates new Brotli compression codec.
+    fn new() -> Self {
+        Self {}
+    }
+}
+
+impl Codec for BrotliCodec {
+    fn decompress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<usize> {
+        brotli::Decompressor::new(input_buf, BROTLI_DEFAULT_BUFFER_SIZE)
+            .read_to_end(output_buf)
+            .map_err(|e| e.into())
+    }
+
+    fn compress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<()> {
+        let mut encoder = brotli::CompressorWriter::new(
+            output_buf,
+            BROTLI_DEFAULT_BUFFER_SIZE,
+            BROTLI_DEFAULT_COMPRESSION_QUALITY,
+            BROTLI_DEFAULT_LG_WINDOW_SIZE,
+        );
+        encoder.write_all(&input_buf[..])?;
+        encoder.flush().map_err(|e| e.into())
+    }
+}
+
+const LZ4_BUFFER_SIZE: usize = 4096;
+
+/// Codec for LZ4 compression algorithm.
+pub struct LZ4Codec {}
+
+impl LZ4Codec {
+    /// Creates new LZ4 compression codec.
+    fn new() -> Self {
+        Self {}
+    }
+}
+
+impl Codec for LZ4Codec {
+    fn decompress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<usize> {
+        let mut decoder = lz4::Decoder::new(input_buf)?;
+        let mut buffer: [u8; LZ4_BUFFER_SIZE] = [0; LZ4_BUFFER_SIZE];
+        let mut total_len = 0;
+        loop {
+            let len = decoder.read(&mut buffer)?;
+            if len == 0 {
+                break;
+            }
+            total_len += len;
+            output_buf.write_all(&buffer[0..len])?;
+        }
+        Ok(total_len)
+    }
+
+    fn compress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<()> {
+        let mut encoder = lz4::EncoderBuilder::new().build(output_buf)?;
+        let mut from = 0;
+        loop {
+            let to = ::std::cmp::min(from + LZ4_BUFFER_SIZE, input_buf.len());
+            encoder.write_all(&input_buf[from..to])?;
+            from += LZ4_BUFFER_SIZE;
+            if from >= input_buf.len() {
+                break;
+            }
+        }
+        encoder.finish().1.map_err(|e| e.into())
+    }
+}
+
+/// Codec for Zstandard compression algorithm.
+pub struct ZSTDCodec {}
+
+impl ZSTDCodec {
+    /// Creates new Zstandard compression codec.
+    fn new() -> Self {
+        Self {}
+    }
+}
+
+/// Compression level (1-21) for ZSTD. Choose 1 here for better compression speed.
+const ZSTD_COMPRESSION_LEVEL: i32 = 1;
+
+impl Codec for ZSTDCodec {
+    fn decompress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<usize> {
+        let mut decoder = zstd::Decoder::new(input_buf)?;
+        match io::copy(&mut decoder, output_buf) {
+            Ok(n) => Ok(n as usize),
+            Err(e) => Err(e.into()),
+        }
+    }
+
+    fn compress(&mut self, input_buf: &[u8], output_buf: &mut Vec<u8>) -> Result<()> {
+        let mut encoder = zstd::Encoder::new(output_buf, ZSTD_COMPRESSION_LEVEL)?;
+        encoder.write_all(&input_buf[..])?;
+        match encoder.finish() {
+            Ok(_) => Ok(()),
+            Err(e) => Err(e.into()),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::parquet::util::test_common::*;
+
+    fn test_roundtrip(c: CodecType, data: &Vec<u8>) {
+        let mut c1 = create_codec(c).unwrap().unwrap();
+        let mut c2 = create_codec(c).unwrap().unwrap();
+
+        // Compress with c1
+        let mut compressed = Vec::new();
+        let mut decompressed = Vec::new();
+        c1.compress(data.as_slice(), &mut compressed)
+            .expect("Error when compressing");
+
+        // Decompress with c2
+        let mut decompressed_size = c2
+            .decompress(compressed.as_slice(), &mut decompressed)
+            .expect("Error when decompressing");
+        assert_eq!(data.len(), decompressed_size);
+        decompressed.truncate(decompressed_size);
+        assert_eq!(*data, decompressed);
+
+        compressed.clear();
+
+        // Compress with c2
+        c2.compress(data.as_slice(), &mut compressed)
+            .expect("Error when compressing");
+
+        // Decompress with c1
+        decompressed_size = c1
+            .decompress(compressed.as_slice(), &mut decompressed)
+            .expect("Error when decompressing");
+        assert_eq!(data.len(), decompressed_size);
+        decompressed.truncate(decompressed_size);
+        assert_eq!(*data, decompressed);
+    }
+
+    fn test_codec(c: CodecType) {
+        let sizes = vec![100, 10000, 100000];
+        for size in sizes {
+            let data = random_bytes(size);
+            test_roundtrip(c, &data);
+        }
+    }
+
+    #[test]
+    fn test_codec_snappy() {
+        test_codec(CodecType::SNAPPY);
+    }
+
+    #[test]
+    fn test_codec_gzip() {
+        test_codec(CodecType::GZIP);
+    }
+
+    #[test]
+    fn test_codec_brotli() {
+        test_codec(CodecType::BROTLI);
+    }
+
+    #[test]
+    fn test_codec_lz4() {
+        test_codec(CodecType::LZ4);
+    }
+
+    #[test]
+    fn test_codec_zstd() {
+        test_codec(CodecType::ZSTD);
+    }
+}
diff --git a/rust/src/parquet/data_type.rs b/rust/src/parquet/data_type.rs
new file mode 100644
index 0000000..26bdebd
--- /dev/null
+++ b/rust/src/parquet/data_type.rs
@@ -0,0 +1,463 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Data types that connect Parquet physical types with their Rust-specific
+//! representations.
+
+use std::mem;
+
+use byteorder::{BigEndian, ByteOrder};
+
+use crate::parquet::basic::Type;
+use crate::parquet::util::memory::{ByteBuffer, ByteBufferPtr};
+
+/// Rust representation for the INT96 physical type. The value is backed by an array of
+/// `u32`; the backing data takes 12 bytes, without extra padding.
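+///
+/// A minimal usage sketch:
+///
+/// ```rust
+/// use arrow::parquet::data_type::Int96;
+///
+/// let mut value = Int96::new();
+/// value.set_data(1, 2, 3);
+/// assert_eq!(value.data(), &[1, 2, 3]);
+/// ```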
+#[derive(Clone, Debug)]
+pub struct Int96 {
+    value: Option<[u32; 3]>,
+}
+
+impl Int96 {
+    /// Creates new INT96 type struct with no data set.
+    pub fn new() -> Self {
+        Self { value: None }
+    }
+
+    /// Returns underlying data as slice of [`u32`].
+    pub fn data(&self) -> &[u32] {
+        assert!(self.value.is_some());
+        self.value.as_ref().unwrap()
+    }
+
+    /// Sets data for this INT96 type.
+    pub fn set_data(&mut self, elem0: u32, elem1: u32, elem2: u32) {
+        self.value = Some([elem0, elem1, elem2]);
+    }
+}
+
+impl Default for Int96 {
+    fn default() -> Self {
+        Self { value: None }
+    }
+}
+
+impl PartialEq for Int96 {
+    fn eq(&self, other: &Int96) -> bool {
+        self.data() == other.data()
+    }
+}
+
+impl From<Vec<u32>> for Int96 {
+    fn from(buf: Vec<u32>) -> Self {
+        assert_eq!(buf.len(), 3);
+        let mut result = Self::new();
+        result.set_data(buf[0], buf[1], buf[2]);
+        result
+    }
+}
+
+/// Rust representation for BYTE_ARRAY and FIXED_LEN_BYTE_ARRAY Parquet physical types.
+/// Value is backed by a byte buffer.
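+///
+/// A minimal usage sketch:
+///
+/// ```rust
+/// use arrow::parquet::data_type::ByteArray;
+///
+/// let value = ByteArray::from("parquet");
+/// assert_eq!(value.len(), 7);
+/// assert_eq!(value.slice(0, 3).data(), "par".as_bytes());
+/// ```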
+#[derive(Clone, Debug)]
+pub struct ByteArray {
+    data: Option<ByteBufferPtr>,
+}
+
+impl ByteArray {
+    /// Creates new byte array with no data set.
+    pub fn new() -> Self {
+        ByteArray { data: None }
+    }
+
+    /// Gets length of the underlying byte buffer.
+    pub fn len(&self) -> usize {
+        assert!(self.data.is_some());
+        self.data.as_ref().unwrap().len()
+    }
+
+    /// Returns slice of data.
+    pub fn data(&self) -> &[u8] {
+        assert!(self.data.is_some());
+        self.data.as_ref().unwrap().as_ref()
+    }
+
+    /// Sets data from another byte buffer.
+    pub fn set_data(&mut self, data: ByteBufferPtr) {
+        self.data = Some(data);
+    }
+
+    /// Returns a `ByteArray` instance that is a slice of this array's data.
+    pub fn slice(&self, start: usize, len: usize) -> Self {
+        assert!(self.data.is_some());
+        Self::from(self.data.as_ref().unwrap().range(start, len))
+    }
+}
+
+impl From<Vec<u8>> for ByteArray {
+    fn from(buf: Vec<u8>) -> ByteArray {
+        Self {
+            data: Some(ByteBufferPtr::new(buf)),
+        }
+    }
+}
+
+impl<'a> From<&'a str> for ByteArray {
+    fn from(s: &'a str) -> ByteArray {
+        let mut v = Vec::new();
+        v.extend_from_slice(s.as_bytes());
+        Self {
+            data: Some(ByteBufferPtr::new(v)),
+        }
+    }
+}
+
+impl From<ByteBufferPtr> for ByteArray {
+    fn from(ptr: ByteBufferPtr) -> ByteArray {
+        Self { data: Some(ptr) }
+    }
+}
+
+impl From<ByteBuffer> for ByteArray {
+    fn from(mut buf: ByteBuffer) -> ByteArray {
+        Self {
+            data: Some(buf.consume()),
+        }
+    }
+}
+
+impl Default for ByteArray {
+    fn default() -> Self {
+        ByteArray { data: None }
+    }
+}
+
+impl PartialEq for ByteArray {
+    fn eq(&self, other: &ByteArray) -> bool {
+        self.data() == other.data()
+    }
+}
+
+/// Rust representation for Decimal values.
+///
+/// This is not a representation of a Parquet physical type, but rather a wrapper for
+/// the DECIMAL logical type, and serves as a container for the raw parts of decimal
+/// values: the unscaled value in bytes, the precision, and the scale.
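+///
+/// A minimal usage sketch:
+///
+/// ```rust
+/// use arrow::parquet::data_type::Decimal;
+///
+/// // 1.23 as DECIMAL(5, 2): unscaled value 123, stored as big-endian bytes.
+/// let decimal = Decimal::from_i32(123, 5, 2);
+/// assert_eq!(decimal.precision(), 5);
+/// assert_eq!(decimal.scale(), 2);
+/// assert_eq!(decimal.data(), &[0, 0, 0, 123]);
+/// ```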
+#[derive(Clone, Debug)]
+pub enum Decimal {
+    /// Decimal backed by `i32`.
+    Int32 {
+        value: [u8; 4],
+        precision: i32,
+        scale: i32,
+    },
+    /// Decimal backed by `i64`.
+    Int64 {
+        value: [u8; 8],
+        precision: i32,
+        scale: i32,
+    },
+    /// Decimal backed by byte array.
+    Bytes {
+        value: ByteArray,
+        precision: i32,
+        scale: i32,
+    },
+}
+
+impl Decimal {
+    /// Creates new decimal value from `i32`.
+    pub fn from_i32(value: i32, precision: i32, scale: i32) -> Self {
+        let mut bytes = [0; 4];
+        BigEndian::write_i32(&mut bytes, value);
+        Decimal::Int32 {
+            value: bytes,
+            precision,
+            scale,
+        }
+    }
+
+    /// Creates new decimal value from `i64`.
+    pub fn from_i64(value: i64, precision: i32, scale: i32) -> Self {
+        let mut bytes = [0; 8];
+        BigEndian::write_i64(&mut bytes, value);
+        Decimal::Int64 {
+            value: bytes,
+            precision,
+            scale,
+        }
+    }
+
+    /// Creates new decimal value from `ByteArray`.
+    pub fn from_bytes(value: ByteArray, precision: i32, scale: i32) -> Self {
+        Decimal::Bytes {
+            value,
+            precision,
+            scale,
+        }
+    }
+
+    /// Returns bytes of unscaled value.
+    pub fn data(&self) -> &[u8] {
+        match *self {
+            Decimal::Int32 { ref value, .. } => value,
+            Decimal::Int64 { ref value, .. } => value,
+            Decimal::Bytes { ref value, .. } => value.data(),
+        }
+    }
+
+    /// Returns decimal precision.
+    pub fn precision(&self) -> i32 {
+        match *self {
+            Decimal::Int32 { precision, .. } => precision,
+            Decimal::Int64 { precision, .. } => precision,
+            Decimal::Bytes { precision, .. } => precision,
+        }
+    }
+
+    /// Returns decimal scale.
+    pub fn scale(&self) -> i32 {
+        match *self {
+            Decimal::Int32 { scale, .. } => scale,
+            Decimal::Int64 { scale, .. } => scale,
+            Decimal::Bytes { scale, .. } => scale,
+        }
+    }
+}
+
+impl Default for Decimal {
+    fn default() -> Self {
+        Self::from_i32(0, 0, 0)
+    }
+}
+
+impl PartialEq for Decimal {
+    fn eq(&self, other: &Decimal) -> bool {
+        self.precision() == other.precision()
+            && self.scale() == other.scale()
+            && self.data() == other.data()
+    }
+}
+
+/// Converts an instance of a data type to a slice of bytes (`u8`).
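+///
+/// For example:
+///
+/// ```rust
+/// use arrow::parquet::data_type::AsBytes;
+///
+/// // Native types are viewed through their raw in-memory bytes (little-endian here).
+/// assert_eq!(7i32.as_bytes(), &[7, 0, 0, 0]);
+/// ```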
+pub trait AsBytes {
+    /// Returns slice of bytes for this data type.
+    fn as_bytes(&self) -> &[u8];
+}
+
+macro_rules! gen_as_bytes {
+    ($source_ty:ident) => {
+        impl AsBytes for $source_ty {
+            fn as_bytes(&self) -> &[u8] {
+                unsafe {
+                    ::std::slice::from_raw_parts(
+                        self as *const $source_ty as *const u8,
+                        ::std::mem::size_of::<$source_ty>(),
+                    )
+                }
+            }
+        }
+    };
+}
+
+gen_as_bytes!(bool);
+gen_as_bytes!(u8);
+gen_as_bytes!(i32);
+gen_as_bytes!(u32);
+gen_as_bytes!(i64);
+gen_as_bytes!(f32);
+gen_as_bytes!(f64);
+
+impl AsBytes for Int96 {
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { ::std::slice::from_raw_parts(self.data() as *const [u32] as *const u8, 12) }
+    }
+}
+
+impl AsBytes for ByteArray {
+    fn as_bytes(&self) -> &[u8] {
+        self.data()
+    }
+}
+
+impl AsBytes for Decimal {
+    fn as_bytes(&self) -> &[u8] {
+        self.data()
+    }
+}
+
+impl AsBytes for Vec<u8> {
+    fn as_bytes(&self) -> &[u8] {
+        self.as_slice()
+    }
+}
+
+impl<'a> AsBytes for &'a str {
+    fn as_bytes(&self) -> &[u8] {
+        (self as &str).as_bytes()
+    }
+}
+
+impl AsBytes for str {
+    fn as_bytes(&self) -> &[u8] {
+        (self as &str).as_bytes()
+    }
+}
+
+/// Contains the Parquet physical type information as well as the Rust primitive type
+/// representation.
+pub trait DataType: 'static {
+    type T: ::std::cmp::PartialEq
+        + ::std::fmt::Debug
+        + ::std::default::Default
+        + ::std::clone::Clone
+        + AsBytes;
+
+    /// Returns Parquet physical type.
+    fn get_physical_type() -> Type;
+
+    /// Returns size in bytes for Rust representation of the physical type.
+    fn get_type_size() -> usize;
+}
+
+macro_rules! make_type {
+    ($name:ident, $physical_ty:path, $native_ty:ty, $size:expr) => {
+        pub struct $name {}
+
+        impl DataType for $name {
+            type T = $native_ty;
+
+            fn get_physical_type() -> Type {
+                $physical_ty
+            }
+
+            fn get_type_size() -> usize {
+                $size
+            }
+        }
+    };
+}
+
+/// Generate struct definitions for all physical types
+
+make_type!(BoolType, Type::BOOLEAN, bool, 1);
+make_type!(Int32Type, Type::INT32, i32, 4);
+make_type!(Int64Type, Type::INT64, i64, 8);
+make_type!(Int96Type, Type::INT96, Int96, mem::size_of::<Int96>());
+make_type!(FloatType, Type::FLOAT, f32, 4);
+make_type!(DoubleType, Type::DOUBLE, f64, 8);
+make_type!(
+    ByteArrayType,
+    Type::BYTE_ARRAY,
+    ByteArray,
+    mem::size_of::<ByteArray>()
+);
+make_type!(
+    FixedLenByteArrayType,
+    Type::FIXED_LEN_BYTE_ARRAY,
+    ByteArray,
+    mem::size_of::<ByteArray>()
+);
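+
+// For example, each generated type reports its Parquet physical type and the size of
+// its Rust representation:
+//
+// assert_eq!(Int32Type::get_physical_type(), Type::INT32);
+// assert_eq!(Int32Type::get_type_size(), 4);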
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_as_bytes() {
+        assert_eq!(false.as_bytes(), &[0]);
+        assert_eq!(true.as_bytes(), &[1]);
+        assert_eq!((7 as i32).as_bytes(), &[7, 0, 0, 0]);
+        assert_eq!((555 as i32).as_bytes(), &[43, 2, 0, 0]);
+        assert_eq!((555 as u32).as_bytes(), &[43, 2, 0, 0]);
+        assert_eq!(i32::max_value().as_bytes(), &[255, 255, 255, 127]);
+        assert_eq!(i32::min_value().as_bytes(), &[0, 0, 0, 128]);
+        assert_eq!((7 as i64).as_bytes(), &[7, 0, 0, 0, 0, 0, 0, 0]);
+        assert_eq!((555 as i64).as_bytes(), &[43, 2, 0, 0, 0, 0, 0, 0]);
+        assert_eq!(
+            (i64::max_value()).as_bytes(),
+            &[255, 255, 255, 255, 255, 255, 255, 127]
+        );
+        assert_eq!((i64::min_value()).as_bytes(), &[0, 0, 0, 0, 0, 0, 0, 128]);
+        assert_eq!((3.14 as f32).as_bytes(), &[195, 245, 72, 64]);
+        assert_eq!(
+            (3.14 as f64).as_bytes(),
+            &[31, 133, 235, 81, 184, 30, 9, 64]
+        );
+        assert_eq!("hello".as_bytes(), &[b'h', b'e', b'l', b'l', b'o']);
+        assert_eq!(
+            Vec::from("hello".as_bytes()).as_bytes(),
+            &[b'h', b'e', b'l', b'l', b'o']
+        );
+
+        // Test Int96
+        let i96 = Int96::from(vec![1, 2, 3]);
+        assert_eq!(i96.as_bytes(), &[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0]);
+
+        // Test ByteArray
+        let ba = ByteArray::from(vec![1, 2, 3]);
+        assert_eq!(ba.as_bytes(), &[1, 2, 3]);
+
+        // Test Decimal
+        let decimal = Decimal::from_i32(123, 5, 2);
+        assert_eq!(decimal.as_bytes(), &[0, 0, 0, 123]);
+        let decimal = Decimal::from_i64(123, 5, 2);
+        assert_eq!(decimal.as_bytes(), &[0, 0, 0, 0, 0, 0, 0, 123]);
+        let decimal = Decimal::from_bytes(ByteArray::from(vec![1, 2, 3]), 5, 2);
+        assert_eq!(decimal.as_bytes(), &[1, 2, 3]);
+    }
+
+    #[test]
+    fn test_int96_from() {
+        assert_eq!(
+            Int96::from(vec![1, 12345, 1234567890]).data(),
+            &[1, 12345, 1234567890]
+        );
+    }
+
+    #[test]
+    fn test_byte_array_from() {
+        assert_eq!(
+            ByteArray::from(vec![b'A', b'B', b'C']).data(),
+            &[b'A', b'B', b'C']
+        );
+        assert_eq!(ByteArray::from("ABC").data(), &[b'A', b'B', b'C']);
+        assert_eq!(
+            ByteArray::from(ByteBufferPtr::new(vec![1u8, 2u8, 3u8, 4u8, 5u8])).data(),
+            &[1u8, 2u8, 3u8, 4u8, 5u8]
+        );
+        let mut buf = ByteBuffer::new();
+        buf.set_data(vec![6u8, 7u8, 8u8, 9u8, 10u8]);
+        assert_eq!(ByteArray::from(buf).data(), &[6u8, 7u8, 8u8, 9u8, 10u8]);
+    }
+
+    #[test]
+    fn test_decimal_partial_eq() {
+        assert_eq!(Decimal::default(), Decimal::from_i32(0, 0, 0));
+        assert_eq!(Decimal::from_i32(222, 5, 2), Decimal::from_i32(222, 5, 2));
+        assert_eq!(
+            Decimal::from_bytes(ByteArray::from(vec![0, 0, 0, 3]), 5, 2),
+            Decimal::from_i32(3, 5, 2)
+        );
+
+        assert!(Decimal::from_i32(222, 5, 2) != Decimal::from_i32(111, 5, 2));
+        assert!(Decimal::from_i32(222, 5, 2) != Decimal::from_i32(222, 6, 2));
+        assert!(Decimal::from_i32(222, 5, 2) != Decimal::from_i32(222, 5, 3));
+
+        assert!(Decimal::from_i64(222, 5, 2) != Decimal::from_i32(222, 5, 2));
+    }
+}
diff --git a/rust/src/parquet/encodings/decoding.rs b/rust/src/parquet/encodings/decoding.rs
new file mode 100644
index 0000000..c6a6fd4
--- /dev/null
+++ b/rust/src/parquet/encodings/decoding.rs
@@ -0,0 +1,1403 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains all supported decoders for Parquet.
+
+use std::{cmp, marker::PhantomData, mem, slice::from_raw_parts_mut};
+
+use super::rle::RleDecoder;
+
+use byteorder::{ByteOrder, LittleEndian};
+
+use crate::parquet::basic::*;
+use crate::parquet::data_type::*;
+use crate::parquet::errors::{ParquetError, Result};
+use crate::parquet::schema::types::ColumnDescPtr;
+use crate::parquet::util::{
+    bit_util::BitReader,
+    memory::{ByteBuffer, ByteBufferPtr},
+};
+
+// ----------------------------------------------------------------------
+// Decoders
+
+/// A Parquet decoder for the data type `T`.
+pub trait Decoder<T: DataType> {
+    /// Sets the data to decode to be `data`, which should contain `num_values` values
+    /// to decode.
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()>;
+
+    /// Consumes values from this decoder and writes the results to `buffer`. This will
+    /// try to fill up `buffer`.
+    ///
+    /// Returns the actual number of values decoded, which should be equal to `buffer.len()`
+    /// unless the remaining number of values is less than `buffer.len()`.
+    fn get(&mut self, buffer: &mut [T::T]) -> Result<usize>;
+
+    /// Returns the number of values left in this decoder stream.
+    fn values_left(&self) -> usize;
+
+    /// Returns the encoding for this decoder.
+    fn encoding(&self) -> Encoding;
+}
+
+/// Gets a decoder for the column descriptor `descr` and encoding type `encoding`.
+///
+/// NOTE: the primitive type in `descr` MUST match the data type `T`, otherwise
+/// disastrous consequences could occur.
+pub fn get_decoder<T: DataType>(
+    descr: ColumnDescPtr,
+    encoding: Encoding,
+) -> Result<Box<Decoder<T>>> {
+    let decoder: Box<Decoder<T>> = match encoding {
+        Encoding::PLAIN => Box::new(PlainDecoder::new(descr.type_length())),
+        Encoding::RLE_DICTIONARY | Encoding::PLAIN_DICTIONARY => {
+            return Err(general_err!(
+                "Cannot initialize this encoding through this function"
+            ));
+        }
+        Encoding::RLE => Box::new(RleValueDecoder::new()),
+        Encoding::DELTA_BINARY_PACKED => Box::new(DeltaBitPackDecoder::new()),
+        Encoding::DELTA_LENGTH_BYTE_ARRAY => Box::new(DeltaLengthByteArrayDecoder::new()),
+        Encoding::DELTA_BYTE_ARRAY => Box::new(DeltaByteArrayDecoder::new()),
+        e => return Err(nyi_err!("Encoding {} is not supported", e)),
+    };
+    Ok(decoder)
+}
+
+// ----------------------------------------------------------------------
+// PLAIN Decoding
+
+/// Plain decoding that supports all types.
+/// Values are encoded back to back. For native types, data is encoded as little endian.
+/// Floating point types are encoded in IEEE format.
+/// See [`PlainEncoder`](`::encoding::PlainEncoder`) for more information.
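+///
+/// For example, the BYTE_ARRAY value `"foo"` is stored as its little-endian length
+/// `03 00 00 00` followed by the raw bytes `66 6f 6f`.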
+pub struct PlainDecoder<T: DataType> {
+    // The remaining number of values in the byte array
+    num_values: usize,
+
+    // The current starting index in the byte array.
+    start: usize,
+
+    // The length for the type `T`. Only used when `T` is `FixedLenByteArrayType`.
+    type_length: i32,
+
+    // The byte array to decode from. Not set if `T` is bool.
+    data: Option<ByteBufferPtr>,
+
+    // Read `data` bit by bit. Only set if `T` is bool.
+    bit_reader: Option<BitReader>,
+
+    // To allow `T` in the generic parameter for this struct. This doesn't take any space.
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> PlainDecoder<T> {
+    /// Creates new plain decoder.
+    pub fn new(type_length: i32) -> Self {
+        PlainDecoder {
+            data: None,
+            bit_reader: None,
+            type_length,
+            num_values: 0,
+            start: 0,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Decoder<T> for PlainDecoder<T> {
+    #[inline]
+    default fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        self.num_values = num_values;
+        self.start = 0;
+        self.data = Some(data);
+        Ok(())
+    }
+
+    #[inline]
+    fn values_left(&self) -> usize {
+        self.num_values
+    }
+
+    #[inline]
+    fn encoding(&self) -> Encoding {
+        Encoding::PLAIN
+    }
+
+    #[inline]
+    default fn get(&mut self, buffer: &mut [T::T]) -> Result<usize> {
+        assert!(self.data.is_some());
+
+        let data = self.data.as_mut().unwrap();
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        let bytes_left = data.len() - self.start;
+        let bytes_to_decode = mem::size_of::<T::T>() * num_values;
+        if bytes_left < bytes_to_decode {
+            return Err(eof_err!("Not enough bytes to decode"));
+        }
+        let raw_buffer: &mut [u8] =
+            unsafe { from_raw_parts_mut(buffer.as_ptr() as *mut u8, bytes_to_decode) };
+        raw_buffer.copy_from_slice(data.range(self.start, bytes_to_decode).as_ref());
+        self.start += bytes_to_decode;
+        self.num_values -= num_values;
+
+        Ok(num_values)
+    }
+}
+
+impl Decoder<Int96Type> for PlainDecoder<Int96Type> {
+    fn get(&mut self, buffer: &mut [Int96]) -> Result<usize> {
+        assert!(self.data.is_some());
+
+        let data = self.data.as_ref().unwrap();
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        let bytes_left = data.len() - self.start;
+        let bytes_to_decode = 12 * num_values;
+        if bytes_left < bytes_to_decode {
+            return Err(eof_err!("Not enough bytes to decode"));
+        }
+
+        let data_range = data.range(self.start, bytes_to_decode);
+        let bytes: &[u8] = data_range.data();
+        self.start += bytes_to_decode;
+
+        let mut pos = 0; // position in byte array
+        for i in 0..num_values {
+            let elem0 = LittleEndian::read_u32(&bytes[pos..pos + 4]);
+            let elem1 = LittleEndian::read_u32(&bytes[pos + 4..pos + 8]);
+            let elem2 = LittleEndian::read_u32(&bytes[pos + 8..pos + 12]);
+            buffer[i].set_data(elem0, elem1, elem2);
+            pos += 12;
+        }
+        self.num_values -= num_values;
+
+        Ok(num_values)
+    }
+}
+
+impl Decoder<BoolType> for PlainDecoder<BoolType> {
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        self.num_values = num_values;
+        self.bit_reader = Some(BitReader::new(data));
+        Ok(())
+    }
+
+    fn get(&mut self, buffer: &mut [bool]) -> Result<usize> {
+        assert!(self.bit_reader.is_some());
+
+        let bit_reader = self.bit_reader.as_mut().unwrap();
+        let values_read = bit_reader.get_batch::<bool>(buffer, 1);
+        self.num_values -= values_read;
+
+        Ok(values_read)
+    }
+}
+
+impl Decoder<ByteArrayType> for PlainDecoder<ByteArrayType> {
+    fn get(&mut self, buffer: &mut [ByteArray]) -> Result<usize> {
+        assert!(self.data.is_some());
+
+        let data = self.data.as_mut().unwrap();
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        for i in 0..num_values {
+            let len: usize = read_num_bytes!(u32, 4, data.start_from(self.start).as_ref()) as usize;
+            self.start += mem::size_of::<u32>();
+            if data.len() < self.start + len {
+                return Err(eof_err!("Not enough bytes to decode"));
+            }
+            buffer[i].set_data(data.range(self.start, len));
+            self.start += len;
+        }
+        self.num_values -= num_values;
+
+        Ok(num_values)
+    }
+}
+
+impl Decoder<FixedLenByteArrayType> for PlainDecoder<FixedLenByteArrayType> {
+    fn get(&mut self, buffer: &mut [ByteArray]) -> Result<usize> {
+        assert!(self.data.is_some());
+        assert!(self.type_length > 0);
+
+        let data = self.data.as_mut().unwrap();
+        let type_length = self.type_length as usize;
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        for i in 0..num_values {
+            if data.len() < self.start + type_length {
+                return Err(eof_err!("Not enough bytes to decode"));
+            }
+            buffer[i].set_data(data.range(self.start, type_length));
+            self.start += type_length;
+        }
+        self.num_values -= num_values;
+
+        Ok(num_values)
+    }
+}
+
+// ----------------------------------------------------------------------
+// RLE_DICTIONARY/PLAIN_DICTIONARY Decoding
+
+/// Dictionary decoder.
+/// The dictionary encoding builds a dictionary of values encountered in a given column.
+/// The dictionary is stored in a dictionary page per column chunk.
+/// See [`DictEncoder`](`::encoding::DictEncoder`) for more information.
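+///
+/// A sketch of the expected call order (`dict_page_decoder` is assumed to be a
+/// `PlainDecoder` fed with the dictionary page, and `data` to hold the encoded ids):
+///
+/// ```ignore
+/// let mut decoder = DictDecoder::<Int32Type>::new();
+/// decoder.set_dict(Box::new(dict_page_decoder))?; // must be called first
+/// decoder.set_data(data, num_values)?;
+/// let mut values = vec![0i32; num_values];
+/// decoder.get(&mut values)?;
+/// ```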
+pub struct DictDecoder<T: DataType> {
+    // The dictionary, which maps ids to the values
+    dictionary: Vec<T::T>,
+
+    // Whether `dictionary` has been initialized
+    has_dictionary: bool,
+
+    // The decoder for the value ids
+    rle_decoder: Option<RleDecoder>,
+
+    // Number of values left in the data stream
+    num_values: usize,
+}
+
+impl<T: DataType> DictDecoder<T> {
+    /// Creates new dictionary decoder.
+    pub fn new() -> Self {
+        Self {
+            dictionary: vec![],
+            has_dictionary: false,
+            rle_decoder: None,
+            num_values: 0,
+        }
+    }
+
+    /// Decodes the dictionary values using the given `decoder` and stores them.
+    pub fn set_dict(&mut self, mut decoder: Box<Decoder<T>>) -> Result<()> {
+        let num_values = decoder.values_left();
+        self.dictionary.resize(num_values, T::T::default());
+        let _ = decoder.get(&mut self.dictionary)?;
+        self.has_dictionary = true;
+        Ok(())
+    }
+}
+
+impl<T: DataType> Decoder<T> for DictDecoder<T> {
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        // First byte in `data` is bit width
+        let bit_width = data.as_ref()[0];
+        let mut rle_decoder = RleDecoder::new(bit_width);
+        rle_decoder.set_data(data.start_from(1));
+        self.num_values = num_values;
+        self.rle_decoder = Some(rle_decoder);
+        Ok(())
+    }
+
+    fn get(&mut self, buffer: &mut [T::T]) -> Result<usize> {
+        assert!(self.rle_decoder.is_some());
+        assert!(self.has_dictionary, "Must call set_dict() first!");
+
+        let rle = self.rle_decoder.as_mut().unwrap();
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        rle.get_batch_with_dict(&self.dictionary[..], buffer, num_values)
+    }
+
+    /// Number of values left in this decoder stream
+    fn values_left(&self) -> usize {
+        self.num_values
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::RLE_DICTIONARY
+    }
+}
+
+// ----------------------------------------------------------------------
+// RLE Decoding
+
+/// RLE/Bit-Packing hybrid decoding for values.
+/// Currently used only for data pages v2 and supports only the boolean type.
+/// See [`RleValueEncoder`](`::encoding::RleValueEncoder`) for more information.
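+/// The encoded data is prefixed with its length, stored as a 4-byte i32, followed
+/// by the RLE/bit-packed runs (see `set_data_internal`).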
+pub struct RleValueDecoder<T: DataType> {
+    values_left: usize,
+    decoder: Option<RleDecoder>,
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> RleValueDecoder<T> {
+    pub fn new() -> Self {
+        Self {
+            values_left: 0,
+            decoder: None,
+            _phantom: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn set_data_internal(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        // The stream is prefixed with its length, stored as an i32; read it first.
+        let i32_size = mem::size_of::<i32>();
+        let data_size = read_num_bytes!(i32, i32_size, data.as_ref()) as usize;
+        let rle_decoder = self
+            .decoder
+            .as_mut()
+            .expect("RLE decoder is not initialized");
+        rle_decoder.set_data(data.range(i32_size, data_size));
+        self.values_left = num_values;
+        Ok(())
+    }
+}
+
+impl<T: DataType> Decoder<T> for RleValueDecoder<T> {
+    #[inline]
+    default fn set_data(&mut self, _data: ByteBufferPtr, _num_values: usize) -> Result<()> {
+        panic!("RleValueDecoder only supports BoolType");
+    }
+
+    #[inline]
+    fn values_left(&self) -> usize {
+        self.values_left
+    }
+
+    #[inline]
+    fn encoding(&self) -> Encoding {
+        Encoding::RLE
+    }
+
+    #[inline]
+    fn get(&mut self, buffer: &mut [T::T]) -> Result<usize> {
+        let rle_decoder = self
+            .decoder
+            .as_mut()
+            .expect("RLE decoder is not initialized");
+        let values_read = rle_decoder.get_batch(buffer)?;
+        self.values_left -= values_read;
+        Ok(values_read)
+    }
+}
+
+impl Decoder<BoolType> for RleValueDecoder<BoolType> {
+    #[inline]
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        // Only support RLE value reader for boolean values with bit width of 1.
+        self.decoder = Some(RleDecoder::new(1));
+        self.set_data_internal(data, num_values)
+    }
+}
+
+// ----------------------------------------------------------------------
+// DELTA_BINARY_PACKED Decoding
+
+/// Delta binary packed decoder.
+/// Supports INT32 and INT64 types.
+/// See [`DeltaBitPackEncoder`](`::encoding::DeltaBitPackEncoder`) for more information.
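+///
+/// The encoded stream starts with the header `<block size> <number of mini blocks
+/// per block> <total value count> <first value>`, where the first three are VLQ
+/// ints and the first value is a zigzag VLQ int; each block then stores `min_delta`,
+/// the bit width of each mini block, and the bit-packed deltas (see `set_data` and
+/// `init_block`).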
+pub struct DeltaBitPackDecoder<T: DataType> {
+    bit_reader: BitReader,
+    initialized: bool,
+
+    // Header info
+    num_values: usize,
+    num_mini_blocks: i64,
+    values_per_mini_block: usize,
+    values_current_mini_block: usize,
+    first_value: i64,
+    first_value_read: bool,
+
+    // Per block info
+    min_delta: i64,
+    mini_block_idx: usize,
+    delta_bit_width: u8,
+    delta_bit_widths: ByteBuffer,
+    deltas_in_mini_block: Vec<T::T>, // eagerly loaded deltas for a mini block
+    use_batch: bool,
+
+    current_value: i64,
+
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> DeltaBitPackDecoder<T> {
+    /// Creates new delta bit packed decoder.
+    pub fn new() -> Self {
+        Self {
+            bit_reader: BitReader::from(vec![]),
+            initialized: false,
+            num_values: 0,
+            num_mini_blocks: 0,
+            values_per_mini_block: 0,
+            values_current_mini_block: 0,
+            first_value: 0,
+            first_value_read: false,
+            min_delta: 0,
+            mini_block_idx: 0,
+            delta_bit_width: 0,
+            delta_bit_widths: ByteBuffer::new(),
+            deltas_in_mini_block: vec![],
+            use_batch: mem::size_of::<T::T>() == 4,
+            current_value: 0,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Returns the byte offset of the underlying bit reader.
+    pub fn get_offset(&self) -> usize {
+        assert!(self.initialized, "Bit reader is not initialized");
+        self.bit_reader.get_byte_offset()
+    }
+
+    /// Initializes a new block: reads `min_delta` and the bit widths of its mini blocks.
+    #[inline]
+    fn init_block(&mut self) -> Result<()> {
+        self.min_delta = self
+            .bit_reader
+            .get_zigzag_vlq_int()
+            .ok_or(eof_err!("Not enough data to decode 'min_delta'"))?;
+
+        let mut widths = vec![];
+        for _ in 0..self.num_mini_blocks {
+            let w = self
+                .bit_reader
+                .get_aligned::<u8>(1)
+                .ok_or(eof_err!("Not enough data to decode 'width'"))?;
+            widths.push(w);
+        }
+
+        self.delta_bit_widths.set_data(widths);
+        self.mini_block_idx = 0;
+        self.delta_bit_width = self.delta_bit_widths.data()[0];
+        self.values_current_mini_block = self.values_per_mini_block;
+        Ok(())
+    }
+
+    /// Loads the deltas of the current mini block.
+    #[inline]
+    fn load_deltas_in_mini_block(&mut self) -> Result<()> {
+        self.deltas_in_mini_block.clear();
+        if self.use_batch {
+            self.deltas_in_mini_block
+                .resize(self.values_current_mini_block, T::T::default());
+            let loaded = self.bit_reader.get_batch::<T::T>(
+                &mut self.deltas_in_mini_block[..],
+                self.delta_bit_width as usize,
+            );
+            assert!(loaded == self.values_current_mini_block);
+        } else {
+            for _ in 0..self.values_current_mini_block {
+                // TODO: load one batch at a time similar to int32
+                let delta = self
+                    .bit_reader
+                    .get_value::<T::T>(self.delta_bit_width as usize)
+                    .ok_or(eof_err!("Not enough data to decode 'delta'"))?;
+                self.deltas_in_mini_block.push(delta);
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl<T: DataType> Decoder<T> for DeltaBitPackDecoder<T> {
+    // The total number of values is stored in the encoded data itself, so the
+    // `num_values` argument is ignored.
+    #[inline]
+    default fn set_data(&mut self, data: ByteBufferPtr, _: usize) -> Result<()> {
+        self.bit_reader = BitReader::new(data);
+        self.initialized = true;
+
+        let block_size = self
+            .bit_reader
+            .get_vlq_int()
+            .ok_or(eof_err!("Not enough data to decode 'block_size'"))?;
+        self.num_mini_blocks = self
+            .bit_reader
+            .get_vlq_int()
+            .ok_or(eof_err!("Not enough data to decode 'num_mini_blocks'"))?;
+        self.num_values =
+            self.bit_reader
+                .get_vlq_int()
+                .ok_or(eof_err!("Not enough data to decode 'num_values'"))? as usize;
+        self.first_value = self
+            .bit_reader
+            .get_zigzag_vlq_int()
+            .ok_or(eof_err!("Not enough data to decode 'first_value'"))?;
+
+        // Reset decoding state
+        self.first_value_read = false;
+        self.mini_block_idx = 0;
+        self.delta_bit_widths.clear();
+        self.values_current_mini_block = 0;
+
+        self.values_per_mini_block = (block_size / self.num_mini_blocks) as usize;
+        assert!(self.values_per_mini_block % 8 == 0);
+
+        Ok(())
+    }
+
+    default fn get(&mut self, buffer: &mut [T::T]) -> Result<usize> {
+        assert!(self.initialized, "Bit reader is not initialized");
+
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        for i in 0..num_values {
+            if !self.first_value_read {
+                self.set_decoded_value(buffer, i, self.first_value);
+                self.current_value = self.first_value;
+                self.first_value_read = true;
+                continue;
+            }
+
+            if self.values_current_mini_block == 0 {
+                self.mini_block_idx += 1;
+                if self.mini_block_idx < self.delta_bit_widths.size() {
+                    self.delta_bit_width = self.delta_bit_widths.data()[self.mini_block_idx];
+                    self.values_current_mini_block = self.values_per_mini_block;
+                } else {
+                    self.init_block()?;
+                }
+                self.load_deltas_in_mini_block()?;
+            }
+
+            // `values_current_mini_block` counts down as we read, so invert it to
+            // index into the eagerly loaded deltas
+            let delta =
+                self.get_delta(self.deltas_in_mini_block.len() - self.values_current_mini_block);
+            // It is OK for deltas to contain "overflowed" values after encoding,
+            // e.g. i64::MAX - i64::MIN, so we use `wrapping_add` to "overflow" again and
+            // restore original value.
+            self.current_value = self.current_value.wrapping_add(self.min_delta);
+            self.current_value = self.current_value.wrapping_add(delta as i64);
+            self.set_decoded_value(buffer, i, self.current_value);
+            self.values_current_mini_block -= 1;
+        }
+
+        self.num_values -= num_values;
+        Ok(num_values)
+    }
+
+    fn values_left(&self) -> usize {
+        self.num_values
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::DELTA_BINARY_PACKED
+    }
+}
+
+/// Helper trait to define specific conversions when decoding values
+trait DeltaBitPackDecoderConversion<T: DataType> {
+    /// Returns the delta at `index` in the current mini block, widened to `i64`.
+    #[inline]
+    fn get_delta(&self, index: usize) -> i64;
+
+    /// Sets `buffer[index]` to `value`, narrowed to the physical type of `T`.
+    #[inline]
+    fn set_decoded_value(&self, buffer: &mut [T::T], index: usize, value: i64);
+}
+
+impl<T: DataType> DeltaBitPackDecoderConversion<T> for DeltaBitPackDecoder<T> {
+    #[inline]
+    default fn get_delta(&self, _: usize) -> i64 {
+        panic!("DeltaBitPackDecoder only supports Int32Type and Int64Type")
+    }
+
+    #[inline]
+    default fn set_decoded_value(&self, _: &mut [T::T], _: usize, _: i64) {
+        panic!("DeltaBitPackDecoder only supports Int32Type and Int64Type")
+    }
+}
+
+impl DeltaBitPackDecoderConversion<Int32Type> for DeltaBitPackDecoder<Int32Type> {
+    #[inline]
+    fn get_delta(&self, index: usize) -> i64 {
+        self.deltas_in_mini_block[index] as i64
+    }
+
+    #[inline]
+    fn set_decoded_value(&self, buffer: &mut [i32], index: usize, value: i64) {
+        buffer[index] = value as i32;
+    }
+}
+
+impl DeltaBitPackDecoderConversion<Int64Type> for DeltaBitPackDecoder<Int64Type> {
+    #[inline]
+    fn get_delta(&self, index: usize) -> i64 {
+        self.deltas_in_mini_block[index]
+    }
+
+    #[inline]
+    fn set_decoded_value(&self, buffer: &mut [i64], index: usize, value: i64) {
+        buffer[index] = value;
+    }
+}
+
+// ----------------------------------------------------------------------
+// DELTA_LENGTH_BYTE_ARRAY Decoding
+
+/// Delta length byte array decoder.
+/// Applies only to byte arrays: the length values are separated from the data, and
+/// the lengths are encoded using the DELTA_BINARY_PACKED encoding.
+/// See [`DeltaLengthByteArrayEncoder`](`::encoding::DeltaLengthByteArrayEncoder`)
+/// for more information.
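+///
+/// For example, the values `"hello"` and `"parquet"` are stored as the
+/// delta-encoded lengths `[5, 7]` followed by the concatenated bytes
+/// `"helloparquet"`.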
+pub struct DeltaLengthByteArrayDecoder<T: DataType> {
+    // Lengths for each byte array in `data`
+    // TODO: add memory tracker to this
+    lengths: Vec<i32>,
+
+    // Current index into `lengths`
+    current_idx: usize,
+
+    // Concatenated byte array data
+    data: Option<ByteBufferPtr>,
+
+    // Offset into `data`; always points to the beginning of the next byte array.
+    offset: usize,
+
+    // Number of values left in this decoder stream
+    num_values: usize,
+
+    // Placeholder to allow `T` as generic parameter
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> DeltaLengthByteArrayDecoder<T> {
+    /// Creates new delta length byte array decoder.
+    pub fn new() -> Self {
+        Self {
+            lengths: vec![],
+            current_idx: 0,
+            data: None,
+            offset: 0,
+            num_values: 0,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Decoder<T> for DeltaLengthByteArrayDecoder<T> {
+    default fn set_data(&mut self, _: ByteBufferPtr, _: usize) -> Result<()> {
+        Err(general_err!(
+            "DeltaLengthByteArrayDecoder only support ByteArrayType"
+        ))
+    }
+
+    default fn get(&mut self, _: &mut [T::T]) -> Result<usize> {
+        Err(general_err!(
+            "DeltaLengthByteArrayDecoder only support ByteArrayType"
+        ))
+    }
+
+    fn values_left(&self) -> usize {
+        self.num_values
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::DELTA_LENGTH_BYTE_ARRAY
+    }
+}
+
+impl Decoder<ByteArrayType> for DeltaLengthByteArrayDecoder<ByteArrayType> {
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        let mut len_decoder = DeltaBitPackDecoder::<Int32Type>::new();
+        len_decoder.set_data(data.all(), num_values)?;
+        let num_lengths = len_decoder.values_left();
+        self.lengths.resize(num_lengths, 0);
+        len_decoder.get(&mut self.lengths[..])?;
+
+        self.data = Some(data.start_from(len_decoder.get_offset()));
+        self.offset = 0;
+        self.current_idx = 0;
+        self.num_values = num_lengths;
+        Ok(())
+    }
+
+    fn get(&mut self, buffer: &mut [ByteArray]) -> Result<usize> {
+        assert!(self.data.is_some());
+
+        let data = self.data.as_ref().unwrap();
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        for i in 0..num_values {
+            let len = self.lengths[self.current_idx] as usize;
+            buffer[i].set_data(data.range(self.offset, len));
+            self.offset += len;
+            self.current_idx += 1;
+        }
+
+        self.num_values -= num_values;
+        Ok(num_values)
+    }
+}
+
+// ----------------------------------------------------------------------
+// DELTA_BYTE_ARRAY Decoding
+
+/// Delta byte array decoder.
+/// Prefix lengths are encoded using the `DELTA_BINARY_PACKED` encoding, while
+/// suffixes are stored using the `DELTA_LENGTH_BYTE_ARRAY` encoding.
+/// See [`DeltaByteArrayEncoder`](`::encoding::DeltaByteArrayEncoder`) for more
+/// information.
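+///
+/// For example, `["axis", "axle"]` is stored as prefix lengths `[0, 2]` and
+/// suffixes `["axis", "le"]`; each value is rebuilt by concatenating the shared
+/// prefix of the previous value with its suffix.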
+pub struct DeltaByteArrayDecoder<T: DataType> {
+    // Prefix lengths for each byte array
+    // TODO: add memory tracker to this
+    prefix_lengths: Vec<i32>,
+
+    // The current index into `prefix_lengths`.
+    current_idx: usize,
+
+    // Decoder for all suffixes, the # of which should be the same as
+    // `prefix_lengths.len()`
+    suffix_decoder: Option<DeltaLengthByteArrayDecoder<ByteArrayType>>,
+
+    // The last byte array, used to derive the current prefix
+    previous_value: Vec<u8>,
+
+    // Number of values left
+    num_values: usize,
+
+    // Placeholder to allow `T` as generic parameter
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> DeltaByteArrayDecoder<T> {
+    /// Creates new delta byte array decoder.
+    pub fn new() -> Self {
+        Self {
+            prefix_lengths: vec![],
+            current_idx: 0,
+            suffix_decoder: None,
+            previous_value: vec![],
+            num_values: 0,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Decoder<T> for DeltaByteArrayDecoder<T> {
+    default fn set_data(&mut self, _: ByteBufferPtr, _: usize) -> Result<()> {
+        Err(general_err!(
+            "DeltaByteArrayDecoder only supports ByteArrayType and FixedLenByteArrayType"
+        ))
+    }
+
+    default fn get(&mut self, _: &mut [T::T]) -> Result<usize> {
+        Err(general_err!(
+            "DeltaByteArrayDecoder only supports ByteArrayType and FixedLenByteArrayType"
+        ))
+    }
+
+    fn values_left(&self) -> usize {
+        self.num_values
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::DELTA_BYTE_ARRAY
+    }
+}
+
+impl Decoder<ByteArrayType> for DeltaByteArrayDecoder<ByteArrayType> {
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        let mut prefix_len_decoder = DeltaBitPackDecoder::<Int32Type>::new();
+        prefix_len_decoder.set_data(data.all(), num_values)?;
+        let num_prefixes = prefix_len_decoder.values_left();
+        self.prefix_lengths.resize(num_prefixes, 0);
+        prefix_len_decoder.get(&mut self.prefix_lengths[..])?;
+
+        let mut suffix_decoder = DeltaLengthByteArrayDecoder::new();
+        suffix_decoder.set_data(data.start_from(prefix_len_decoder.get_offset()), num_values)?;
+        self.suffix_decoder = Some(suffix_decoder);
+        self.num_values = num_prefixes;
+        self.current_idx = 0;
+        self.previous_value.clear();
+        Ok(())
+    }
+
+    fn get(&mut self, buffer: &mut [ByteArray]) -> Result<usize> {
+        assert!(self.suffix_decoder.is_some());
+
+        let num_values = cmp::min(buffer.len(), self.num_values);
+        let mut v: [ByteArray; 1] = [ByteArray::new(); 1];
+        for i in 0..num_values {
+            // Process suffix
+            // TODO: this is awkward - maybe we should add a non-vectorized API?
+            let suffix_decoder = self.suffix_decoder.as_mut().unwrap();
+            suffix_decoder.get(&mut v[..])?;
+            let suffix = v[0].data();
+
+            // Extract current prefix length, can be 0
+            let prefix_len = self.prefix_lengths[self.current_idx] as usize;
+
+            // Concatenate prefix with suffix
+            let mut result = Vec::new();
+            result.extend_from_slice(&self.previous_value[0..prefix_len]);
+            result.extend_from_slice(suffix);
+
+            let data = ByteBufferPtr::new(result.clone());
+            buffer[i].set_data(data);
+            self.previous_value = result;
+            self.current_idx += 1;
+        }
+
+        self.num_values -= num_values;
+        Ok(num_values)
+    }
+}
+
+impl Decoder<FixedLenByteArrayType> for DeltaByteArrayDecoder<FixedLenByteArrayType> {
+    fn set_data(&mut self, data: ByteBufferPtr, num_values: usize) -> Result<()> {
+        let s: &mut DeltaByteArrayDecoder<ByteArrayType> = unsafe { mem::transmute(self) };
+        s.set_data(data, num_values)
+    }
+
+    fn get(&mut self, buffer: &mut [ByteArray]) -> Result<usize> {
+        let s: &mut DeltaByteArrayDecoder<ByteArrayType> = unsafe { mem::transmute(self) };
+        s.get(buffer)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{super::encoding::*, *};
+
+    use std::{mem, rc::Rc};
+
+    use crate::parquet::schema::types::{
+        ColumnDescPtr, ColumnDescriptor, ColumnPath, Type as SchemaType,
+    };
+    use crate::parquet::util::{bit_util::set_array_bit, memory::MemTracker, test_common::RandGen};
+
+    #[test]
+    fn test_get_decoders() {
+        // supported encodings
+        create_and_check_decoder::<Int32Type>(Encoding::PLAIN, None);
+        create_and_check_decoder::<Int32Type>(Encoding::DELTA_BINARY_PACKED, None);
+        create_and_check_decoder::<Int32Type>(Encoding::DELTA_LENGTH_BYTE_ARRAY, None);
+        create_and_check_decoder::<Int32Type>(Encoding::DELTA_BYTE_ARRAY, None);
+        create_and_check_decoder::<BoolType>(Encoding::RLE, None);
+
+        // error when initializing
+        create_and_check_decoder::<Int32Type>(
+            Encoding::RLE_DICTIONARY,
+            Some(general_err!(
+                "Cannot initialize this encoding through this function"
+            )),
+        );
+        create_and_check_decoder::<Int32Type>(
+            Encoding::PLAIN_DICTIONARY,
+            Some(general_err!(
+                "Cannot initialize this encoding through this function"
+            )),
+        );
+
+        // unsupported
+        create_and_check_decoder::<Int32Type>(
+            Encoding::BIT_PACKED,
+            Some(nyi_err!("Encoding BIT_PACKED is not supported")),
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_int32() {
+        let data = vec![42, 18, 52];
+        let data_bytes = Int32Type::to_byte_array(&data[..]);
+        let mut buffer = vec![0; 3];
+        test_plain_decode::<Int32Type>(
+            ByteBufferPtr::new(data_bytes),
+            3,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_int64() {
+        let data = vec![42, 18, 52];
+        let data_bytes = Int64Type::to_byte_array(&data[..]);
+        let mut buffer = vec![0; 3];
+        test_plain_decode::<Int64Type>(
+            ByteBufferPtr::new(data_bytes),
+            3,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_float() {
+        let data = vec![3.14, 2.414, 12.51];
+        let data_bytes = FloatType::to_byte_array(&data[..]);
+        let mut buffer = vec![0.0; 3];
+        test_plain_decode::<FloatType>(
+            ByteBufferPtr::new(data_bytes),
+            3,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_double() {
+        let data = vec![3.14f64, 2.414f64, 12.51f64];
+        let data_bytes = DoubleType::to_byte_array(&data[..]);
+        let mut buffer = vec![0.0f64; 3];
+        test_plain_decode::<DoubleType>(
+            ByteBufferPtr::new(data_bytes),
+            3,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_int96() {
+        let mut data = vec![Int96::new(); 4];
+        data[0].set_data(11, 22, 33);
+        data[1].set_data(44, 55, 66);
+        data[2].set_data(10, 20, 30);
+        data[3].set_data(40, 50, 60);
+        let data_bytes = Int96Type::to_byte_array(&data[..]);
+        let mut buffer = vec![Int96::new(); 4];
+        test_plain_decode::<Int96Type>(
+            ByteBufferPtr::new(data_bytes),
+            4,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_bool() {
+        let data = vec![
+            false, true, false, false, true, false, true, true, false, true,
+        ];
+        let data_bytes = BoolType::to_byte_array(&data[..]);
+        let mut buffer = vec![false; 10];
+        test_plain_decode::<BoolType>(
+            ByteBufferPtr::new(data_bytes),
+            10,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_byte_array() {
+        let mut data = vec![ByteArray::new(); 2];
+        data[0].set_data(ByteBufferPtr::new(String::from("hello").into_bytes()));
+        data[1].set_data(ByteBufferPtr::new(String::from("parquet").into_bytes()));
+        let data_bytes = ByteArrayType::to_byte_array(&data[..]);
+        let mut buffer = vec![ByteArray::new(); 2];
+        test_plain_decode::<ByteArrayType>(
+            ByteBufferPtr::new(data_bytes),
+            2,
+            -1,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    fn test_plain_decode_fixed_len_byte_array() {
+        let mut data = vec![ByteArray::default(); 3];
+        data[0].set_data(ByteBufferPtr::new(String::from("bird").into_bytes()));
+        data[1].set_data(ByteBufferPtr::new(String::from("come").into_bytes()));
+        data[2].set_data(ByteBufferPtr::new(String::from("flow").into_bytes()));
+        let data_bytes = FixedLenByteArrayType::to_byte_array(&data[..]);
+        let mut buffer = vec![ByteArray::default(); 3];
+        test_plain_decode::<FixedLenByteArrayType>(
+            ByteBufferPtr::new(data_bytes),
+            3,
+            4,
+            &mut buffer[..],
+            &data[..],
+        );
+    }
+
+    #[test]
+    #[should_panic(expected = "RleValueEncoder only supports BoolType")]
+    fn test_rle_value_encode_int32_not_supported() {
+        let mut encoder = RleValueEncoder::<Int32Type>::new();
+        encoder.put(&vec![1, 2, 3, 4]).unwrap();
+    }
+
+    #[test]
+    #[should_panic(expected = "RleValueDecoder only supports BoolType")]
+    fn test_rle_value_decode_int32_not_supported() {
+        let mut decoder = RleValueDecoder::<Int32Type>::new();
+        decoder
+            .set_data(ByteBufferPtr::new(vec![5, 0, 0, 0]), 1)
+            .unwrap();
+    }
+
+    #[test]
+    fn test_rle_value_decode_bool_decode() {
+        // Test multiple 'put' calls on the same encoder
+        let data = vec![
+            BoolType::gen_vec(-1, 256),
+            BoolType::gen_vec(-1, 257),
+            BoolType::gen_vec(-1, 126),
+        ];
+        test_rle_value_decode::<BoolType>(data);
+    }
+
+    #[test]
+    #[should_panic(expected = "Bit reader is not initialized")]
+    fn test_delta_bit_packed_not_initialized_offset() {
+        // Fail if set_data() is not called before get_offset()
+        let decoder = DeltaBitPackDecoder::<Int32Type>::new();
+        decoder.get_offset();
+    }
+
+    #[test]
+    #[should_panic(expected = "Bit reader is not initialized")]
+    fn test_delta_bit_packed_not_initialized_get() {
+        // Fail if set_data() is not called before get()
+        let mut decoder = DeltaBitPackDecoder::<Int32Type>::new();
+        let mut buffer = vec![];
+        decoder.get(&mut buffer).unwrap();
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_empty() {
+        let data = vec![vec![0; 0]];
+        test_delta_bit_packed_decode::<Int32Type>(data);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_repeat() {
+        let block_data = vec![
+            1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5,
+            6, 7, 8,
+        ];
+        test_delta_bit_packed_decode::<Int32Type>(vec![block_data]);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_uneven() {
+        let block_data = vec![1, -2, 3, -4, 5, 6, 7, 8, 9, 10, 11];
+        test_delta_bit_packed_decode::<Int32Type>(vec![block_data]);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_same_values() {
+        let block_data = vec![
+            127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+        ];
+        test_delta_bit_packed_decode::<Int32Type>(vec![block_data]);
+
+        let block_data = vec![
+            -127, -127, -127, -127, -127, -127, -127, -127, -127, -127, -127, -127, -127, -127,
+            -127, -127,
+        ];
+        test_delta_bit_packed_decode::<Int32Type>(vec![block_data]);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_min_max() {
+        let block_data = vec![
+            i32::min_value(),
+            i32::max_value(),
+            i32::min_value(),
+            i32::max_value(),
+            i32::min_value(),
+            i32::max_value(),
+            i32::min_value(),
+            i32::max_value(),
+        ];
+        test_delta_bit_packed_decode::<Int32Type>(vec![block_data]);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_multiple_blocks() {
+        // Test multiple 'put' calls on the same encoder
+        let data = vec![
+            Int32Type::gen_vec(-1, 64),
+            Int32Type::gen_vec(-1, 128),
+            Int32Type::gen_vec(-1, 64),
+        ];
+        test_delta_bit_packed_decode::<Int32Type>(data);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_data_across_blocks() {
+        // Test multiple 'put' calls on the same encoder
+        let data = vec![Int32Type::gen_vec(-1, 256), Int32Type::gen_vec(-1, 257)];
+        test_delta_bit_packed_decode::<Int32Type>(data);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int32_with_empty_blocks() {
+        let data = vec![
+            Int32Type::gen_vec(-1, 128),
+            vec![0; 0],
+            Int32Type::gen_vec(-1, 64),
+        ];
+        test_delta_bit_packed_decode::<Int32Type>(data);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int64_empty() {
+        let data = vec![vec![0; 0]];
+        test_delta_bit_packed_decode::<Int64Type>(data);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int64_min_max() {
+        let block_data = vec![
+            i64::min_value(),
+            i64::max_value(),
+            i64::min_value(),
+            i64::max_value(),
+            i64::min_value(),
+            i64::max_value(),
+            i64::min_value(),
+            i64::max_value(),
+        ];
+        test_delta_bit_packed_decode::<Int64Type>(vec![block_data]);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_int64_multiple_blocks() {
+        // Test multiple 'put' calls on the same encoder
+        let data = vec![
+            Int64Type::gen_vec(-1, 64),
+            Int64Type::gen_vec(-1, 128),
+            Int64Type::gen_vec(-1, 64),
+        ];
+        test_delta_bit_packed_decode::<Int64Type>(data);
+    }
+
+    #[test]
+    fn test_delta_bit_packed_decoder_sample() {
+        let data_bytes = vec![
+            128, 1, 4, 3, 58, 28, 6, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+        ];
+        let buffer = ByteBufferPtr::new(data_bytes);
+        let mut decoder: DeltaBitPackDecoder<Int32Type> = DeltaBitPackDecoder::new();
+        decoder.set_data(buffer, 3).unwrap();
+        // check exact offsets, because when reading partial values we end up with
+        // some data not being read from bit reader
+        assert_eq!(decoder.get_offset(), 5);
+        let mut result = vec![0, 0, 0];
+        decoder.get(&mut result).unwrap();
+        assert_eq!(decoder.get_offset(), 34);
+        assert_eq!(result, vec![29, 43, 89]);
+    }
+
+    #[test]
+    fn test_delta_byte_array_same_arrays() {
+        let data = vec![
+            vec![ByteArray::from(vec![1, 2, 3, 4, 5, 6])],
+            vec![
+                ByteArray::from(vec![1, 2, 3, 4, 5, 6]),
+                ByteArray::from(vec![1, 2, 3, 4, 5, 6]),
+            ],
+            vec![
+                ByteArray::from(vec![1, 2, 3, 4, 5, 6]),
+                ByteArray::from(vec![1, 2, 3, 4, 5, 6]),
+            ],
+        ];
+        test_delta_byte_array_decode(data);
+    }
+
+    #[test]
+    fn test_delta_byte_array_unique_arrays() {
+        let data = vec![
+            vec![ByteArray::from(vec![1])],
+            vec![ByteArray::from(vec![2, 3]), ByteArray::from(vec![4, 5, 6])],
+            vec![
+                ByteArray::from(vec![7, 8]),
+                ByteArray::from(vec![9, 0, 1, 2]),
+            ],
+        ];
+        test_delta_byte_array_decode(data);
+    }
+
+    #[test]
+    fn test_delta_byte_array_single_array() {
+        let data = vec![vec![ByteArray::from(vec![1, 2, 3, 4, 5, 6])]];
+        test_delta_byte_array_decode(data);
+    }
+
+    fn test_plain_decode<T: DataType>(
+        data: ByteBufferPtr,
+        num_values: usize,
+        type_length: i32,
+        buffer: &mut [T::T],
+        expected: &[T::T],
+    ) {
+        let mut decoder: PlainDecoder<T> = PlainDecoder::new(type_length);
+        let result = decoder.set_data(data, num_values);
+        assert!(result.is_ok());
+        let result = decoder.get(&mut buffer[..]);
+        assert!(result.is_ok());
+        assert_eq!(decoder.values_left(), 0);
+        assert_eq!(buffer, expected);
+    }
+
+    fn test_rle_value_decode<T: DataType>(data: Vec<Vec<T::T>>) {
+        test_encode_decode::<T>(data, Encoding::RLE);
+    }
+
+    fn test_delta_bit_packed_decode<T: DataType>(data: Vec<Vec<T::T>>) {
+        test_encode_decode::<T>(data, Encoding::DELTA_BINARY_PACKED);
+    }
+
+    fn test_delta_byte_array_decode(data: Vec<Vec<ByteArray>>) {
+        test_encode_decode::<ByteArrayType>(data, Encoding::DELTA_BYTE_ARRAY);
+    }
+
+    // Input data represents vector of data slices to write (test multiple `put()` calls)
+    // For example,
+    //   vec![vec![1, 2, 3]] invokes `put()` once and writes {1, 2, 3}
+    //   vec![vec![1, 2], vec![3]] invokes `put()` twice and writes {1, 2, 3}
+    fn test_encode_decode<T: DataType>(data: Vec<Vec<T::T>>, encoding: Encoding) {
+        // Type length should not really matter for encode/decode test,
+        // otherwise change it based on type
+        let col_descr = create_test_col_desc_ptr(-1, T::get_physical_type());
+
+        // Encode data
+        let mut encoder = get_encoder::<T>(col_descr.clone(), encoding, Rc::new(MemTracker::new()))
+            .expect("get encoder");
+
+        for v in &data[..] {
+            encoder.put(&v[..]).expect("ok to encode");
+        }
+        let bytes = encoder.flush_buffer().expect("ok to flush buffer");
+
+        // Flatten expected data as contiguous array of values
+        let expected: Vec<T::T> = data.iter().flat_map(|s| s.clone()).collect();
+
+        // Decode data and compare with original
+        let mut decoder = get_decoder::<T>(col_descr.clone(), encoding).expect("get decoder");
+
+        let mut result = vec![T::T::default(); expected.len()];
+        decoder
+            .set_data(bytes, expected.len())
+            .expect("ok to set data");
+        let mut result_num_values = 0;
+        while decoder.values_left() > 0 {
+            result_num_values += decoder
+                .get(&mut result[result_num_values..])
+                .expect("ok to decode");
+        }
+        assert_eq!(result_num_values, expected.len());
+        assert_eq!(result, expected);
+    }
+
+    fn create_and_check_decoder<T: DataType>(encoding: Encoding, err: Option<ParquetError>) {
+        let descr = create_test_col_desc_ptr(-1, T::get_physical_type());
+        let decoder = get_decoder::<T>(descr, encoding);
+        match err {
+            Some(parquet_error) => {
+                assert!(decoder.is_err());
+                assert_eq!(decoder.err().unwrap(), parquet_error);
+            }
+            None => {
+                assert!(decoder.is_ok());
+                assert_eq!(decoder.unwrap().encoding(), encoding);
+            }
+        }
+    }
+
+    // Creates test column descriptor.
+    fn create_test_col_desc_ptr(type_len: i32, t: Type) -> ColumnDescPtr {
+        let ty = SchemaType::primitive_type_builder("t", t)
+            .with_length(type_len)
+            .build()
+            .unwrap();
+        Rc::new(ColumnDescriptor::new(
+            Rc::new(ty),
+            None,
+            0,
+            0,
+            ColumnPath::new(vec![]),
+        ))
+    }
+
+    fn usize_to_bytes(v: usize) -> [u8; 4] {
+        unsafe { mem::transmute::<u32, [u8; 4]>(v as u32) }
+    }
+
+    /// A utility trait to convert slices of different types to byte arrays.
+    trait ToByteArray<T: DataType> {
+        fn to_byte_array(data: &[T::T]) -> Vec<u8>;
+    }
+
+    impl<T> ToByteArray<T> for T
+    where
+        T: DataType,
+    {
+        default fn to_byte_array(data: &[T::T]) -> Vec<u8> {
+            let mut v = vec![];
+            let type_len = ::std::mem::size_of::<T::T>();
+            v.extend_from_slice(unsafe {
+                ::std::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * type_len)
+            });
+            v
+        }
+    }
+
+    impl ToByteArray<BoolType> for BoolType {
+        fn to_byte_array(data: &[bool]) -> Vec<u8> {
+            let mut v = vec![];
+            for i in 0..data.len() {
+                if i % 8 == 0 {
+                    v.push(0);
+                }
+                if data[i] {
+                    set_array_bit(&mut v[..], i);
+                }
+            }
+            v
+        }
+    }
+
+    impl ToByteArray<Int96Type> for Int96Type {
+        fn to_byte_array(data: &[Int96]) -> Vec<u8> {
+            let mut v = vec![];
+            for d in data {
+                unsafe {
+                    let copy = ::std::slice::from_raw_parts(d.data().as_ptr() as *const u8, 12);
+                    v.extend_from_slice(copy);
+                };
+            }
+            v
+        }
+    }
+
+    impl ToByteArray<ByteArrayType> for ByteArrayType {
+        fn to_byte_array(data: &[ByteArray]) -> Vec<u8> {
+            let mut v = vec![];
+            for d in data {
+                let buf = d.data();
+                let len = &usize_to_bytes(buf.len());
+                v.extend_from_slice(len);
+                v.extend(buf);
+            }
+            v
+        }
+    }
+
+    impl ToByteArray<FixedLenByteArrayType> for FixedLenByteArrayType {
+        fn to_byte_array(data: &[ByteArray]) -> Vec<u8> {
+            let mut v = vec![];
+            for d in data {
+                let buf = d.data();
+                v.extend(buf);
+            }
+            v
+        }
+    }
+}
diff --git a/rust/src/parquet/encodings/encoding.rs b/rust/src/parquet/encodings/encoding.rs
new file mode 100644
index 0000000..cecb03c
--- /dev/null
+++ b/rust/src/parquet/encodings/encoding.rs
@@ -0,0 +1,1360 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Contains all supported encoders for Parquet.
+
+use std::{cmp, io::Write, marker::PhantomData, mem, slice};
+
+use crate::parquet::basic::*;
+use crate::parquet::data_type::*;
+use crate::parquet::encodings::rle::RleEncoder;
+use crate::parquet::errors::{ParquetError, Result};
+use crate::parquet::schema::types::ColumnDescPtr;
+use crate::parquet::util::{
+    bit_util::{log2, num_required_bits, BitWriter},
+    hash_util,
+    memory::{Buffer, ByteBuffer, ByteBufferPtr, MemTrackerPtr},
+};
+
+// ----------------------------------------------------------------------
+// Encoders
+
+/// A Parquet encoder for the data type `T`.
+///
+/// Currently this allocates internal buffers for the encoded values. After putting
+/// values, the caller should call `flush_buffer()` to get an immutable buffer pointer.
+pub trait Encoder<T: DataType> {
+    /// Encodes data from `values`.
+    fn put(&mut self, values: &[T::T]) -> Result<()>;
+
+    /// Returns the encoding type of this encoder.
+    fn encoding(&self) -> Encoding;
+
+    /// Returns an estimate of the encoded data size, in bytes.
+    /// Method call must be O(1).
+    fn estimated_data_encoded_size(&self) -> usize;
+
+    /// Flushes the underlying byte buffer that's being processed by this encoder, and
+    /// returns an immutable copy of it. This will also reset the internal state.
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr>;
+}
+
+/// Gets an encoder for the particular data type `T` and encoding `encoding`. Memory usage
+/// for the encoder instance is tracked by `mem_tracker`.
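+///
+/// A minimal usage sketch (`desc` is assumed to be a `ColumnDescPtr` for an INT32
+/// column):
+///
+/// ```ignore
+/// let mem_tracker = Rc::new(MemTracker::new());
+/// let mut encoder = get_encoder::<Int32Type>(desc, Encoding::PLAIN, mem_tracker)?;
+/// encoder.put(&[1, 2, 3])?;
+/// let bytes = encoder.flush_buffer()?;
+/// ```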
+pub fn get_encoder<T: DataType>(
+    desc: ColumnDescPtr,
+    encoding: Encoding,
+    mem_tracker: MemTrackerPtr,
+) -> Result<Box<Encoder<T>>> {
+    let encoder: Box<Encoder<T>> = match encoding {
+        Encoding::PLAIN => Box::new(PlainEncoder::new(desc, mem_tracker, vec![])),
+        Encoding::RLE_DICTIONARY | Encoding::PLAIN_DICTIONARY => {
+            return Err(general_err!(
+                "Cannot initialize this encoding through this function"
+            ));
+        }
+        Encoding::RLE => Box::new(RleValueEncoder::new()),
+        Encoding::DELTA_BINARY_PACKED => Box::new(DeltaBitPackEncoder::new()),
+        Encoding::DELTA_LENGTH_BYTE_ARRAY => Box::new(DeltaLengthByteArrayEncoder::new()),
+        Encoding::DELTA_BYTE_ARRAY => Box::new(DeltaByteArrayEncoder::new()),
+        e => return Err(nyi_err!("Encoding {} is not supported", e)),
+    };
+    Ok(encoder)
+}
+
+// ----------------------------------------------------------------------
+// Plain encoding
+
+/// Plain encoding that supports all types.
+/// Values are encoded back to back.
+/// The plain encoding is used whenever a more efficient encoding cannot be used.
+/// It stores the data in the following format:
+/// - BOOLEAN - 1 bit per value, 0 is false; 1 is true.
+/// - INT32 - 4 bytes per value, stored as little-endian.
+/// - INT64 - 8 bytes per value, stored as little-endian.
+/// - FLOAT - 4 bytes per value, stored as IEEE little-endian.
+/// - DOUBLE - 8 bytes per value, stored as IEEE little-endian.
+/// - BYTE_ARRAY - 4-byte length stored as little-endian, followed by the bytes.
+/// - FIXED_LEN_BYTE_ARRAY - just the bytes are stored.
+pub struct PlainEncoder<T: DataType> {
+    buffer: ByteBuffer,
+    bit_writer: BitWriter,
+    desc: ColumnDescPtr,
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> PlainEncoder<T> {
+    /// Creates new plain encoder.
+    pub fn new(desc: ColumnDescPtr, mem_tracker: MemTrackerPtr, vec: Vec<u8>) -> Self {
+        let mut byte_buffer = ByteBuffer::new().with_mem_tracker(mem_tracker);
+        byte_buffer.set_data(vec);
+        Self {
+            buffer: byte_buffer,
+            bit_writer: BitWriter::new(256),
+            desc,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Encoder<T> for PlainEncoder<T> {
+    default fn put(&mut self, values: &[T::T]) -> Result<()> {
+        let bytes = unsafe {
+            slice::from_raw_parts(
+                values as *const [T::T] as *const u8,
+                mem::size_of::<T::T>() * values.len(),
+            )
+        };
+        self.buffer.write(bytes)?;
+        Ok(())
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::PLAIN
+    }
+
+    fn estimated_data_encoded_size(&self) -> usize {
+        self.buffer.size() + self.bit_writer.bytes_written()
+    }
+
+    #[inline]
+    default fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        self.buffer.write(self.bit_writer.flush_buffer())?;
+        self.buffer.flush()?;
+        self.bit_writer.clear();
+
+        Ok(self.buffer.consume())
+    }
+}
+
+impl Encoder<BoolType> for PlainEncoder<BoolType> {
+    fn put(&mut self, values: &[bool]) -> Result<()> {
+        for v in values {
+            self.bit_writer.put_value(*v as u64, 1);
+        }
+        Ok(())
+    }
+}
+
+impl Encoder<Int96Type> for PlainEncoder<Int96Type> {
+    fn put(&mut self, values: &[Int96]) -> Result<()> {
+        for v in values {
+            self.buffer.write(v.as_bytes())?;
+        }
+        self.buffer.flush()?;
+        Ok(())
+    }
+}
+
+impl Encoder<ByteArrayType> for PlainEncoder<ByteArrayType> {
+    fn put(&mut self, values: &[ByteArray]) -> Result<()> {
+        for v in values {
+            self.buffer.write(&(v.len().to_le() as u32).as_bytes())?;
+            self.buffer.write(v.data())?;
+        }
+        self.buffer.flush()?;
+        Ok(())
+    }
+}
+
+impl Encoder<FixedLenByteArrayType> for PlainEncoder<FixedLenByteArrayType> {
+    fn put(&mut self, values: &[ByteArray]) -> Result<()> {
+        for v in values {
+            self.buffer.write(v.data())?;
+        }
+        self.buffer.flush()?;
+        Ok(())
+    }
+}
+
+// ----------------------------------------------------------------------
+// Dictionary encoding
+
+const INITIAL_HASH_TABLE_SIZE: usize = 1024;
+const MAX_HASH_LOAD: f32 = 0.7;
+const HASH_SLOT_EMPTY: i32 = -1;
+
+/// Dictionary encoder.
+/// The dictionary encoding builds a dictionary of values encountered in a given column.
+/// The dictionary page is written first, before the data pages of the column chunk.
+///
+/// Dictionary page format: the entries in the dictionary - in dictionary order -
+/// using the plain encoding.
+///
+/// Data page format: the bit width used to encode the entry ids stored as 1 byte
+/// (max bit width = 32), followed by the values encoded using RLE/Bit packed described
+/// above (with the given bit width).
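+///
+/// A sketch of the expected call order when writing a column chunk (`desc` and
+/// `mem_tracker` are assumed to be available):
+///
+/// ```ignore
+/// let mut encoder = DictEncoder::<Int32Type>::new(desc, mem_tracker);
+/// encoder.put(&[10, 20, 10])?;               // dictionary becomes {10, 20}
+/// let dict_page = encoder.write_dict()?;     // PLAIN-encoded unique values
+/// let data_page = encoder.write_indices()?;  // bit width + RLE-encoded ids
+/// ```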
+pub struct DictEncoder<T: DataType> {
+    // Descriptor for the column to be encoded.
+    desc: ColumnDescPtr,
+
+    // Size of the table. **Must be** a power of 2.
+    hash_table_size: usize,
+
+    // Store `hash_table_size` - 1, so that `j & mod_bitmask` is equivalent to
+    // `j % hash_table_size`, but uses far fewer CPU cycles.
+    mod_bitmask: u32,
+
+    // Stores indices which map (many-to-one) to the values in the `uniques` array.
+    // Here we use a fixed-size array with linear probing.
+    // A slot with `HASH_SLOT_EMPTY` indicates the slot is not currently occupied.
+    hash_slots: Buffer<i32>,
+
+    // Indices that have not yet been written out by `write_indices()`.
+    buffered_indices: Buffer<i32>,
+
+    // The unique observed values.
+    uniques: Buffer<T::T>,
+
+    // Size in bytes needed to encode this dictionary.
+    uniques_size_in_bytes: usize,
+
+    // Tracking memory usage for the various data structures in this struct.
+    mem_tracker: MemTrackerPtr,
+}
+
+impl<T: DataType> DictEncoder<T> {
+    /// Creates new dictionary encoder.
+    pub fn new(desc: ColumnDescPtr, mem_tracker: MemTrackerPtr) -> Self {
+        let mut slots = Buffer::new().with_mem_tracker(mem_tracker.clone());
+        slots.resize(INITIAL_HASH_TABLE_SIZE, -1);
+        Self {
+            desc,
+            hash_table_size: INITIAL_HASH_TABLE_SIZE,
+            mod_bitmask: (INITIAL_HASH_TABLE_SIZE - 1) as u32,
+            hash_slots: slots,
+            buffered_indices: Buffer::new().with_mem_tracker(mem_tracker.clone()),
+            uniques: Buffer::new().with_mem_tracker(mem_tracker.clone()),
+            uniques_size_in_bytes: 0,
+            mem_tracker,
+        }
+    }
+
+    /// Returns true if dictionary entries are sorted, false otherwise.
+    #[inline]
+    pub fn is_sorted(&self) -> bool {
+        // Sorting is not supported currently.
+        false
+    }
+
+    /// Returns number of unique values (keys) in the dictionary.
+    pub fn num_entries(&self) -> usize {
+        self.uniques.size()
+    }
+
+    /// Returns size of unique values (keys) in the dictionary, in bytes.
+    pub fn dict_encoded_size(&self) -> usize {
+        self.uniques_size_in_bytes
+    }
+
+    /// Writes out the dictionary values with PLAIN encoding in a byte buffer, and
+    /// returns the result.
+    #[inline]
+    pub fn write_dict(&self) -> Result<ByteBufferPtr> {
+        let mut plain_encoder =
+            PlainEncoder::<T>::new(self.desc.clone(), self.mem_tracker.clone(), vec![]);
+        plain_encoder.put(self.uniques.data())?;
+        plain_encoder.flush_buffer()
+    }
+
+    /// Writes out the buffered indices with RLE encoding in a byte buffer, and
+    /// returns the result.
+    #[inline]
+    pub fn write_indices(&mut self) -> Result<ByteBufferPtr> {
+        // TODO: the caller should allocate the buffer
+        let buffer_len = self.estimated_data_encoded_size();
+        let mut buffer: Vec<u8> = vec![0; buffer_len as usize];
+        buffer[0] = self.bit_width() as u8;
+        self.mem_tracker.alloc(buffer.capacity() as i64);
+
+        // Write bit width in the first byte
+        buffer.write((self.bit_width() as u8).as_bytes())?;
+        let mut encoder = RleEncoder::new_from_buf(self.bit_width(), buffer, 1);
+        for index in self.buffered_indices.data() {
+            if !encoder.put(*index as u64)? {
+                return Err(general_err!("Encoder doesn't have enough space"));
+            }
+        }
+        self.buffered_indices.clear();
+        Ok(ByteBufferPtr::new(encoder.consume()?))
+    }
+
+    #[inline]
+    fn put_one(&mut self, value: &T::T) -> Result<()> {
+        let mut j = (hash_util::hash(value, 0) & self.mod_bitmask) as usize;
+        let mut index = self.hash_slots[j];
+
+        while index != HASH_SLOT_EMPTY && self.uniques[index as usize] != *value {
+            j += 1;
+            if j == self.hash_table_size {
+                j = 0;
+            }
+            index = self.hash_slots[j];
+        }
+
+        if index == HASH_SLOT_EMPTY {
+            index = self.uniques.size() as i32;
+            self.hash_slots[j] = index;
+            self.add_dict_key(value.clone());
+
+            if self.uniques.size() > (self.hash_table_size as f32 * MAX_HASH_LOAD) as usize {
+                self.double_table_size();
+            }
+        }
+
+        self.buffered_indices.push(index);
+        Ok(())
+    }
+
+    #[inline]
+    fn add_dict_key(&mut self, value: T::T) {
+        self.uniques_size_in_bytes += self.get_encoded_size(&value);
+        self.uniques.push(value);
+    }
+
+    #[inline]
+    fn bit_width(&self) -> u8 {
+        let num_entries = self.uniques.size();
+        if num_entries == 0 {
+            0
+        } else if num_entries == 1 {
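+            // Use 1 bit for a single entry rather than log2(1) == 0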
+            1
+        } else {
+            log2(num_entries as u64) as u8
+        }
+    }
+
+    #[inline]
+    fn double_table_size(&mut self) {
+        let new_size = self.hash_table_size * 2;
+        let mut new_hash_slots = Buffer::new().with_mem_tracker(self.mem_tracker.clone());
+        new_hash_slots.resize(new_size, HASH_SLOT_EMPTY);
+        for i in 0..self.hash_table_size {
+            let index = self.hash_slots[i];
+            if index == HASH_SLOT_EMPTY {
+                continue;
+            }
+            let value = &self.uniques[index as usize];
+            let mut j = (hash_util::hash(value, 0) & ((new_size - 1) as u32)) as usize;
+            let mut slot = new_hash_slots[j];
+            while slot != HASH_SLOT_EMPTY && self.uniques[slot as usize] != *value {
+                j += 1;
+                if j == new_size {
+                    j = 0;
+                }
+                slot = new_hash_slots[j];
+            }
+
+            new_hash_slots[j] = index;
+        }
+
+        self.hash_table_size = new_size;
+        self.mod_bitmask = (new_size - 1) as u32;
+        self.hash_slots = new_hash_slots;
+    }
+}
+
+impl<T: DataType> Encoder<T> for DictEncoder<T> {
+    #[inline]
+    fn put(&mut self, values: &[T::T]) -> Result<()> {
+        for value in values {
+            self.put_one(value)?;
+        }
+        Ok(())
+    }
+
+    #[inline]
+    fn encoding(&self) -> Encoding {
+        Encoding::PLAIN_DICTIONARY
+    }
+
+    #[inline]
+    fn estimated_data_encoded_size(&self) -> usize {
+        let bit_width = self.bit_width();
+        1 + RleEncoder::min_buffer_size(bit_width)
+            + RleEncoder::max_buffer_size(bit_width, self.buffered_indices.size())
+    }
+
+    #[inline]
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        self.write_indices()
+    }
+}
+
+/// Provides encoded size for a data type.
+/// This is a workaround to calculate dictionary size in bytes.
+trait DictEncodedSize<T: DataType> {
+    #[inline]
+    fn get_encoded_size(&self, value: &T::T) -> usize;
+}
+
+impl<T: DataType> DictEncodedSize<T> for DictEncoder<T> {
+    #[inline]
+    default fn get_encoded_size(&self, _: &T::T) -> usize {
+        mem::size_of::<T::T>()
+    }
+}
+
+impl DictEncodedSize<ByteArrayType> for DictEncoder<ByteArrayType> {
+    #[inline]
+    fn get_encoded_size(&self, value: &ByteArray) -> usize {
+        mem::size_of::<u32>() + value.len()
+    }
+}
+
+impl DictEncodedSize<FixedLenByteArrayType> for DictEncoder<FixedLenByteArrayType> {
+    #[inline]
+    fn get_encoded_size(&self, _value: &ByteArray) -> usize {
+        self.desc.type_length() as usize
+    }
+}
+
+// ----------------------------------------------------------------------
+// RLE encoding
+
+const DEFAULT_RLE_BUFFER_LEN: usize = 1024;
+
+/// RLE/Bit-Packing hybrid encoding for values.
+/// Currently used only for data pages v2, and supports only boolean types.
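+///
+/// A minimal sketch of the flushed layout, assuming the `Encoder` trait is in scope
+/// (see `flush_buffer` below for the authoritative logic):
+/// ```ignore
+/// let mut encoder = RleValueEncoder::<BoolType>::new();
+/// encoder.put(&[true, true, false])?;
+/// let buf = encoder.flush_buffer()?; // [encoded length: i32 LE][RLE/bit-packed data]
+/// ```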
+pub struct RleValueEncoder<T: DataType> {
+    // RLE encoder, initialized lazily on the first `put` call;
+    // values are RLE-encoded as they are added
+    encoder: Option<RleEncoder>,
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> RleValueEncoder<T> {
+    /// Creates new rle value encoder.
+    pub fn new() -> Self {
+        Self {
+            encoder: None,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Encoder<T> for RleValueEncoder<T> {
+    #[inline]
+    default fn put(&mut self, _values: &[T::T]) -> Result<()> {
+        panic!("RleValueEncoder only supports BoolType");
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::RLE
+    }
+
+    #[inline]
+    default fn estimated_data_encoded_size(&self) -> usize {
+        match self.encoder {
+            Some(ref enc) => enc.len(),
+            None => 0,
+        }
+    }
+
+    #[inline]
+    default fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        panic!("RleValueEncoder only supports BoolType");
+    }
+}
+
+impl Encoder<BoolType> for RleValueEncoder<BoolType> {
+    #[inline]
+    default fn put(&mut self, values: &[bool]) -> Result<()> {
+        if self.encoder.is_none() {
+            self.encoder = Some(RleEncoder::new(1, DEFAULT_RLE_BUFFER_LEN));
+        }
+        let rle_encoder = self.encoder.as_mut().unwrap();
+        for value in values {
+            if !rle_encoder.put(*value as u64)? {
+                return Err(general_err!("RLE buffer is full"));
+            }
+        }
+        Ok(())
+    }
+
+    #[inline]
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        assert!(
+            self.encoder.is_some(),
+            "RLE value encoder is not initialized"
+        );
+        let rle_encoder = self.encoder.as_mut().unwrap();
+
+        // Flush all encoder buffers and raw values
+        let encoded_data = {
+            let buf = rle_encoder.flush_buffer()?;
+
+            // Note that `buf` has no offset; all of its data is encoded bytes
+            let len = (buf.len() as i32).to_le();
+            let len_bytes = len.as_bytes();
+            let mut encoded_data = Vec::new();
+            encoded_data.extend_from_slice(len_bytes);
+            encoded_data.extend_from_slice(buf);
+            encoded_data
+        };
+        // Reset rle encoder for the next batch
+        rle_encoder.clear();
+
+        Ok(ByteBufferPtr::new(encoded_data))
+    }
+}
+
+// ----------------------------------------------------------------------
+// DELTA_BINARY_PACKED encoding
+
+const MAX_PAGE_HEADER_WRITER_SIZE: usize = 32;
+const MAX_BIT_WRITER_SIZE: usize = 10 * 1024 * 1024;
+const DEFAULT_BLOCK_SIZE: usize = 128;
+const DEFAULT_NUM_MINI_BLOCKS: usize = 4;
+
+/// Delta bit packed encoder.
+/// Consists of a header followed by blocks of delta encoded values binary packed.
+///
+/// Delta-binary-packing:
+/// ```shell
+///   [page-header] [block 1], [block 2], ... [block N]
+/// ```
+///
+/// Each page header consists of:
+/// ```shell
+///   [block size] [number of miniblocks in a block] [total value count] [first value]
+/// ```
+///
+/// Each block consists of:
+/// ```shell
+///   [min delta] [list of bitwidths of miniblocks] [miniblocks]
+/// ```
+///
+/// The current implementation writes values in the `put` method: successive calls to
+/// `put` append to the current block, and a new block is started once the block size
+/// is exceeded. Calling `flush_buffer` writes out all data and resets the internal
+/// state, including the page header.
+///
+/// Supports only INT32 and INT64.
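+///
+/// A brief usage sketch (the values and deltas shown are illustrative):
+/// ```ignore
+/// let mut encoder = DeltaBitPackEncoder::<Int32Type>::new();
+/// encoder.put(&[1, 2, 4, 7])?; // first value 1; deltas 1, 2, 3
+/// let buf = encoder.flush_buffer()?; // page header followed by one (padded) block
+/// ```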
+pub struct DeltaBitPackEncoder<T: DataType> {
+    page_header_writer: BitWriter,
+    bit_writer: BitWriter,
+    total_values: usize,
+    first_value: i64,
+    current_value: i64,
+    block_size: usize,
+    mini_block_size: usize,
+    num_mini_blocks: usize,
+    values_in_block: usize,
+    deltas: Vec<i64>,
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> DeltaBitPackEncoder<T> {
+    /// Creates new delta bit packed encoder.
+    pub fn new() -> Self {
+        let block_size = DEFAULT_BLOCK_SIZE;
+        let num_mini_blocks = DEFAULT_NUM_MINI_BLOCKS;
+        let mini_block_size = block_size / num_mini_blocks;
+        assert!(mini_block_size % 8 == 0);
+        Self::assert_supported_type();
+
+        DeltaBitPackEncoder {
+            page_header_writer: BitWriter::new(MAX_PAGE_HEADER_WRITER_SIZE),
+            bit_writer: BitWriter::new(MAX_BIT_WRITER_SIZE),
+            total_values: 0,
+            first_value: 0,
+            current_value: 0, // current value to keep adding deltas
+            block_size,       // can write fewer values than block size for last block
+            mini_block_size,
+            num_mini_blocks,
+            values_in_block: 0, // will be at most block_size
+            deltas: vec![0; block_size],
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Writes the page header for blocks; this method is invoked when we are done
+    /// encoding values. It is also okay to call it when no values have been provided.
+    fn write_page_header(&mut self) {
+        // We ignore the result of each 'put' operation, because MAX_PAGE_HEADER_WRITER_SIZE
+        // is chosen to fit all header values and guarantees that writes will not fail.
+
+        // Write the size of each block
+        self.page_header_writer.put_vlq_int(self.block_size as u64);
+        // Write the number of mini blocks
+        self.page_header_writer
+            .put_vlq_int(self.num_mini_blocks as u64);
+        // Write the number of all values (including non-encoded first value)
+        self.page_header_writer
+            .put_vlq_int(self.total_values as u64);
+        // Write first value
+        self.page_header_writer.put_zigzag_vlq_int(self.first_value);
+    }
+
+    // Write current delta buffer (<= 'block size' values) into bit writer
+    fn flush_block_values(&mut self) -> Result<()> {
+        if self.values_in_block == 0 {
+            return Ok(());
+        }
+
+        let mut min_delta = i64::max_value();
+        for i in 0..self.values_in_block {
+            min_delta = cmp::min(min_delta, self.deltas[i]);
+        }
+
+        // Write min delta
+        self.bit_writer.put_zigzag_vlq_int(min_delta);
+
+        // Slice to store the bit width for each mini block.
+        // An unsafe pointer cast is used here to avoid a double mutable borrow.
+        let mini_block_widths: &mut [u8] = unsafe {
+            let tmp_slice = self.bit_writer.get_next_byte_ptr(self.num_mini_blocks)?;
+            slice::from_raw_parts_mut(tmp_slice.as_ptr() as *mut u8, self.num_mini_blocks)
+        };
+
+        for i in 0..self.num_mini_blocks {
+            // Find how many values we need to encode - either the mini block size or
+            // however many values are left
+            let n = cmp::min(self.mini_block_size, self.values_in_block);
+            if n == 0 {
+                break;
+            }
+
+            // Compute the max delta in current mini block
+            let mut max_delta = i64::min_value();
+            for j in 0..n {
+                max_delta = cmp::max(max_delta, self.deltas[i * self.mini_block_size + j]);
+            }
+
+            // Compute bit width to store (max_delta - min_delta)
+            let bit_width = num_required_bits(self.subtract_u64(max_delta, min_delta));
+            mini_block_widths[i] = bit_width as u8;
+
+            // Encode values in current mini block using min_delta and bit_width
+            for j in 0..n {
+                let packed_value =
+                    self.subtract_u64(self.deltas[i * self.mini_block_size + j], min_delta);
+                self.bit_writer.put_value(packed_value, bit_width);
+            }
+
+            // Pad the last mini block (n < mini_block_size)
+            for _ in n..self.mini_block_size {
+                self.bit_writer.put_value(0, bit_width);
+            }
+
+            self.values_in_block -= n;
+        }
+
+        assert!(
+            self.values_in_block == 0,
+            "Expected 0 values in block, found {}",
+            self.values_in_block
+        );
+        Ok(())
+    }
+}
+
+// Implementation is shared between Int32Type and Int64Type;
+// see `DeltaBitPackEncoderConversion` below for specifics.
+impl<T: DataType> Encoder<T> for DeltaBitPackEncoder<T> {
+    fn put(&mut self, values: &[T::T]) -> Result<()> {
+        if values.is_empty() {
+            return Ok(());
+        }
+
+        let mut idx;
+        // Define values to encode, initialize state
+        if self.total_values == 0 {
+            self.first_value = self.as_i64(values, 0);
+            self.current_value = self.first_value;
+            idx = 1;
+        } else {
+            idx = 0;
+        }
+        // Add all values (including first value)
+        self.total_values += values.len();
+
+        // Write block
+        while idx < values.len() {
+            let value = self.as_i64(values, idx);
+            self.deltas[self.values_in_block] = self.subtract(value, self.current_value);
+            self.current_value = value;
+            idx += 1;
+            self.values_in_block += 1;
+            if self.values_in_block == self.block_size {
+                self.flush_block_values()?;
+            }
+        }
+        Ok(())
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::DELTA_BINARY_PACKED
+    }
+
+    fn estimated_data_encoded_size(&self) -> usize {
+        self.bit_writer.bytes_written()
+    }
+
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        // Write remaining values
+        self.flush_block_values()?;
+        // Write page header with total values
+        self.write_page_header();
+
+        let mut buffer = ByteBuffer::new();
+        buffer.write(self.page_header_writer.flush_buffer())?;
+        buffer.write(self.bit_writer.flush_buffer())?;
+        buffer.flush()?;
+
+        // Reset state
+        self.page_header_writer.clear();
+        self.bit_writer.clear();
+        self.total_values = 0;
+        self.first_value = 0;
+        self.current_value = 0;
+        self.values_in_block = 0;
+
+        Ok(buffer.consume())
+    }
+}
+
+/// Helper trait to define specific conversions and subtractions when computing deltas
+trait DeltaBitPackEncoderConversion<T: DataType> {
+    // Method should panic if type is not supported, otherwise no-op
+    #[inline]
+    fn assert_supported_type();
+
+    #[inline]
+    fn as_i64(&self, values: &[T::T], index: usize) -> i64;
+
+    #[inline]
+    fn subtract(&self, left: i64, right: i64) -> i64;
+
+    #[inline]
+    fn subtract_u64(&self, left: i64, right: i64) -> u64;
+}
+
+impl<T: DataType> DeltaBitPackEncoderConversion<T> for DeltaBitPackEncoder<T> {
+    #[inline]
+    default fn assert_supported_type() {
+        panic!("DeltaBitPackDecoder only supports Int32Type and Int64Type");
+    }
+
+    #[inline]
+    default fn as_i64(&self, _values: &[T::T], _index: usize) -> i64 {
+        0
+    }
+
+    #[inline]
+    default fn subtract(&self, _left: i64, _right: i64) -> i64 {
+        0
+    }
+
+    #[inline]
+    default fn subtract_u64(&self, _left: i64, _right: i64) -> u64 {
+        0
+    }
+}
+
+impl DeltaBitPackEncoderConversion<Int32Type> for DeltaBitPackEncoder<Int32Type> {
+    #[inline]
+    fn assert_supported_type() {
+        // no-op: supported type
+    }
+
+    #[inline]
+    fn as_i64(&self, values: &[i32], index: usize) -> i64 {
+        values[index] as i64
+    }
+
+    #[inline]
+    fn subtract(&self, left: i64, right: i64) -> i64 {
+        // It is okay for values to overflow, wrapping_sub wrapping around at the boundary
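+        // (e.g. i32::min_value().wrapping_sub(i32::max_value()) == 1)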
+        (left as i32).wrapping_sub(right as i32) as i64
+    }
+
+    #[inline]
+    fn subtract_u64(&self, left: i64, right: i64) -> u64 {
+        // The i32 -> u32 -> u64 conversion avoids non-zero leftmost bytes in the
+        // integer representation
+        (left as i32).wrapping_sub(right as i32) as u32 as u64
+    }
+}
+
+impl DeltaBitPackEncoderConversion<Int64Type> for DeltaBitPackEncoder<Int64Type> {
+    #[inline]
+    fn assert_supported_type() {
+        // no-op: supported type
+    }
+
+    #[inline]
+    fn as_i64(&self, values: &[i64], index: usize) -> i64 {
+        values[index]
+    }
+
+    #[inline]
+    fn subtract(&self, left: i64, right: i64) -> i64 {
+        // It is okay for values to overflow, wrapping_sub wrapping around at the boundary
+        left.wrapping_sub(right)
+    }
+
+    #[inline]
+    fn subtract_u64(&self, left: i64, right: i64) -> u64 {
+        left.wrapping_sub(right) as u64
+    }
+}
+
+// ----------------------------------------------------------------------
+// DELTA_LENGTH_BYTE_ARRAY encoding
+
+/// Encoding for byte arrays that separates the length values from the data.
+/// The lengths are encoded using DELTA_BINARY_PACKED encoding; the data is
+/// stored as raw bytes.
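+///
+/// For example (mirroring the tests below), putting `["ab", "abc"]` buffers the
+/// lengths `[2, 3]` and the raw bytes `"ababc"`; the lengths are delta-binary-packed
+/// on flush:
+/// ```ignore
+/// let mut encoder = DeltaLengthByteArrayEncoder::<ByteArrayType>::new();
+/// encoder.put(&[ByteArray::from("ab"), ByteArray::from("abc")])?;
+/// let buf = encoder.flush_buffer()?; // packed lengths, then "ababc"
+/// ```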
+pub struct DeltaLengthByteArrayEncoder<T: DataType> {
+    // length encoder
+    len_encoder: DeltaBitPackEncoder<Int32Type>,
+    // byte array data
+    data: Vec<ByteArray>,
+    // data size in bytes of encoded values
+    encoded_size: usize,
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> DeltaLengthByteArrayEncoder<T> {
+    /// Creates new delta length byte array encoder.
+    pub fn new() -> Self {
+        Self {
+            len_encoder: DeltaBitPackEncoder::new(),
+            data: vec![],
+            encoded_size: 0,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Encoder<T> for DeltaLengthByteArrayEncoder<T> {
+    default fn put(&mut self, _values: &[T::T]) -> Result<()> {
+        panic!("DeltaLengthByteArrayEncoder only supports ByteArrayType");
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::DELTA_LENGTH_BYTE_ARRAY
+    }
+
+    fn estimated_data_encoded_size(&self) -> usize {
+        self.len_encoder.estimated_data_encoded_size() + self.encoded_size
+    }
+
+    default fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        panic!("DeltaLengthByteArrayEncoder only supports ByteArrayType");
+    }
+}
+
+impl Encoder<ByteArrayType> for DeltaLengthByteArrayEncoder<ByteArrayType> {
+    fn put(&mut self, values: &[ByteArray]) -> Result<()> {
+        let lengths: Vec<i32> = values
+            .iter()
+            .map(|byte_array| byte_array.len() as i32)
+            .collect();
+        self.len_encoder.put(&lengths)?;
+        for byte_array in values {
+            self.encoded_size += byte_array.len();
+            self.data.push(byte_array.clone());
+        }
+        Ok(())
+    }
+
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        let mut total_bytes = vec![];
+        let lengths = self.len_encoder.flush_buffer()?;
+        total_bytes.extend_from_slice(lengths.data());
+        self.data.iter().for_each(|byte_array| {
+            total_bytes.extend_from_slice(byte_array.data());
+        });
+        self.data.clear();
+        self.encoded_size = 0;
+        Ok(ByteBufferPtr::new(total_bytes))
+    }
+}
+
+// ----------------------------------------------------------------------
+// DELTA_BYTE_ARRAY encoding
+
+/// Encoding for byte arrays: prefix lengths are encoded using DELTA_BINARY_PACKED
+/// encoding, followed by suffixes encoded with DELTA_LENGTH_BYTE_ARRAY encoding.
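+///
+/// For example, putting `["aa", "aaa"]` produces prefix lengths `[0, 2]` and suffixes
+/// `["aa", "a"]`, since the second value shares a two-byte prefix with the first.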
+pub struct DeltaByteArrayEncoder<T: DataType> {
+    prefix_len_encoder: DeltaBitPackEncoder<Int32Type>,
+    suffix_writer: DeltaLengthByteArrayEncoder<T>,
+    previous: Vec<u8>,
+    _phantom: PhantomData<T>,
+}
+
+impl<T: DataType> DeltaByteArrayEncoder<T> {
+    /// Creates new delta byte array encoder.
+    pub fn new() -> Self {
+        Self {
+            prefix_len_encoder: DeltaBitPackEncoder::<Int32Type>::new(),
+            suffix_writer: DeltaLengthByteArrayEncoder::<T>::new(),
+            previous: vec![],
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: DataType> Encoder<T> for DeltaByteArrayEncoder<T> {
+    default fn put(&mut self, _values: &[T::T]) -> Result<()> {
+        panic!("DeltaByteArrayEncoder only supports ByteArrayType and FixedLenByteArrayType");
+    }
+
+    fn encoding(&self) -> Encoding {
+        Encoding::DELTA_BYTE_ARRAY
+    }
+
+    fn estimated_data_encoded_size(&self) -> usize {
+        self.prefix_len_encoder.estimated_data_encoded_size()
+            + self.suffix_writer.estimated_data_encoded_size()
+    }
+
+    default fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        panic!("DeltaByteArrayEncoder only supports ByteArrayType and FixedLenByteArrayType");
+    }
+}
+
+impl Encoder<ByteArrayType> for DeltaByteArrayEncoder<ByteArrayType> {
+    fn put(&mut self, values: &[ByteArray]) -> Result<()> {
+        let mut prefix_lengths: Vec<i32> = vec![];
+        let mut suffixes: Vec<ByteArray> = vec![];
+
+        for byte_array in values {
+            let current = byte_array.data();
+            // Maximum prefix length that is shared between previous value and current value
+            let prefix_len = cmp::min(self.previous.len(), current.len());
+            let mut match_len = 0;
+            while match_len < prefix_len && self.previous[match_len] == current[match_len] {
+                match_len += 1;
+            }
+            prefix_lengths.push(match_len as i32);
+            suffixes.push(byte_array.slice(match_len, byte_array.len() - match_len));
+            // Update previous for the next prefix
+            self.previous.clear();
+            self.previous.extend_from_slice(current);
+        }
+        self.prefix_len_encoder.put(&prefix_lengths)?;
+        self.suffix_writer.put(&suffixes)?;
+        Ok(())
+    }
+
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        // TODO: investigate if we can merge lengths and suffixes
+        // without copying data into new vector.
+        let mut total_bytes = vec![];
+        // Insert lengths ...
+        let lengths = self.prefix_len_encoder.flush_buffer()?;
+        total_bytes.extend_from_slice(lengths.data());
+        // ... followed by suffixes
+        let suffixes = self.suffix_writer.flush_buffer()?;
+        total_bytes.extend_from_slice(suffixes.data());
+
+        self.previous.clear();
+        Ok(ByteBufferPtr::new(total_bytes))
+    }
+}
+
+impl Encoder<FixedLenByteArrayType> for DeltaByteArrayEncoder<FixedLenByteArrayType> {
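+    // Note: the `mem::transmute` calls below reinterpret only the phantom type
+    // parameter; both encoders store `ByteArray` values, so the layouts are identical.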
+    fn put(&mut self, values: &[ByteArray]) -> Result<()> {
+        let s: &mut DeltaByteArrayEncoder<ByteArrayType> = unsafe { mem::transmute(self) };
+        s.put(values)
+    }
+
+    fn flush_buffer(&mut self) -> Result<ByteBufferPtr> {
+        let s: &mut DeltaByteArrayEncoder<ByteArrayType> = unsafe { mem::transmute(self) };
+        s.flush_buffer()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use std::rc::Rc;
+
+    use crate::parquet::decoding::{get_decoder, Decoder, DictDecoder, PlainDecoder};
+    use crate::parquet::schema::types::{
+        ColumnDescPtr, ColumnDescriptor, ColumnPath, Type as SchemaType,
+    };
+    use crate::parquet::util::{memory::MemTracker, test_common::RandGen};
+
+    const TEST_SET_SIZE: usize = 1024;
+
+    #[test]
+    fn test_get_encoders() {
+        // supported encodings
+        create_and_check_encoder::<Int32Type>(Encoding::PLAIN, None);
+        create_and_check_encoder::<Int32Type>(Encoding::DELTA_BINARY_PACKED, None);
+        create_and_check_encoder::<Int32Type>(Encoding::DELTA_LENGTH_BYTE_ARRAY, None);
+        create_and_check_encoder::<Int32Type>(Encoding::DELTA_BYTE_ARRAY, None);
+        create_and_check_encoder::<BoolType>(Encoding::RLE, None);
+
+        // error when initializing
+        create_and_check_encoder::<Int32Type>(
+            Encoding::RLE_DICTIONARY,
+            Some(general_err!(
+                "Cannot initialize this encoding through this function"
+            )),
+        );
+        create_and_check_encoder::<Int32Type>(
+            Encoding::PLAIN_DICTIONARY,
+            Some(general_err!(
+                "Cannot initialize this encoding through this function"
+            )),
+        );
+
+        // unsupported
+        create_and_check_encoder::<Int32Type>(
+            Encoding::BIT_PACKED,
+            Some(nyi_err!("Encoding BIT_PACKED is not supported")),
+        );
+    }
+
+    #[test]
+    fn test_bool() {
+        BoolType::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        BoolType::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+        BoolType::test(Encoding::RLE, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_i32() {
+        Int32Type::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        Int32Type::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+        Int32Type::test(Encoding::DELTA_BINARY_PACKED, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_i64() {
+        Int64Type::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        Int64Type::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+        Int64Type::test(Encoding::DELTA_BINARY_PACKED, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_i96() {
+        Int96Type::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        Int96Type::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_float() {
+        FloatType::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        FloatType::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_double() {
+        DoubleType::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        DoubleType::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_byte_array() {
+        ByteArrayType::test(Encoding::PLAIN, TEST_SET_SIZE, -1);
+        ByteArrayType::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, -1);
+        ByteArrayType::test(Encoding::DELTA_LENGTH_BYTE_ARRAY, TEST_SET_SIZE, -1);
+        ByteArrayType::test(Encoding::DELTA_BYTE_ARRAY, TEST_SET_SIZE, -1);
+    }
+
+    #[test]
+    fn test_fixed_lenbyte_array() {
+        FixedLenByteArrayType::test(Encoding::PLAIN, TEST_SET_SIZE, 100);
+        FixedLenByteArrayType::test(Encoding::PLAIN_DICTIONARY, TEST_SET_SIZE, 100);
+        FixedLenByteArrayType::test(Encoding::DELTA_BYTE_ARRAY, TEST_SET_SIZE, 100);
+    }
+
+    #[test]
+    fn test_dict_encoded_size() {
+        fn run_test<T: DataType>(type_length: i32, values: &[T::T], expected_size: usize) {
+            let mut encoder = create_test_dict_encoder::<T>(type_length);
+            assert_eq!(encoder.dict_encoded_size(), 0);
+            encoder.put(values).unwrap();
+            assert_eq!(encoder.dict_encoded_size(), expected_size);
+            // We do not reset encoded size of the dictionary keys after flush_buffer
+            encoder.flush_buffer().unwrap();
+            assert_eq!(encoder.dict_encoded_size(), expected_size);
+        }
+
+        // Only 2 distinct values, 1 byte each
+        run_test::<BoolType>(-1, &[true, false, true, false, true], 2);
+        run_test::<Int32Type>(-1, &[1i32, 2i32, 3i32, 4i32, 5i32], 20);
+        run_test::<Int64Type>(-1, &[1i64, 2i64, 3i64, 4i64, 5i64], 40);
+        run_test::<FloatType>(-1, &[1f32, 2f32, 3f32, 4f32, 5f32], 20);
+        run_test::<DoubleType>(-1, &[1f64, 2f64, 3f64, 4f64, 5f64], 40);
+        // Int96: len + reference
+        run_test::<Int96Type>(
+            -1,
+            &[Int96::from(vec![1, 2, 3]), Int96::from(vec![2, 3, 4])],
+            32,
+        );
+        run_test::<ByteArrayType>(-1, &[ByteArray::from("abcd"), ByteArray::from("efj")], 15);
+        run_test::<FixedLenByteArrayType>(2, &[ByteArray::from("ab"), ByteArray::from("bc")], 4);
+    }
+
+    #[test]
+    fn test_estimated_data_encoded_size() {
+        fn run_test<T: DataType>(
+            encoding: Encoding,
+            type_length: i32,
+            values: &[T::T],
+            initial_size: usize,
+            max_size: usize,
+            flush_size: usize,
+        ) {
+            let mut encoder = match encoding {
+                Encoding::PLAIN_DICTIONARY | Encoding::RLE_DICTIONARY => {
+                    Box::new(create_test_dict_encoder::<T>(type_length))
+                }
+                _ => create_test_encoder::<T>(type_length, encoding),
+            };
+            assert_eq!(encoder.estimated_data_encoded_size(), initial_size);
+
+            encoder.put(values).unwrap();
+            assert_eq!(encoder.estimated_data_encoded_size(), max_size);
+
+            encoder.flush_buffer().unwrap();
+            assert_eq!(encoder.estimated_data_encoded_size(), flush_size);
+        }
+
+        // PLAIN
+        run_test::<Int32Type>(Encoding::PLAIN, -1, &vec![123; 1024], 0, 4096, 0);
+
+        // DICTIONARY
+        // NOTE: The final size is almost the same because the dictionary entries are
+        // preserved after encoded values have been written.
+        run_test::<Int32Type>(Encoding::RLE_DICTIONARY, -1, &vec![123, 1024], 11, 68, 66);
+
+        // DELTA_BINARY_PACKED
+        run_test::<Int32Type>(
+            Encoding::DELTA_BINARY_PACKED,
+            -1,
+            &vec![123; 1024],
+            0,
+            35,
+            0,
+        );
+
+        // RLE
+        let mut values = vec![];
+        values.extend_from_slice(&vec![true; 16]);
+        values.extend_from_slice(&vec![false; 16]);
+        run_test::<BoolType>(Encoding::RLE, -1, &values, 0, 2, 0);
+
+        // DELTA_LENGTH_BYTE_ARRAY
+        run_test::<ByteArrayType>(
+            Encoding::DELTA_LENGTH_BYTE_ARRAY,
+            -1,
+            &[ByteArray::from("ab"), ByteArray::from("abc")],
+            0,
+            5, // only value bytes, length encoder is not flushed yet
+            0,
+        );
+
+        // DELTA_BYTE_ARRAY
+        run_test::<ByteArrayType>(
+            Encoding::DELTA_BYTE_ARRAY,
+            -1,
+            &[ByteArray::from("ab"), ByteArray::from("abc")],
+            0,
+            3, // only suffix bytes, length encoder is not flushed yet
+            0,
+        );
+    }
+
+    // See: https://github.com/sunchao/parquet-rs/issues/47
+    #[test]
+    fn test_issue_47() {
+        let mut encoder = create_test_encoder::<ByteArrayType>(0, Encoding::DELTA_BYTE_ARRAY);
+        let mut decoder = create_test_decoder::<ByteArrayType>(0, Encoding::DELTA_BYTE_ARRAY);
+
+        let input = vec![
+            ByteArray::from("aa"),
+            ByteArray::from("aaa"),
+            ByteArray::from("aa"),
+            ByteArray::from("aaa"),
+        ];
+        let mut output = vec![ByteArray::default(); input.len()];
+
+        let mut result = put_and_get(&mut encoder, &mut decoder, &input[..2], &mut output[..2]);
+        assert!(
+            result.is_ok(),
+            "first put_and_get() failed with: {}",
+            result.unwrap_err()
+        );
+        result = put_and_get(&mut encoder, &mut decoder, &input[2..], &mut output[2..]);
+        assert!(
+            result.is_ok(),
+            "second put_and_get() failed with: {}",
+            result.unwrap_err()
+        );
+        assert_eq!(output, input);
+    }
+
+    trait EncodingTester<T: DataType> {
+        fn test(enc: Encoding, total: usize, type_length: i32) {
+            let result = match enc {
+                Encoding::PLAIN_DICTIONARY | Encoding::RLE_DICTIONARY => {
+                    Self::test_dict_internal(total, type_length)
+                }
+                enc => Self::test_internal(enc, total, type_length),
+            };
+
+            assert!(
+                result.is_ok(),
+                "Expected result to be OK but got err:\n {}",
+                result.unwrap_err()
+            );
+        }
+
+        fn test_internal(enc: Encoding, total: usize, type_length: i32) -> Result<()>;
+
+        fn test_dict_internal(total: usize, type_length: i32) -> Result<()>;
+    }
+
+    impl<T: DataType> EncodingTester<T> for T {
+        fn test_internal(enc: Encoding, total: usize, type_length: i32) -> Result<()> {
+            let mut encoder = create_test_encoder::<T>(type_length, enc);
+            let mut decoder = create_test_decoder::<T>(type_length, enc);
+            let mut values = <T as RandGen<T>>::gen_vec(type_length, total);
+            let mut result_data = vec![T::T::default(); total];
+
+            let mut actual_total = put_and_get(
+                &mut encoder,
+                &mut decoder,
+                &values[..],
+                &mut result_data[..],
+            )?;
+            assert_eq!(actual_total, total);
+            assert_eq!(result_data, values);
+
+            // Encode more data after flush and test with decoder
+
+            values = <T as RandGen<T>>::gen_vec(type_length, total);
+            actual_total = put_and_get(
+                &mut encoder,
+                &mut decoder,
+                &values[..],
+                &mut result_data[..],
+            )?;
+            assert_eq!(actual_total, total);
+            assert_eq!(result_data, values);
+
+            Ok(())
+        }
+
+        fn test_dict_internal(total: usize, type_length: i32) -> Result<()> {
+            let mut encoder = create_test_dict_encoder::<T>(type_length);
+            let mut values = <T as RandGen<T>>::gen_vec(type_length, total);
+            encoder.put(&values[..])?;
+
+            let mut data = encoder.flush_buffer()?;
+            let mut decoder = create_test_dict_decoder::<T>();
+            let mut dict_decoder = PlainDecoder::<T>::new(type_length);
+            dict_decoder.set_data(encoder.write_dict()?, encoder.num_entries())?;
+            decoder.set_dict(Box::new(dict_decoder))?;
+            let mut result_data = vec![T::T::default(); total];
+            decoder.set_data(data, total)?;
+            let mut actual_total = decoder.get(&mut result_data)?;
+
+            assert_eq!(actual_total, total);
+            assert_eq!(result_data, values);
+
+            // Encode more data after flush and test with decoder
+
+            values = <T as RandGen<T>>::gen_vec(type_length, total);
+            encoder.put(&values[..])?;
+            data = encoder.flush_buffer()?;
+
+            let mut dict_decoder = PlainDecoder::<T>::new(type_length);
+            dict_decoder.set_data(encoder.write_dict()?, encoder.num_entries())?;
+            decoder.set_dict(Box::new(dict_decoder))?;
+            decoder.set_data(data, total)?;
+            actual_total = decoder.get(&mut result_data)?;
+
+            assert_eq!(actual_total, total);
+            assert_eq!(result_data, values);
+
+            Ok(())
+        }
+    }
+
+    fn put_and_get<T: DataType>(
+        encoder: &mut Box<Encoder<T>>,
+        decoder: &mut Box<Decoder<T>>,
+        input: &[T::T],
+        output: &mut [T::T],
+    ) -> Result<usize> {
+        encoder.put(input)?;
+        let data = encoder.flush_buffer()?;
+        decoder.set_data(data, input.len())?;
+        decoder.get(output)
+    }
+
+    fn create_and_check_encoder<T: DataType>(encoding: Encoding, err: Option<ParquetError>) {
+        let descr = create_test_col_desc_ptr(-1, T::get_physical_type());
+        let mem_tracker = Rc::new(MemTracker::new());
+        let encoder = get_encoder::<T>(descr, encoding, mem_tracker);
+        match err {
+            Some(parquet_error) => {
+                assert!(encoder.is_err());
+                assert_eq!(encoder.err().unwrap(), parquet_error);
+            }
+            None => {
+                assert!(encoder.is_ok());
+                assert_eq!(encoder.unwrap().encoding(), encoding);
+            }
+        }
+    }
+
+    // Creates test column descriptor.
+    fn create_test_col_desc_ptr(type_len: i32, t: Type) -> ColumnDescPtr {
+        let ty = SchemaType::primitive_type_builder("t", t)
+            .with_length(type_len)
+            .build()
+            .unwrap();
+        Rc::new(ColumnDescriptor::new(
+            Rc::new(ty),
+            None,
+            0,
+            0,
+            ColumnPath::new(vec![]),
+        ))
+    }
+
+    fn create_test_encoder<T: DataType>(type_len: i32, enc: Encoding) -> Box<Encoder<T>> {
+        let desc = create_test_col_desc_ptr(type_len, T::get_physical_type());
+        let mem_tracker = Rc::new(MemTracker::new());
+        get_encoder(desc, enc, mem_tracker).unwrap()
+    }
+
+    fn create_test_decoder<T: DataType>(type_len: i32, enc: Encoding) -> Box<Decoder<T>> {
+        let desc = create_test_col_desc_ptr(type_len, T::get_physical_type());
+        get_decoder(desc, enc).unwrap()
+    }
+
+    fn create_test_dict_encoder<T: DataType>(type_len: i32) -> DictEncoder<T> {
+        let desc = create_test_col_desc_ptr(type_len, T::get_physical_type());
+        let mem_tracker = Rc::new(MemTracker::new());
+        DictEncoder::<T>::new(desc, mem_tracker)
+    }
+
+    fn create_test_dict_decoder<T: DataType>() -> DictDecoder<T> {
+        DictDecoder::<T>::new()
+    }
+}
diff --git a/rust/src/parquet/encodings/levels.rs b/rust/src/parquet/encodings/levels.rs
new file mode 100644
index 0000000..ec65198
--- /dev/null
+++ b/rust/src/parquet/encodings/levels.rs
@@ -0,0 +1,529 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::{cmp, mem};
+
+use super::rle::{RleDecoder, RleEncoder};
+
+use crate::parquet::basic::Encoding;
+use crate::parquet::data_type::AsBytes;
+use crate::parquet::errors::{ParquetError, Result};
+use crate::parquet::util::{
+    bit_util::{ceil, log2, BitReader, BitWriter},
+    memory::ByteBufferPtr,
+};
+
+/// Computes max buffer size for level encoder/decoder based on encoding, max
+/// repetition/definition level and number of total buffered values (includes null
+/// values).
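+///
+/// For example, with `max_level = 1` the bit width is 1, so for BIT_PACKED and 100
+/// buffered values this returns `ceil(100 * 1 / 8) = 13` bytes.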
+#[inline]
+pub fn max_buffer_size(encoding: Encoding, max_level: i16, num_buffered_values: usize) -> usize {
+    let bit_width = log2(max_level as u64 + 1) as u8;
+    match encoding {
+        Encoding::RLE => {
+            RleEncoder::max_buffer_size(bit_width, num_buffered_values)
+                + RleEncoder::min_buffer_size(bit_width)
+        }
+        Encoding::BIT_PACKED => ceil((num_buffered_values * bit_width as usize) as i64, 8) as usize,
+        _ => panic!("Unsupported encoding type {}", encoding),
+    }
+}
+
+/// Encoder for definition/repetition levels.
+/// Currently supports only RLE and BIT_PACKED encodings, including the RLE variant
+/// used for Data Page v2.
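+///
+/// A hedged usage sketch for Data Page v1 levels (mirrors the tests below):
+/// ```ignore
+/// let size = max_buffer_size(Encoding::RLE, max_level, levels.len());
+/// let mut encoder = LevelEncoder::v1(Encoding::RLE, max_level, vec![0; size]);
+/// encoder.put(&levels)?;
+/// let encoded = encoder.consume()?;
+/// ```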
+pub enum LevelEncoder {
+    RLE(RleEncoder),
+    RLE_V2(RleEncoder),
+    BIT_PACKED(u8, BitWriter),
+}
+
+impl LevelEncoder {
+    /// Creates new level encoder based on encoding, max level and underlying byte buffer.
+    /// For bit packed encoding it is assumed that buffer is already allocated with
+    /// `levels::max_buffer_size` method.
+    ///
+    /// Used to encode levels for Data Page v1.
+    ///
+    /// Panics if encoding is not supported.
+    pub fn v1(encoding: Encoding, max_level: i16, byte_buffer: Vec<u8>) -> Self {
+        let bit_width = log2(max_level as u64 + 1) as u8;
+        match encoding {
+            Encoding::RLE => LevelEncoder::RLE(RleEncoder::new_from_buf(
+                bit_width,
+                byte_buffer,
+                mem::size_of::<i32>(),
+            )),
+            Encoding::BIT_PACKED => {
+                // Here we set full byte buffer without adjusting for num_buffered_values,
+                // because byte buffer will already be allocated with size from
+                // `max_buffer_size()` method.
+                LevelEncoder::BIT_PACKED(bit_width, BitWriter::new_from_buf(byte_buffer, 0))
+            }
+            _ => panic!("Unsupported encoding type {}", encoding),
+        }
+    }
+
+    /// Creates new level encoder based on RLE encoding. Used to encode Data Page v2
+    /// repetition and definition levels.
+    pub fn v2(max_level: i16, byte_buffer: Vec<u8>) -> Self {
+        let bit_width = log2(max_level as u64 + 1) as u8;
+        LevelEncoder::RLE_V2(RleEncoder::new_from_buf(bit_width, byte_buffer, 0))
+    }
+
+    /// Puts/encodes a vector of levels into this level encoder.
+    /// Returns the number of encoded values, which is less than or equal to the length
+    /// of the input buffer.
+    ///
+    /// RLE and BIT_PACKED level encoders return Err() when internal buffer overflows or
+    /// flush fails.
+    #[inline]
+    pub fn put(&mut self, buffer: &[i16]) -> Result<usize> {
+        let mut num_encoded = 0;
+        match *self {
+            LevelEncoder::RLE(ref mut encoder) | LevelEncoder::RLE_V2(ref mut encoder) => {
+                for value in buffer {
+                    if !encoder.put(*value as u64)? {
+                        return Err(general_err!("RLE buffer is full"));
+                    }
+                    num_encoded += 1;
+                }
+                encoder.flush()?;
+            }
+            LevelEncoder::BIT_PACKED(bit_width, ref mut encoder) => {
+                for value in buffer {
+                    if !encoder.put_value(*value as u64, bit_width as usize) {
+                        return Err(general_err!("Not enough bytes left"));
+                    }
+                    num_encoded += 1;
+                }
+                encoder.flush();
+            }
+        }
+        Ok(num_encoded)
+    }
+
+    /// Finalizes the level encoder, flushes all intermediate buffers and returns the
+    /// resulting encoded buffer. The returned buffer is already truncated to the
+    /// encoded bytes only.
+    #[inline]
+    pub fn consume(self) -> Result<Vec<u8>> {
+        match self {
+            LevelEncoder::RLE(encoder) => {
+                let mut encoded_data = encoder.consume()?;
+                // Account for the buffer offset
+                let encoded_len = encoded_data.len() - mem::size_of::<i32>();
+                let len = (encoded_len as i32).to_le();
+                let len_bytes = len.as_bytes();
+                encoded_data[0..len_bytes.len()].copy_from_slice(len_bytes);
+                Ok(encoded_data)
+            }
+            LevelEncoder::RLE_V2(encoder) => encoder.consume(),
+            LevelEncoder::BIT_PACKED(_, encoder) => Ok(encoder.consume()),
+        }
+    }
+}
+
+/// Decoder for definition/repetition levels.
+/// Currently only supports RLE and BIT_PACKED encoding for Data Page v1 and
+/// RLE for Data Page v2.
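+///
+/// The decoding counterpart to the `LevelEncoder` sketch above:
+/// ```ignore
+/// let mut decoder = LevelDecoder::v1(Encoding::RLE, max_level);
+/// decoder.set_data(levels.len(), ByteBufferPtr::new(encoded));
+/// let mut out = vec![0i16; levels.len()];
+/// let num_read = decoder.get(&mut out)?;
+/// ```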
+pub enum LevelDecoder {
+    RLE(Option<usize>, RleDecoder),
+    RLE_V2(Option<usize>, RleDecoder),
+    BIT_PACKED(Option<usize>, u8, BitReader),
+}
+
+impl LevelDecoder {
+    /// Creates new level decoder based on encoding and max definition/repetition level.
+    /// This method only initializes level decoder, `set_data` method must be called
+    /// before reading any value.
+    ///
+    /// Used to decode levels for Data Page v1.
+    ///
+    /// Panics if encoding is not supported.
+    pub fn v1(encoding: Encoding, max_level: i16) -> Self {
+        let bit_width = log2(max_level as u64 + 1) as u8;
+        match encoding {
+            Encoding::RLE => LevelDecoder::RLE(None, RleDecoder::new(bit_width)),
+            Encoding::BIT_PACKED => {
+                LevelDecoder::BIT_PACKED(None, bit_width, BitReader::from(Vec::new()))
+            }
+            _ => panic!("Unsupported encoding type {}", encoding),
+        }
+    }
+
+    /// Creates new level decoder based on RLE encoding.
+    /// Used to decode Data Page v2 repetition and definition levels.
+    ///
+    /// To set data for this decoder, use `set_data_range` method.
+    pub fn v2(max_level: i16) -> Self {
+        let bit_width = log2(max_level as u64 + 1) as u8;
+        LevelDecoder::RLE_V2(None, RleDecoder::new(bit_width))
+    }
+
+    /// Sets data for this level decoder, and returns the total number of bytes set.
+    /// This is used for Data Page v1 levels.
+    ///
+    /// `data` is the encoded data as a byte buffer, and `num_buffered_values` is the
+    /// total number of values that is expected.
+    ///
+    /// Both RLE and BIT_PACKED level decoders set `num_buffered_values` as the total
+    /// number of values they can return, and use it to track the number of values read.
+    #[inline]
+    pub fn set_data(&mut self, num_buffered_values: usize, data: ByteBufferPtr) -> usize {
+        match *self {
+            LevelDecoder::RLE(ref mut num_values, ref mut decoder) => {
+                *num_values = Some(num_buffered_values);
+                let i32_size = mem::size_of::<i32>();
+                let data_size = read_num_bytes!(i32, i32_size, data.as_ref()) as usize;
+                decoder.set_data(data.range(i32_size, data_size));
+                i32_size + data_size
+            }
+            LevelDecoder::BIT_PACKED(ref mut num_values, bit_width, ref mut decoder) => {
+                *num_values = Some(num_buffered_values);
+                // Set the appropriate number of bytes: if the max size is larger than
+                // the buffer, use the full buffer
+                let num_bytes = ceil((num_buffered_values * bit_width as usize) as i64, 8);
+                let data_size = cmp::min(num_bytes as usize, data.len());
+                decoder.reset(data.range(data.start(), data_size));
+                data_size
+            }
+            _ => panic!("set_data() is only supported for Data Page v1 level decoders"),
+        }
+    }
+
+    /// Sets the byte array explicitly when the start position `start` and length `len`
+    /// are known in advance. Only supported by the RLE level decoder, and used for
+    /// Data Page v2 levels. Returns the total number of bytes set for this decoder
+    /// (`len`).
+    #[inline]
+    pub fn set_data_range(
+        &mut self,
+        num_buffered_values: usize,
+        data: &ByteBufferPtr,
+        start: usize,
+        len: usize,
+    ) -> usize {
+        match *self {
+            LevelDecoder::RLE_V2(ref mut num_values, ref mut decoder) => {
+                decoder.set_data(data.range(start, len));
+                *num_values = Some(num_buffered_values);
+                len
+            }
+            _ => panic!("set_data_range() method is only supported by RLE v2 encoding type"),
+        }
+    }
+
+    /// Returns true if data is set for decoder, false otherwise.
+    #[inline]
+    pub fn is_data_set(&self) -> bool {
+        match self {
+            LevelDecoder::RLE(ref num_values, _) => num_values.is_some(),
+            LevelDecoder::RLE_V2(ref num_values, _) => num_values.is_some(),
+            LevelDecoder::BIT_PACKED(ref num_values, ..) => num_values.is_some(),
+        }
+    }
+
+    /// Decodes values and puts them into `buffer`.
+    /// Returns the number of values that were successfully decoded (less than or equal
+    /// to the buffer length).
+    #[inline]
+    pub fn get(&mut self, buffer: &mut [i16]) -> Result<usize> {
+        assert!(self.is_data_set(), "No data set for decoding");
+        match *self {
+            LevelDecoder::RLE(ref mut num_values, ref mut decoder)
+            | LevelDecoder::RLE_V2(ref mut num_values, ref mut decoder) => {
+                // Max length we can read
+                let len = cmp::min(num_values.unwrap(), buffer.len());
+                let values_read = decoder.get_batch::<i16>(&mut buffer[0..len])?;
+                *num_values = num_values.map(|len| len - values_read);
+                Ok(values_read)
+            }
+            LevelDecoder::BIT_PACKED(ref mut num_values, bit_width, ref mut decoder) => {
+                // When extracting values from the bit reader, it might return more
+                // values than remain because of padding to a full byte; we use
+                // num_values to track the precise number of values.
+                let len = cmp::min(num_values.unwrap(), buffer.len());
+                let values_read = decoder.get_batch::<i16>(&mut buffer[..len], bit_width as usize);
+                *num_values = num_values.map(|len| len - values_read);
+                Ok(values_read)
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::parquet::util::test_common::random_numbers_range;
+
+    fn test_internal_roundtrip(enc: Encoding, levels: &[i16], max_level: i16, v2: bool) {
+        let size = max_buffer_size(enc, max_level, levels.len());
+        let mut encoder = if v2 {
+            LevelEncoder::v2(max_level, vec![0; size])
+        } else {
+            LevelEncoder::v1(enc, max_level, vec![0; size])
+        };
+        encoder.put(&levels).expect("put() should be OK");
+        let encoded_levels = encoder.consume().expect("consume() should be OK");
+
+        let byte_buf = ByteBufferPtr::new(encoded_levels);
+        let mut decoder;
+        if v2 {
+            decoder = LevelDecoder::v2(max_level);
+            decoder.set_data_range(levels.len(), &byte_buf, 0, byte_buf.len());
+        } else {
+            decoder = LevelDecoder::v1(enc, max_level);
+            decoder.set_data(levels.len(), byte_buf);
+        };
+
+        let mut buffer = vec![0; levels.len()];
+        let num_decoded = decoder.get(&mut buffer).expect("get() should be OK");
+        assert_eq!(num_decoded, levels.len());
+        assert_eq!(buffer, levels);
+    }
+
+    // Performs incremental read until all bytes are read
+    fn test_internal_roundtrip_incremental(
+        enc: Encoding,
+        levels: &[i16],
+        max_level: i16,
+        v2: bool,
+    ) {
+        let size = max_buffer_size(enc, max_level, levels.len());
+        let mut encoder = if v2 {
+            LevelEncoder::v2(max_level, vec![0; size])
+        } else {
+            LevelEncoder::v1(enc, max_level, vec![0; size])
+        };
+        encoder.put(&levels).expect("put() should be OK");
+        let encoded_levels = encoder.consume().expect("consume() should be OK");
+
+        let byte_buf = ByteBufferPtr::new(encoded_levels);
+        let mut decoder;
+        if v2 {
+            decoder = LevelDecoder::v2(max_level);
+            decoder.set_data_range(levels.len(), &byte_buf, 0, byte_buf.len());
+        } else {
+            decoder = LevelDecoder::v1(enc, max_level);
+            decoder.set_data(levels.len(), byte_buf);
+        }
+
+        let mut buffer = vec![0; levels.len() * 2];
+        let mut total_decoded = 0;
+        let mut safe_stop = levels.len() * 2; // still terminate in case of issues in the code
+        while safe_stop > 0 {
+            safe_stop -= 1;
+            let num_decoded = decoder
+                .get(&mut buffer[total_decoded..total_decoded + 1])
+                .expect("get() should be OK");
+            if num_decoded == 0 {
+                break;
+            }
+            total_decoded += num_decoded;
+        }
+        assert!(
+            safe_stop > 0,
+            "Failed to read values incrementally, reached safe stop"
+        );
+        assert_eq!(total_decoded, levels.len());
+        assert_eq!(&buffer[0..levels.len()], levels);
+    }
+
+    // Tests encoding/decoding of values when output buffer is larger than number of
+    // encoded values
+    fn test_internal_roundtrip_underflow(enc: Encoding, levels: &[i16], max_level: i16, v2: bool) {
+        let size = max_buffer_size(enc, max_level, levels.len());
+        let mut encoder = if v2 {
+            LevelEncoder::v2(max_level, vec![0; size])
+        } else {
+            LevelEncoder::v1(enc, max_level, vec![0; size])
+        };
+        // Encode only one value
+        let num_encoded = encoder.put(&levels[0..1]).expect("put() should be OK");
+        let encoded_levels = encoder.consume().expect("consume() should be OK");
+        assert_eq!(num_encoded, 1);
+
+        let byte_buf = ByteBufferPtr::new(encoded_levels);
+        let mut decoder;
+        // Set one encoded value as `num_buffered_values`
+        if v2 {
+            decoder = LevelDecoder::v2(max_level);
+            decoder.set_data_range(1, &byte_buf, 0, byte_buf.len());
+        } else {
+            decoder = LevelDecoder::v1(enc, max_level);
+            decoder.set_data(1, byte_buf);
+        }
+
+        let mut buffer = vec![0; levels.len()];
+        let num_decoded = decoder.get(&mut buffer).expect("get() should be OK");
+        assert_eq!(num_decoded, num_encoded);
+        assert_eq!(buffer[0..num_decoded], levels[0..num_decoded]);
+    }
+
+    // Tests when encoded values are larger than encoder's buffer
+    fn test_internal_roundtrip_overflow(enc: Encoding, levels: &[i16], max_level: i16, v2: bool) {
+        let size = max_buffer_size(enc, max_level, levels.len());
+        let mut encoder = if v2 {
+            LevelEncoder::v2(max_level, vec![0; size])
+        } else {
+            LevelEncoder::v1(enc, max_level, vec![0; size])
+        };
+        let mut found_err = false;
+        // Insert a large number of values, so we run out of space
+        for _ in 0..100 {
+            if let Err(err) = encoder.put(&levels) {
+                assert!(format!("{}", err).contains("Not enough bytes left"));
+                found_err = true;
+                break;
+            }
+        }
+        assert!(found_err, "Failed test: no buffer overflow");
+    }
+
+    #[test]
+    fn test_roundtrip_one() {
+        let levels = vec![0, 1, 1, 1, 1, 0, 0, 0, 0, 1];
+        let max_level = 1;
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_roundtrip() {
+        let levels = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+        let max_level = 10;
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_roundtrip_incremental() {
+        let levels = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+        let max_level = 10;
+        test_internal_roundtrip_incremental(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip_incremental(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip_incremental(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_roundtrip_all_zeros() {
+        let levels = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+        let max_level = 1;
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_roundtrip_random() {
+        // This test is mainly for bit packed level encoder/decoder
+        let mut levels = Vec::new();
+        let max_level = 5;
+        random_numbers_range::<i16>(120, 0, max_level, &mut levels);
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_roundtrip_underflow() {
+        let levels = vec![1, 1, 2, 3, 2, 1, 1, 2, 3, 1];
+        let max_level = 3;
+        test_internal_roundtrip_underflow(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip_underflow(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip_underflow(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_roundtrip_overflow() {
+        let levels = vec![1, 1, 2, 3, 2, 1, 1, 2, 3, 1];
+        let max_level = 3;
+        test_internal_roundtrip_overflow(Encoding::RLE, &levels, max_level, false);
+        test_internal_roundtrip_overflow(Encoding::BIT_PACKED, &levels, max_level, false);
+        test_internal_roundtrip_overflow(Encoding::RLE, &levels, max_level, true);
+    }
+
+    #[test]
+    fn test_rle_decoder_set_data_range() {
+        // Buffer containing both repetition and definition levels
+        let buffer = ByteBufferPtr::new(vec![5, 198, 2, 5, 42, 168, 10, 0, 2, 3, 36, 73]);
+
+        let max_rep_level = 1;
+        let mut decoder = LevelDecoder::v2(max_rep_level);
+        assert_eq!(decoder.set_data_range(10, &buffer, 0, 3), 3);
+        let mut result = vec![0; 10];
+        let num_decoded = decoder.get(&mut result).expect("get() should be OK");
+        assert_eq!(num_decoded, 10);
+        assert_eq!(result, vec![0, 1, 1, 0, 0, 0, 1, 1, 0, 1]);
+
+        let max_def_level = 2;
+        let mut decoder = LevelDecoder::v2(max_def_level);
+        assert_eq!(decoder.set_data_range(10, &buffer, 3, 5), 5);
+        let mut result = vec![0; 10];
+        let num_decoded = decoder.get(&mut result).expect("get() should be OK");
+        assert_eq!(num_decoded, 10);
+        assert_eq!(result, vec![2, 2, 2, 0, 0, 2, 2, 2, 2, 2]);
+    }
+
+    #[test]
+    #[should_panic(expected = "set_data_range() method is only supported by RLE v2 encoding type")]
+    fn test_bit_packed_decoder_set_data_range() {
+        // Buffer containing both repetition and definition levels
+        let buffer = ByteBufferPtr::new(vec![1, 2, 3, 4, 5]);
+        let max_level = 1;
+        let mut decoder = LevelDecoder::v1(Encoding::BIT_PACKED, max_level);
+        decoder.set_data_range(10, &buffer, 0, 3);
+    }
+
+    #[test]
+    fn test_bit_packed_decoder_set_data() {
+        // Tests that the number of bytes consumed is capped by both the
+        // requested value count and the buffer length
+        let buffer = ByteBufferPtr::new(vec![1, 2, 3, 4, 5]);
+        let max_level = 1;
+        let mut decoder = LevelDecoder::v1(Encoding::BIT_PACKED, max_level);
+        // Requesting more values than the buffer can hold should consume the entire buffer
+        assert_eq!(decoder.set_data(1024, buffer.all()), buffer.len());
+        // 3 values at 1 bit each fit in a single byte
+        assert_eq!(decoder.set_data(3, buffer.all()), 1);
+    }
+
+    #[test]
+    #[should_panic(expected = "No data set for decoding")]
+    fn test_rle_level_decoder_get_no_set_data() {
+        // `get()` panics because no data has been set via `set_data()`;
+        // the decoder checks for this explicitly before touching its bit reader
+        let max_rep_level = 2;
+        let mut decoder = LevelDecoder::v1(Encoding::RLE, max_rep_level);
+        let mut buffer = vec![0; 16];
+        decoder.get(&mut buffer).unwrap();
+    }
+
+    #[test]
+    #[should_panic(expected = "No data set for decoding")]
+    fn test_bit_packed_level_decoder_get_no_set_data() {
+        let max_rep_level = 2;
+        let mut decoder = LevelDecoder::v1(Encoding::BIT_PACKED, max_rep_level);
+        let mut buffer = vec![0; 16];
+        decoder.get(&mut buffer).unwrap();
+    }
+}
diff --git a/rust/src/lib.rs b/rust/src/parquet/encodings/mod.rs
similarity index 76%
copy from rust/src/lib.rs
copy to rust/src/parquet/encodings/mod.rs
index f41d08f..33b1e23 100644
--- a/rust/src/lib.rs
+++ b/rust/src/parquet/encodings/mod.rs
@@ -15,18 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#![feature(specialization)]
-
-pub mod array;
-pub mod array_data;
-pub mod array_ops;
-pub mod bitmap;
-pub mod buffer;
-pub mod builder;
-pub mod csv;
-pub mod datatypes;
-pub mod error;
-pub mod memory;
-pub mod record_batch;
-pub mod tensor;
-pub mod util;
+pub mod decoding;
+pub mod encoding;
+pub mod levels;
+mod rle;
diff --git a/rust/src/parquet/encodings/rle.rs b/rust/src/parquet/encodings/rle.rs
new file mode 100644
index 0000000..5b56c2a
--- /dev/null
+++ b/rust/src/parquet/encodings/rle.rs
@@ -0,0 +1,839 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::{
+    cmp,
+    mem::{size_of, transmute_copy},
+};
+
+use crate::parquet::errors::{ParquetError, Result};
+use crate::parquet::util::{
+    bit_util::{self, BitReader, BitWriter},
+    memory::ByteBufferPtr,
+};
+
+/// Rle/Bit-Packing Hybrid Encoding
+/// The grammar for this encoding looks like the following (copied verbatim
+/// from https://github.com/Parquet/parquet-format/blob/master/Encodings.md):
+///
+/// rle-bit-packed-hybrid: <length> <encoded-data>
+/// length := length of the <encoded-data> in bytes stored as 4 bytes little endian
+/// encoded-data := <run>*
+/// run := <bit-packed-run> | <rle-run>
+/// bit-packed-run := <bit-packed-header> <bit-packed-values>
+/// bit-packed-header := varint-encode(<bit-pack-count> << 1 | 1)
+/// we always bit-pack a multiple of 8 values at a time, so we only store the number of
+/// values / 8
+/// bit-pack-count := (number of values in this run) / 8
+/// bit-packed-values := *see 1 below*
+/// rle-run := <rle-header> <repeated-value>
+/// rle-header := varint-encode( (number of times repeated) << 1)
+/// repeated-value := value that is repeated, using a fixed-width of
+/// round-up-to-next-byte(bit-width)
+
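+// An illustrative sketch of the grammar above (not part of the upstream
+// parquet-rs sources): with bit_width = 1, a run of eight repeated zeros
+// followed by eight alternating values would serialize roughly as:
+//
+//     // rle-run: rle-header = varint(8 << 1) = 0x10, then the repeated
+//     // value padded out to a whole byte
+//     let rle_run: [u8; 2] = [0x10, 0x00];
+//     // bit-packed-run: bit-packed-header = varint((8 / 8) << 1 | 1) = 0x03,
+//     // then the values [0, 1, 0, 1, 0, 1, 0, 1] packed LSB-first
+//     let bit_packed_run: [u8; 2] = [0x03, 0b1010_1010];
+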
+/// Maximum groups per bit-packed run. Current value is 64.
+const MAX_GROUPS_PER_BIT_PACKED_RUN: usize = 1 << 6;
+const MAX_VALUES_PER_BIT_PACKED_RUN: usize = MAX_GROUPS_PER_BIT_PACKED_RUN * 8;
+const MAX_WRITER_BUF_SIZE: usize = 1 << 10;
+
+/// An RLE/Bit-Packing hybrid encoder.
+// TODO: track memory usage
+pub struct RleEncoder {
+    // Number of bits needed to encode the value. Must be in the range of [0, 64].
+    bit_width: u8,
+
+    // Underlying writer which holds an internal buffer.
+    bit_writer: BitWriter,
+
+    // If true, the buffer is full and subsequent `put()` calls will return `Ok(false)`.
+    buffer_full: bool,
+
+    // The maximum byte size a single run can take.
+    max_run_byte_size: usize,
+
+    // Buffered values for bit-packed runs.
+    buffered_values: [u64; 8],
+
+    // Number of currently buffered values; flushed as soon as it reaches 8.
+    num_buffered_values: usize,
+
+    // The current (also last) value that was written and the count of how many
+    // times in a row that value has been seen.
+    current_value: u64,
+
+    // The number of repetitions for `current_value`. Once this gets high
+    // enough, we switch to RLE encoding.
+    repeat_count: usize,
+
+    // Number of bit-packed values in the current run. This doesn't include values
+    // in `buffered_values`.
+    bit_packed_count: usize,
+
+    // The position of the indicator byte in the `bit_writer`.
+    indicator_byte_pos: i64,
+}
+
+impl RleEncoder {
+    pub fn new(bit_width: u8, buffer_len: usize) -> Self {
+        let buffer = vec![0; buffer_len];
+        RleEncoder::new_from_buf(bit_width, buffer, 0)
+    }
+
+    /// Initialize the encoder from existing `buffer` and the starting offset `start`.
+    pub fn new_from_buf(bit_width: u8, buffer: Vec<u8>, start: usize) -> Self {
+        assert!(bit_width <= 64, "bit_width ({}) out of range.", bit_width);
+        let max_run_byte_size = RleEncoder::min_buffer_size(bit_width);
+        assert!(
+            buffer.len() >= max_run_byte_size,
+            "buffer length {} must be greater than {}",
+            buffer.len(),
+            max_run_byte_size
+        );
+        let bit_writer = BitWriter::new_from_buf(buffer, start);
+        RleEncoder {
+            bit_width,
+            bit_writer,
+            buffer_full: false,
+            max_run_byte_size,
+            buffered_values: [0; 8],
+            num_buffered_values: 0,
+            current_value: 0,
+            repeat_count: 0,
+            bit_packed_count: 0,
+            indicator_byte_pos: -1,
+        }
+    }
+
+    /// Returns the minimum buffer size needed to use the encoder for `bit_width`.
+    /// This is the maximum length of a single run for `bit_width`.
+    pub fn min_buffer_size(bit_width: u8) -> usize {
+        let max_bit_packed_run_size = 1 + bit_util::ceil(
+            (MAX_VALUES_PER_BIT_PACKED_RUN * bit_width as usize) as i64,
+            8,
+        );
+        let max_rle_run_size =
+            bit_util::MAX_VLQ_BYTE_LEN + bit_util::ceil(bit_width as i64, 8) as usize;
+        ::std::cmp::max(max_bit_packed_run_size as usize, max_rle_run_size)
+    }
+
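+    // A minimal usage sketch (illustrative, not from the original sources):
+    // size a scratch buffer with `min_buffer_size` before handing it to
+    // `new_from_buf`, which asserts this lower bound:
+    //
+    //     let bit_width = 3;
+    //     let buffer = vec![0; RleEncoder::min_buffer_size(bit_width)];
+    //     let encoder = RleEncoder::new_from_buf(bit_width, buffer, 0);
+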
+    /// Returns the maximum buffer size needed to encode `num_values` values with
+    /// `bit_width`.
+    pub fn max_buffer_size(bit_width: u8, num_values: usize) -> usize {
+        // First, the maximum size of a bit-packed run
+        let bytes_per_run = bit_width;
+        let num_runs = bit_util::ceil(num_values as i64, 8) as usize;
+        let bit_packed_max_size = num_runs + num_runs * bytes_per_run as usize;
+
+        // Second, the maximum size of an RLE run
+        let min_rle_run_size = 1 + bit_util::ceil(bit_width as i64, 8) as usize;
+        let rle_max_size = bit_util::ceil(num_values as i64, 8) as usize * min_rle_run_size;
+        ::std::cmp::max(bit_packed_max_size, rle_max_size) as usize
+    }
+
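+    // Worked example (illustrative): for bit_width = 1 and num_values = 16,
+    // the bit-packed bound is 2 runs * (1 header byte + 1 data byte) = 4 and
+    // the RLE bound is 2 runs * (1 header byte + 1 value byte) = 4, so
+    // `max_buffer_size(1, 16)` returns 4.
+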
+    /// Encodes `value`, which must be representable with `bit_width` bits.
+    /// Returns `Ok(true)` if the value fits in the internal buffer, `Ok(false)`
+    /// if the buffer is full, or an error if the write itself fails.
+    #[inline]
+    pub fn put(&mut self, value: u64) -> Result<bool> {
+        // This function buffers 8 values at a time. After seeing 8 values, it
+        // decides whether the current run should be encoded in bit-packed or RLE.
+        if self.buffer_full {
+            // The value cannot fit in the current buffer.
+            return Ok(false);
+        }
+        if self.current_value == value {
+            self.repeat_count += 1;
+            if self.repeat_count > 8 {
+                // A continuation of last value. No need to buffer.
+                return Ok(true);
+            }
+        } else {
+            if self.repeat_count >= 8 {
+                // The current RLE run has ended and we've gathered enough. Flush first.
+                assert_eq!(self.bit_packed_count, 0);
+                self.flush_rle_run()?;
+            }
+            self.repeat_count = 1;
+            self.current_value = value;
+        }
+
+        self.buffered_values[self.num_buffered_values] = value;
+        self.num_buffered_values += 1;
+        if self.num_buffered_values == 8 {
+            // Buffered values are full. Flush them.
+            assert_eq!(self.bit_packed_count % 8, 0);
+            self.flush_buffered_values()?;
+        }
+
+        Ok(true)
+    }
+
+    #[inline]
+    pub fn buffer(&self) -> &[u8] {
+        self.bit_writer.buffer()
+    }
+
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.bit_writer.bytes_written()
+    }
+
+    #[inline]
+    pub fn consume(mut self) -> Result<Vec<u8>> {
+        self.flush()?;
+        Ok(self.bit_writer.consume())
+    }
+
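+    // A minimal encode round (illustrative sketch, not from the original
+    // sources): feed values with `put()`, then take the encoded bytes out
+    // with `consume()`:
+    //
+    //     let mut encoder = RleEncoder::new(1, 256);
+    //     for v in &[0u64, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0] {
+    //         assert!(encoder.put(*v).expect("put() should be OK"));
+    //     }
+    //     let encoded: Vec<u8> = encoder.consume().expect("consume() should be OK");
+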
+    /// Borrowing equivalent of the `consume` method.
+    /// Call `clear()` after invoking this method before reusing the encoder.
+    #[inline]
+    pub fn flush_buffer(&mut self) -> Result<&[u8]> {
+        self.flush()?;
+        Ok(self.bit_writer.flush_buffer())
+    }
+
+    /// Clears the internal state so this encoder can be reused (e.g., after becoming full).
+    #[inline]
+    pub fn clear(&mut self) {
+        self.bit_writer.clear();
+        self.buffer_full = false;
+        self.num_buffered_values = 0;
+        self.current_value = 0;
+        self.repeat_count = 0;
+        self.bit_packed_count = 0;
+        self.indicator_byte_pos = -1;
+    }
+
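+    // Illustrative reuse pattern (a sketch, not from the original sources):
+    // borrow the encoded bytes, copy them out, then reset for the next batch:
+    //
+    //     let encoded = encoder.flush_buffer()?.to_vec();
+    //     encoder.clear();
+    //     // `encoder` is now ready for the next sequence of values.
+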
+    /// Flushes all remaining buffered values to the internal writer,
+    /// terminating any in-progress run.
+    #[inline]
+    pub fn flush(&mut self) -> Result<()> {
+        if self.bit_packed_count > 0 || self.repeat_count > 0 || self.num_buffered_values > 0 {
+            let all_repeat = self.bit_packed_count == 0
+                && (self.repeat_count == self.num_buffered_values || self.num_buffered_values == 0);
+            if self.repeat_count > 0 && all_repeat {
+                self.flush_rle_run()?;
+            } else {
+                // Buffer the last group of bit-packed values to 8 by padding with 0s.
+                if self.num_buffered_values > 0 {
+                    while self.num_buffered_values < 8 {
+                        self.buffered_values[self.num_buffered_values] = 0;
+                        self.num_buffered_values += 1;
+                    }
+                }
+                self.bit_packed_count += self.num_buffered_values;
+                self.flush_bit_packed_run(true)?;
+                self.repeat_count = 0;
+            }
+        }
+        Ok(())
+    }
+
+    #[inline]
+    fn flush_rle_run(&mut self) -> Result<()> {
+        assert!(self.repeat_count > 0);
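+        // The indicator's LSB is 0 for an RLE run (1 marks a bit-packed run).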
+        let indicator_value = self.repeat_count << 1 | 0;
+        let mut result = self.bit_writer.put_vlq_int(indicator_value as u64);
+        result &= self.bit_writer.put_aligned(
+            self.current_value,
+            bit_util::ceil(self.bit_width as i64, 8) as usize,
+        );
+        if !result {
+            return Err(general_err!("Failed to write RLE run"));
+        }
+        self.num_buffered_values = 0;
+        self.repeat_count = 0;
+        Ok(())
+    }
+
+    #[inline]
+    fn flush_bit_packed_run(&mut self, update_indicator_byte: bool) -> Result<()> {
+        if self.indicator_byte_pos < 0 {
... 17325 lines suppressed ...