Posted to github@arrow.apache.org by GitBox <gi...@apache.org> on 2021/12/22 16:11:09 UTC

[GitHub] [arrow-rs] tustvold commented on a change in pull request #1082: Optimized ByteArrayReader (#1040)

tustvold commented on a change in pull request #1082:
URL: https://github.com/apache/arrow-rs/pull/1082#discussion_r774005033



##########
File path: parquet/src/arrow/array_reader/byte_array.rs
##########
@@ -0,0 +1,639 @@
+use crate::arrow::array_reader::{read_records, ArrayReader};
+use crate::arrow::record_reader::buffer::{RecordBuffer, TypedBuffer, ValueBuffer};
+use crate::arrow::record_reader::GenericRecordReader;
+use crate::arrow::schema::parquet_to_arrow_field;
+use crate::basic::Encoding;
+use crate::column::page::PageIterator;
+use crate::column::reader::decoder::{ColumnValueDecoder, ValuesWriter};
+use crate::data_type::Int32Type;
+use crate::decoding::{Decoder, DeltaBitPackDecoder};
+use crate::encodings::rle::RleDecoder;
+use crate::errors::{ParquetError, Result};
+use crate::memory::ByteBufferPtr;
+use crate::schema::types::ColumnDescPtr;
+use arrow::array::{
+    ArrayData, ArrayDataBuilder, ArrayRef, BinaryArray, LargeBinaryArray,
+    LargeStringArray, OffsetSizeTrait, StringArray,
+};
+use arrow::buffer::Buffer;
+use arrow::datatypes::DataType as ArrowType;
+use std::any::Any;
+use std::ops::Range;
+use std::sync::Arc;
+
+enum Reader {
+    Binary(GenericRecordReader<OffsetBuffer<i32>, ByteArrayDecoder<i32>>),
+    LargeBinary(GenericRecordReader<OffsetBuffer<i64>, ByteArrayDecoder<i64>>),
+    Utf8(GenericRecordReader<OffsetBuffer<i32>, ByteArrayDecoder<i32>>),
+    LargeUtf8(GenericRecordReader<OffsetBuffer<i64>, ByteArrayDecoder<i64>>),
+}
+
+fn consume_array_data<I: OffsetSizeTrait>(
+    data_type: ArrowType,
+    reader: &mut GenericRecordReader<OffsetBuffer<I>, ByteArrayDecoder<I>>,
+) -> Result<ArrayData> {
+    let buffer = reader.consume_record_data()?;
+    let mut array_data_builder = ArrayDataBuilder::new(data_type)
+        .len(buffer.len())
+        .add_buffer(buffer.offsets.into())
+        .add_buffer(buffer.values.into());
+
+    if let Some(buffer) = reader.consume_bitmap_buffer()? {
+        array_data_builder = array_data_builder.null_bit_buffer(buffer);
+    }
+    Ok(unsafe { array_data_builder.build_unchecked() })
+}
+
+pub struct ByteArrayReader {
+    data_type: ArrowType,
+    pages: Box<dyn PageIterator>,
+    def_levels_buffer: Option<Buffer>,
+    rep_levels_buffer: Option<Buffer>,
+    column_desc: ColumnDescPtr,
+    record_reader: Reader,
+}
+
+impl ByteArrayReader {
+    /// Construct byte array reader.
+    pub fn new(
+        pages: Box<dyn PageIterator>,
+        column_desc: ColumnDescPtr,
+        arrow_type: Option<ArrowType>,
+    ) -> Result<Self> {
+        Self::new_with_options(pages, column_desc, arrow_type, false)
+    }
+
+    /// Construct byte array reader with the option to only compute the null mask
+    /// and skip buffering the level data
+    pub fn new_with_options(
+        pages: Box<dyn PageIterator>,
+        column_desc: ColumnDescPtr,
+        arrow_type: Option<ArrowType>,
+        null_mask_only: bool,
+    ) -> Result<Self> {
+        // Check if Arrow type is specified, else create it from Parquet type
+        let data_type = match arrow_type {
+            Some(t) => t,
+            None => parquet_to_arrow_field(column_desc.as_ref())?
+                .data_type()
+                .clone(),
+        };
+
+        let record_reader = match data_type {
+            ArrowType::Binary => Reader::Binary(GenericRecordReader::new_with_options(
+                column_desc.clone(),
+                null_mask_only,
+            )),
+            ArrowType::LargeBinary => {
+                Reader::LargeBinary(GenericRecordReader::new_with_options(
+                    column_desc.clone(),
+                    null_mask_only,
+                ))
+            }
+            ArrowType::Utf8 => Reader::Utf8(GenericRecordReader::new_with_options(
+                column_desc.clone(),
+                null_mask_only,
+            )),
+            ArrowType::LargeUtf8 => {
+                Reader::LargeUtf8(GenericRecordReader::new_with_options(
+                    column_desc.clone(),
+                    null_mask_only,
+                ))
+            }
+            _ => {
+                return Err(general_err!(
+                    "invalid data type for ByteArrayReader - {}",
+                    data_type
+                ))
+            }
+        };
+
+        Ok(Self {
+            data_type,
+            pages,
+            def_levels_buffer: None,
+            rep_levels_buffer: None,
+            column_desc,
+            record_reader,
+        })
+    }
+}
+
+impl ArrayReader for ByteArrayReader {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn get_data_type(&self) -> &ArrowType {
+        &self.data_type
+    }
+
+    fn next_batch(&mut self, batch_size: usize) -> crate::errors::Result<ArrayRef> {
+        let data = match &mut self.record_reader {
+            Reader::Binary(r) | Reader::Utf8(r) => {

Review comment:
       I wasn't sure how to achieve that without either leaking the index type into ByteArrayReader, or creating some sort of trait and using dynamic dispatch. An enumeration seemed like the simplest approach, and is likely to have negligible performance implications?
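
       For reference, a minimal self-contained sketch of the trade-off (the names OffsetReader, Small, Large and num_offsets are invented for illustration and are not the PR's actual types): the enum keeps the outer reader non-generic over the offset width while each match arm stays statically dispatched, whereas a Box<dyn Trait> alternative would add boxing and a virtual call per use.

           // Invented illustration, not the PR's code: dispatch over the two
           // offset widths with an enum rather than a trait object.
           enum OffsetReader {
               Small(Vec<i32>), // i32 offsets: Binary / Utf8
               Large(Vec<i64>), // i64 offsets: LargeBinary / LargeUtf8
           }

           impl OffsetReader {
               fn num_offsets(&self) -> usize {
                   // No virtual call here, unlike a Box<dyn ...> trait object;
                   // the branch is a plain enum discriminant check.
                   match self {
                       OffsetReader::Small(v) => v.len(),
                       OffsetReader::Large(v) => v.len(),
                   }
               }
           }

           fn main() {
               let r = OffsetReader::Small(vec![0, 3, 7]);
               assert_eq!(r.num_offsets(), 3);
           }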



