You are viewing a plain text version of this content. The canonical link for it is here.
Posted to github@arrow.apache.org by GitBox <gi...@apache.org> on 2020/09/10 15:28:41 UTC

[GitHub] [arrow] emkornfield commented on a change in pull request #8136: ARROW-9078: [C++] Parquet read / write extension type with nested storage type

emkornfield commented on a change in pull request #8136:
URL: https://github.com/apache/arrow/pull/8136#discussion_r486435150



##########
File path: cpp/src/parquet/arrow/schema.cc
##########
@@ -682,48 +689,78 @@ Status GetOriginSchema(const std::shared_ptr<const KeyValueMetadata>& metadata,
 // Restore original Arrow field information that was serialized as Parquet metadata
 // but that is not necessarily present in the field reconstituted from Parquet data
 // (for example, Parquet timestamp types don't carry timezone information).
-Status ApplyOriginalMetadata(std::shared_ptr<Field> field, const Field& origin_field,
-                             std::shared_ptr<Field>* out) {
+
+Status ApplyOriginalStorageMetadata(const Field& origin_field, SchemaField* inferred) {
   auto origin_type = origin_field.type();
-  if (field->type()->id() == ::arrow::Type::TIMESTAMP) {
+  auto inferred_type = inferred->field->type();
+
+  if (inferred_type->id() == ::arrow::Type::TIMESTAMP) {
     // Restore time zone, if any
-    const auto& ts_type = static_cast<const ::arrow::TimestampType&>(*field->type());
+    const auto& ts_type = static_cast<const ::arrow::TimestampType&>(*inferred_type);
     const auto& ts_origin_type = static_cast<const ::arrow::TimestampType&>(*origin_type);
 
     // If the unit is the same and the data is tz-aware, then set the original
     // time zone, since Parquet has no native storage for timezones
     if (ts_type.unit() == ts_origin_type.unit() && ts_type.timezone() == "UTC" &&
         ts_origin_type.timezone() != "") {
-      field = field->WithType(origin_type);
+      inferred->field = inferred->field->WithType(origin_type);
     }
   }
+
   if (origin_type->id() == ::arrow::Type::DICTIONARY &&
-      field->type()->id() != ::arrow::Type::DICTIONARY &&
-      IsDictionaryReadSupported(*field->type())) {
+      inferred_type->id() != ::arrow::Type::DICTIONARY &&
+      IsDictionaryReadSupported(*inferred_type)) {
     const auto& dict_origin_type =
         static_cast<const ::arrow::DictionaryType&>(*origin_type);
-    field = field->WithType(
-        ::arrow::dictionary(::arrow::int32(), field->type(), dict_origin_type.ordered()));
+    inferred->field = inferred->field->WithType(
+        ::arrow::dictionary(::arrow::int32(), inferred_type, dict_origin_type.ordered()));
+  }
+
+  // Restore field metadata
+  std::shared_ptr<const KeyValueMetadata> field_metadata = origin_field.metadata();
+  if (field_metadata != nullptr) {
+    if (inferred->field->metadata()) {
+      // Prefer the metadata keys (like field_id) from the current metadata
+      field_metadata = field_metadata->Merge(*inferred->field->metadata());
+    }
+    inferred->field = inferred->field->WithMetadata(field_metadata);
   }
 
   if (origin_type->id() == ::arrow::Type::EXTENSION) {
     // Restore extension type, if the storage type is as read from Parquet
     const auto& ex_type = checked_cast<const ::arrow::ExtensionType&>(*origin_type);
-    if (ex_type.storage_type()->Equals(*field->type())) {
-      field = field->WithType(origin_type);
+    if (ex_type.storage_type()->Equals(*inferred_type)) {
+      inferred->field = inferred->field->WithType(origin_type);
     }
   }
 
-  // Restore field metadata
-  std::shared_ptr<const KeyValueMetadata> field_metadata = origin_field.metadata();
-  if (field_metadata != nullptr) {
-    if (field->metadata()) {
-      // Prefer the metadata keys (like field_id) from the current metadata
-      field_metadata = field_metadata->Merge(*field->metadata());
+  // TODO Should apply metadata recursively, but for that we need to move metadata
+  // application inside NodeToSchemaField (ARROW-9943)
+
+  return Status::OK();
+}
+
+Status ApplyOriginalMetadata(const Field& origin_field, SchemaField* inferred) {
+  auto origin_type = origin_field.type();
+  auto inferred_type = inferred->field->type();
+
+  if (origin_type->id() == ::arrow::Type::EXTENSION) {
+    const auto& ex_type = checked_cast<const ::arrow::ExtensionType&>(*origin_type);
+    auto origin_storage_field = origin_field.WithType(ex_type.storage_type());
+
+    // Apply metadata recursively to storage type

Review comment:
       This seems to conflict with the TODO comment above (ARROW-9943), which says metadata cannot yet be applied recursively — yet the extension-type branch here applies metadata to the storage type recursively.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org