Posted to issues@iceberg.apache.org by "akshayakp97 (via GitHub)" <gi...@apache.org> on 2023/05/03 20:10:47 UTC

[GitHub] [iceberg] akshayakp97 commented on a diff in pull request #7505: Move all S3FileIO related properties into a separate class S3FileIOProperties

akshayakp97 commented on code in PR #7505:
URL: https://github.com/apache/iceberg/pull/7505#discussion_r1184207004


##########
aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIOProperties.java:
##########
@@ -0,0 +1,666 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.aws.s3;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.iceberg.aws.glue.GlueCatalog;
+import org.apache.iceberg.exceptions.ValidationException;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.PropertyUtil;
+import software.amazon.awssdk.services.s3.model.ObjectCannedACL;
+import software.amazon.awssdk.services.s3.model.Tag;
+
+public class S3FileIOProperties implements Serializable {
+
+  /**
+   * Type of S3 server-side encryption to use; defaults to {@link
+   * S3FileIOProperties#S3FILEIO_SSE_TYPE_NONE}.
+   *
+   * <p>For more details: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+   */
+  public static final String S3FILEIO_SSE_TYPE = "s3.sse.type";
+
+  /** No server side encryption. */
+  public static final String S3FILEIO_SSE_TYPE_NONE = "none";
+
+  /**
+   * S3 SSE-KMS encryption.
+   *
+   * <p>For more details: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
+   */
+  public static final String S3FILEIO_SSE_TYPE_KMS = "kms";
+
+  /**
+   * S3 SSE-S3 encryption.
+   *
+   * <p>For more details:
+   * https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
+   */
+  public static final String S3FILEIO_SSE_TYPE_S3 = "s3";
+
+  /**
+   * S3 SSE-C encryption.
+   *
+   * <p>For more details:
+   * https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+   */
+  public static final String S3FILEIO_SSE_TYPE_CUSTOM = "custom";
+
+  /**
+   * If the S3 encryption type is SSE-KMS, the input is a KMS key ID or ARN. If this property is
+   * not set, the default key "aws/s3" is used. If the encryption type is SSE-C, the input is a
+   * custom base-64 AES256 symmetric key.
+   */
+  public static final String S3FILEIO_SSE_KEY = "s3.sse.key";
+
+  /**
+   * If the S3 encryption type is SSE-C, the input is the base-64 MD5 digest of the secret key.
+   * This MD5 must be explicitly passed in by the caller to ensure key integrity.
+   */
+  public static final String S3FILEIO_SSE_MD5 = "s3.sse.md5";
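+  // Illustrative configuration sketch (placeholder values, not prescriptive):
+  // enabling SSE-KMS through catalog properties using the keys above.
+  //   s3.sse.type=kms
+  //   s3.sse.key=my-kms-key-id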
+
+  /**
+   * Number of threads to use for uploading parts to S3 (shared pool across all output streams);
+   * defaults to {@link Runtime#availableProcessors()}.
+   */
+  public static final String S3FILEIO_MULTIPART_UPLOAD_THREADS = "s3.multipart.num-threads";
+
+  /**
+   * The size in bytes of a single part for multipart upload requests (default: 32MB). Per the S3
+   * requirements, the part size must be at least 5MB. To ensure reader and writer performance,
+   * the part size must be less than 2GB.
+   *
+   * <p>For more details, see https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
+   */
+  public static final String S3FILEIO_MULTIPART_SIZE = "s3.multipart.part-size-bytes";
+
+  public static final int S3FILEIO_MULTIPART_SIZE_DEFAULT = 32 * 1024 * 1024;
+  public static final int S3FILEIO_MULTIPART_SIZE_MIN = 5 * 1024 * 1024;
+
+  /**
+   * The threshold expressed as a factor times the multipart size at which to switch from uploading
+   * using a single put object request to uploading using multipart upload (default: 1.5).
+   */
+  public static final String S3FILEIO_MULTIPART_THRESHOLD_FACTOR = "s3.multipart.threshold";
+
+  public static final double S3FILEIO_MULTIPART_THRESHOLD_FACTOR_DEFAULT = 1.5;
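+  // Worked example: with the default part size (32MB) and the default threshold
+  // factor (1.5), writes larger than 32MB * 1.5 = 48MB switch from a single put
+  // object request to a multipart upload.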
+
+  /**
+   * Location to put staging files for uploads to S3; defaults to the temp directory set in
+   * java.io.tmpdir.
+   */
+  public static final String S3FILEIO_STAGING_DIRECTORY = "s3.staging-dir";
+
+  /**
+   * Used to configure the canned access control list (ACL) for the S3 client to use during
+   * writes. If not set, no ACL is set on requests.
+   *
+   * <p>The input must be one of {@link software.amazon.awssdk.services.s3.model.ObjectCannedACL},
+   * such as 'public-read-write'. For more details:
+   * https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+   */
+  public static final String S3FILEIO_ACL = "s3.acl";
+
+  /**
+   * Configure an alternative endpoint of the S3 service for S3FileIO to access.
+   *
+   * <p>This can be used to point S3FileIO at any S3-compatible object storage service that
+   * exposes a different endpoint, or to access a private S3 endpoint in a virtual private cloud.
+   */
+  public static final String S3FILEIO_ENDPOINT = "s3.endpoint";
+
+  /**
+   * If set to {@code true}, requests to S3FileIO will use path-style access; otherwise, virtual
+   * hosted-style access will be used.
+   *
+   * <p>For more details: https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html
+   */
+  public static final String S3FILEIO_PATH_STYLE_ACCESS = "s3.path-style-access";
+
+  public static final boolean S3FILEIO_PATH_STYLE_ACCESS_DEFAULT = false;
+
+  /**
+   * Configure the static access key ID used to access S3FileIO.
+   *
+   * <p>When set, the default client factory will use the basic or session credentials provided
+   * instead of reading the default credential chain to create S3 access credentials. If {@link
+   * #S3FILEIO_SESSION_TOKEN} is set, session credentials are used; otherwise, basic credentials
+   * are used.
+   */
+  public static final String S3FILEIO_ACCESS_KEY_ID = "s3.access-key-id";
+
+  /**
+   * Configure the static secret access key used to access S3FileIO.
+   *
+   * <p>When set, the default client factory will use the basic or session credentials provided
+   * instead of reading the default credential chain to create S3 access credentials. If {@link
+   * #S3FILEIO_SESSION_TOKEN} is set, session credentials are used; otherwise, basic credentials
+   * are used.
+   */
+  public static final String S3FILEIO_SECRET_ACCESS_KEY = "s3.secret-access-key";
+
+  /**
+   * Configure the static session token used to access S3FileIO.
+   *
+   * <p>When set, the default client factory will use the session credentials provided instead of
+   * reading the default credential chain to create S3 access credentials.
+   */
+  public static final String S3FILEIO_SESSION_TOKEN = "s3.session-token";
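+  // Illustrative sketch (placeholder values): supplying static session
+  // credentials. If s3.session-token is omitted, basic credentials are used.
+  //   s3.access-key-id=<access-key-id>
+  //   s3.secret-access-key=<secret-access-key>
+  //   s3.session-token=<session-token>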
+
+  /**
+   * Enable to make S3FileIO, to make cross-region call to the region specified in the ARN of an
+   * access point.
+   *
+   * <p>By default, attempting to use an access point in a different region will throw an exception.
+   * When enabled, this property allows using access points in other regions.
+   *
+   * <p>For more details see:
+   * https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/s3/S3Configuration.html#useArnRegionEnabled--
+   */
+  public static final String S3_USE_ARN_REGION_ENABLED = "s3.use-arn-region-enabled";
+
+  public static final boolean S3_USE_ARN_REGION_ENABLED_DEFAULT = false;
+
+  /** Enables eTag checks for S3 PUT and MULTIPART upload requests. */
+  public static final String S3_CHECKSUM_ENABLED = "s3.checksum-enabled";
+
+  public static final boolean S3_CHECKSUM_ENABLED_DEFAULT = false;
+
+  public static final String S3_REMOTE_SIGNING_ENABLED = "s3.remote-signing-enabled";
+
+  public static final boolean S3_REMOTE_SIGNING_ENABLED_DEFAULT = false;
+
+  /** Configure the batch size used when deleting multiple files from a given S3 bucket. */
+  public static final String S3FILEIO_DELETE_BATCH_SIZE = "s3.delete.batch-size";
+
+  /**
+   * Default batch size used when deleting files.
+   *
+   * <p>Refer to https://github.com/apache/hadoop/commit/56dee667707926f3796c7757be1a133a362f05c9
+   * for more details on why this value was chosen.
+   */
+  public static final int S3FILEIO_DELETE_BATCH_SIZE_DEFAULT = 250;
+
+  /**
+   * Max possible batch size for deletion. Currently, a max of 1000 keys can be deleted in one
+   * batch. https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
+   */
+  public static final int S3FILEIO_DELETE_BATCH_SIZE_MAX = 1000;
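+  // Sketch of how these constants combine when reading the property, using the
+  // PropertyUtil and Preconditions imports above; `properties` is a
+  // hypothetical caller-provided map, not part of this class.
+  //   int batchSize =
+  //       PropertyUtil.propertyAsInt(
+  //           properties, S3FILEIO_DELETE_BATCH_SIZE, S3FILEIO_DELETE_BATCH_SIZE_DEFAULT);
+  //   Preconditions.checkArgument(
+  //       batchSize > 0 && batchSize <= S3FILEIO_DELETE_BATCH_SIZE_MAX,
+  //       "Deletion batch size must be between 1 and %s", S3FILEIO_DELETE_BATCH_SIZE_MAX);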
+
+  /**
+   * Used by {@link S3FileIO} to tag objects when writing. This can be set through a catalog
+   * property.
+   *
+   * <p>For more details, see
+   * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+   *
+   * <p>Example: s3.write.tags.my_key=my_val
+   */
+  public static final String S3_WRITE_TAGS_PREFIX = "s3.write.tags.";
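+  // Sketch (hypothetical `properties` map): collecting the prefixed tag
+  // properties and converting them to SDK Tag objects with the imports above.
+  //   Set<Tag> writeTags =
+  //       PropertyUtil.propertiesWithPrefix(properties, S3_WRITE_TAGS_PREFIX).entrySet().stream()
+  //           .map(e -> Tag.builder().key(e.getKey()).value(e.getValue()).build())
+  //           .collect(Collectors.toSet());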
+
+  /**
+   * Used by {@link GlueCatalog} to tag objects when writing. This can be set through a catalog
+   * property.
+   *
+   * <p>For more details, see
+   * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+   *
+   * <p>Example: s3.write.table-tag-enabled=true
+   */
+  public static final String S3_WRITE_TABLE_TAG_ENABLED = "s3.write.table-tag-enabled";
+
+  public static final boolean S3_WRITE_TABLE_TAG_ENABLED_DEFAULT = false;
+
+  /**
+   * Used by {@link GlueCatalog} to tag objects when writing. This can be set through a catalog
+   * property.
+   *
+   * <p>For more details, see
+   * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+   *
+   * <p>Example: s3.write.namespace-tag-enabled=true
+   */
+  public static final String S3_WRITE_NAMESPACE_TAG_ENABLED = "s3.write.namespace-tag-enabled";
+
+  public static final boolean S3_WRITE_NAMESPACE_TAG_ENABLED_DEFAULT = false;
+
+  /**
+   * Tag name that will be used by {@link #S3_WRITE_TAGS_PREFIX} when {@link
+   * #S3_WRITE_TABLE_TAG_ENABLED} is enabled.
+   *
+   * <p>Example: iceberg.table=tableName
+   */
+  public static final String S3_TAG_ICEBERG_TABLE = "iceberg.table";
+
+  /**
+   * Tag name that will be used by {@link #S3_WRITE_TAGS_PREFIX} when {@link
+   * #S3_WRITE_NAMESPACE_TAG_ENABLED} is enabled.
+   *
+   * <p>Example: iceberg.namespace=namespaceName
+   */
+  public static final String S3_TAG_ICEBERG_NAMESPACE = "iceberg.namespace";
+
+  /**
+   * Used by {@link S3FileIO} to tag objects when deleting. When this config is set, objects are
+   * tagged with the configured key-value pairs before deletion. This is considered a soft-delete,
+   * because users can configure a tag-based object lifecycle policy at the bucket level to
+   * transition objects to different tiers.
+   *
+   * <p>For more details, see
+   * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
+   *
+   * <p>Example: s3.delete.tags.my_key=my_val
+   */
+  public static final String S3_DELETE_TAGS_PREFIX = "s3.delete.tags.";
+
+  /**
+   * Number of threads to use for adding delete tags to S3 objects; defaults to {@link
+   * Runtime#availableProcessors()}.
+   */
+  public static final String S3FILEIO_DELETE_THREADS = "s3.delete.num-threads";
+
+  /**
+   * Determines if {@link S3FileIO} deletes the object when io.delete() is called; defaults to
+   * true. Once disabled, users are expected to set tags through {@link #S3_DELETE_TAGS_PREFIX}
+   * and manage deleted files through the S3 lifecycle policy.
+   */
+  public static final String S3_DELETE_ENABLED = "s3.delete-enabled";
+
+  public static final boolean S3_DELETE_ENABLED_DEFAULT = true;
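+  // Illustrative soft-delete setup (placeholder tag): disable physical deletes
+  // and tag removed objects so a bucket lifecycle rule can expire them later.
+  //   s3.delete-enabled=false
+  //   s3.delete.tags.deleted=true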
+
+  /**
+   * Determines if the S3 client will use the Acceleration Mode; defaults to false.
+   *
+   * <p>For more details, see
+   * https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration.html
+   */
+  public static final String S3_ACCELERATION_ENABLED = "s3.acceleration-enabled";
+
+  public static final boolean S3_ACCELERATION_ENABLED_DEFAULT = false;
+
+  /**
+   * Determines if the S3 client will use the Dualstack Mode; defaults to false.
+   *
+   * <p>For more details, see
+   * https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html
+   */
+  public static final String S3_DUALSTACK_ENABLED = "s3.dualstack-enabled";
+
+  public static final boolean S3_DUALSTACK_ENABLED_DEFAULT = false;
+
+  /**
+   * Used by {@link S3FileIO} as the prefix for bucket access point configuration. This can be set
+   * through a catalog property.
+   *
+   * <p>For more details, see https://aws.amazon.com/s3/features/access-points/
+   *
+   * <p>Example: s3.access-points.my-bucket=access-point
+   */
+  public static final String S3_ACCESS_POINTS_PREFIX = "s3.access-points.";
+
+  /**
+   * This flag controls whether the S3 client is initialized eagerly during S3FileIO
+   * initialization, instead of the default lazy initialization upon first use. This is needed for
+   * cases where the credentials might change and thus need to be preloaded.
+   */
+  public static final String S3_PRELOAD_CLIENT_ENABLED = "s3.preload-client-enabled";
+
+  public static final boolean S3_PRELOAD_CLIENT_ENABLED_DEFAULT = false;
+
+  private String s3FileIoSseType;

Review Comment:
   Sorry, what do you mean by "match the config names"? If by config you mean `S3FileIOProperties`, then shouldn't we update all these variable names to `s3FileIoXXX`? 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.



---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org
For additional commands, e-mail: issues-help@iceberg.apache.org