You are viewing a plain text version of this content. The canonical link for this message is the discussion URL quoted below.
Posted to issues@flink.apache.org by GitBox <gi...@apache.org> on 2019/07/23 08:32:23 UTC

[GitHub] [flink] wuchong commented on a change in pull request #9203: [FLINK-13375][table-api] Move ExecutionConfigOptions and OptimizerConfigOptions to table-api

wuchong commented on a change in pull request #9203: [FLINK-13375][table-api] Move ExecutionConfigOptions and OptimizerConfigOptions to table-api
URL: https://github.com/apache/flink/pull/9203#discussion_r306192644
 
 

 ##########
 File path: flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/config/ExecutionConfigOptions.java
 ##########
 @@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.api.config;
+
+import org.apache.flink.configuration.ConfigOption;
+
+import static org.apache.flink.configuration.ConfigOptions.key;
+
+/**
+ * This class holds configuration constants used by Flink's table module.
+ */
+public class ExecutionConfigOptions {
+
+	// ------------------------------------------------------------------------
+	//  Source Options
+	// ------------------------------------------------------------------------
+	public static final ConfigOption<String> SQL_EXEC_SOURCE_IDLE_TIMEOUT =
+			key("sql.exec.source.idle.timeout")
+					.defaultValue("-1 ms")
+					.withDescription("When a source do not receive any elements for the timeout time, " +
+							"it will be marked as temporarily idle. This allows downstream " +
+							"tasks to advance their watermarks without the need to wait for " +
+							"watermarks from this source while it is idle.");
+
+	// ------------------------------------------------------------------------
+	//  Sort Options
+	// ------------------------------------------------------------------------
+	public static final ConfigOption<Integer> SQL_EXEC_SORT_DEFAULT_LIMIT =
+			key("sql.exec.sort.default.limit")
+					.defaultValue(200)
+					.withDescription("Default limit when user don't set a limit after order by. ");
+
+	public static final ConfigOption<Integer> SQL_EXEC_SORT_MAX_NUM_FILE_HANDLES =
+			key("sql.exec.sort.max-num-file-handles")
+					.defaultValue(128)
+					.withDescription("The maximal fan-in for external merge sort. It limits the number of file handles per operator. " +
+							"If it is too small, may cause intermediate merging. But if it is too large, " +
+							"it will cause too many files opened at the same time, consume memory and lead to random reading.");
+
+	public static final ConfigOption<Boolean> SQL_EXEC_SORT_ASYNC_MERGE_ENABLED =
+			key("sql.exec.sort.async-merge.enabled")
+					.defaultValue(true)
+					.withDescription("Whether to asynchronously merge sorted spill files.");
+
+	// ------------------------------------------------------------------------
+	//  Spill Options
+	// ------------------------------------------------------------------------
+	public static final ConfigOption<Boolean> SQL_EXEC_SPILL_COMPRESSION_ENABLED =
+			key("sql.exec.spill.compression.enabled")
+					.defaultValue(true)
+					.withDescription("Whether to compress spilled data. " +
+							"(Now include sort and hash agg and hash join)");
+
+	public static final ConfigOption<String> SQL_EXEC_SPILL_COMPRESSION_CODEC =
+			key("sql.exec.spill.compression.codec")
+					.defaultValue("lz4")
+					.withDescription("Use that compression codec to compress spilled file. " +
+							"Now we only support lz4.");
+
+	public static final ConfigOption<Integer> SQL_EXEC_SPILL_COMPRESSION_BLOCK_SIZE =
+			key("sql.exec.spill.compression.block-size")
+					.defaultValue(64 * 1024)
+					.withDescription("The buffer is to compress. The larger the buffer," +
+							" the better the compression ratio, but the more memory consumption.");
+
+	// ------------------------------------------------------------------------
+	//  Resource Options
+	// ------------------------------------------------------------------------
+
+	public static final ConfigOption<Integer> SQL_RESOURCE_DEFAULT_PARALLELISM =
 
 Review comment:
   +1 to `.exec.resource`

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services