You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@flink.apache.org by dw...@apache.org on 2020/05/19 09:08:40 UTC

[flink] branch release-1.11 updated: [FLINK-17027][hotfix] Rename back-off infix to backoff in new Elasticsearch properties

This is an automated email from the ASF dual-hosted git repository.

dwysakowicz pushed a commit to branch release-1.11
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.11 by this push:
     new e7d126f  [FLINK-17027][hotfix] Rename back-off infix to backoff in new Elasticsearch properties
e7d126f is described below

commit e7d126fd1a8d02d521b32fb9a027ce26a1a9da1b
Author: Dawid Wysakowicz <dw...@apache.org>
AuthorDate: Tue May 19 09:30:25 2020 +0200

    [FLINK-17027][hotfix] Rename back-off infix to backoff in new Elasticsearch properties
---
 docs/dev/table/connect.md                                           | 2 +-
 docs/dev/table/connect.zh.md                                        | 2 +-
 .../connectors/elasticsearch/table/ElasticsearchOptions.java        | 6 +++---
 .../elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java   | 4 ++--
 .../elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java   | 4 ++--
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
index 52a22b5..4a1e83a 100644
--- a/docs/dev/table/connect.md
+++ b/docs/dev/table/connect.md
@@ -1102,7 +1102,7 @@ connector:
       max-size: 42 mb           # optional: maximum size of buffered actions in bytes per bulk request
                                 #   (only MB granularity is supported)
       interval: 60000           # optional: bulk flush interval (in milliseconds)
-      back-off:                 # optional: backoff strategy ("disabled" by default)
+      backoff:                  # optional: backoff strategy ("disabled" by default)
         type: ...               #   valid strategies are "disabled", "constant", or "exponential"
         max-retries: 3          # optional: maximum number of retries
         delay: 30000            # optional: delay between each backoff attempt (in milliseconds)
diff --git a/docs/dev/table/connect.zh.md b/docs/dev/table/connect.zh.md
index 71b490c..a6ac36b 100644
--- a/docs/dev/table/connect.zh.md
+++ b/docs/dev/table/connect.zh.md
@@ -1102,7 +1102,7 @@ connector:
       max-size: 42 mb           # optional: maximum size of buffered actions in bytes per bulk request
                                 #   (only MB granularity is supported)
       interval: 60000           # optional: bulk flush interval (in milliseconds)
-      back-off:                 # optional: backoff strategy ("disabled" by default)
+      backoff:                  # optional: backoff strategy ("disabled" by default)
         type: ...               #   valid strategies are "disabled", "constant", or "exponential"
         max-retries: 3          # optional: maximum number of retries
         delay: 30000            # optional: delay between each backoff attempt (in milliseconds)
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java
index c68ca68..176414d 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java
@@ -97,17 +97,17 @@ public class ElasticsearchOptions {
 			.noDefaultValue()
 			.withDescription("Bulk flush interval");
 	public static final ConfigOption<BackOffType> BULK_FLUSH_BACKOFF_TYPE_OPTION =
-		ConfigOptions.key("sink.bulk-flush.back-off.strategy")
+		ConfigOptions.key("sink.bulk-flush.backoff.strategy")
 			.enumType(BackOffType.class)
 			.defaultValue(BackOffType.DISABLED)
 			.withDescription("Backoff strategy");
 	public static final ConfigOption<Integer> BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION =
-		ConfigOptions.key("sink.bulk-flush.back-off.max-retries")
+		ConfigOptions.key("sink.bulk-flush.backoff.max-retries")
 			.intType()
 			.noDefaultValue()
 			.withDescription("Maximum number of retries.");
 	public static final ConfigOption<Duration> BULK_FLUSH_BACKOFF_DELAY_OPTION =
-		ConfigOptions.key("sink.bulk-flush.back-off.delay")
+		ConfigOptions.key("sink.bulk-flush.backoff.delay")
 			.durationType()
 			.noDefaultValue()
 			.withDescription("Delay between each backoff attempt.");
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java
index f1be1b2..6d0878f 100644
--- a/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java
+++ b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java
@@ -122,7 +122,7 @@ public class Elasticsearch6DynamicSinkFactoryTest {
 
 		thrown.expect(ValidationException.class);
 		thrown.expectMessage(
-			"'sink.bulk-flush.back-off.max-retries' must be at least 1. Got: 0");
+			"'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0");
 		sinkFactory.createDynamicTableSink(
 			context()
 				.withSchema(TableSchema.builder()
@@ -162,7 +162,7 @@ public class Elasticsearch6DynamicSinkFactoryTest {
 
 		thrown.expect(ValidationException.class);
 		thrown.expectMessage(
-			"Invalid value for option 'sink.bulk-flush.back-off.delay'.");
+			"Invalid value for option 'sink.bulk-flush.backoff.delay'.");
 		sinkFactory.createDynamicTableSink(
 			context()
 				.withSchema(TableSchema.builder()
diff --git a/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java b/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java
index a830fa3..4fe3214 100644
--- a/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java
+++ b/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java
@@ -118,7 +118,7 @@ public class Elasticsearch7DynamicSinkFactoryTest {
 
 		thrown.expect(ValidationException.class);
 		thrown.expectMessage(
-			"'sink.bulk-flush.back-off.max-retries' must be at least 1. Got: 0");
+			"'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0");
 		sinkFactory.createDynamicTableSink(
 			context()
 				.withSchema(TableSchema.builder()
@@ -156,7 +156,7 @@ public class Elasticsearch7DynamicSinkFactoryTest {
 
 		thrown.expect(ValidationException.class);
 		thrown.expectMessage(
-			"Invalid value for option 'sink.bulk-flush.back-off.delay'.");
+			"Invalid value for option 'sink.bulk-flush.backoff.delay'.");
 		sinkFactory.createDynamicTableSink(
 			context()
 				.withSchema(TableSchema.builder()