Posted to builds@beam.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2022/11/08 10:05:31 UTC

Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #706

See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/706/display/redirect?page=changes>

Changes:

[Robert Bradshaw] Better surfacing of Scala support via Scio.

[bulat.safiullin] [Website] change case-study-card width on mobile

[vitaly.terentyev] Add sparkreceiver:2 module.

[vitaly.terentyev] Fix sparkreceiver dependencies

[noreply] Print diff and scope to state path

[noreply] Correctly print diff and swallow empty commits for the moment

[noreply] Remove quiet flag on debug

[noreply] Use git diff instead of git diff-index to avoid file timestamp changes

[noreply] Make `documentation/io/connectors/` canonical (#23877)

[noreply] [Tour of Beam] Learning content for "Introduction" module (#23085)

[noreply] feat: implement bigtable io connector with write capabilities (#23411)

[noreply] Bump google.golang.org/api from 0.101.0 to 0.102.0 in /sdks (#23957)

[noreply] Enforce splitting invariants by ensuring split state is reset in the

[noreply] Add files then check cached diff to get untracked files

[noreply] Switch && for || to fix bug in #23889 resolution (#24017)


------------------------------------------
[...truncated 31.12 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/08 10:04:14 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/08 10:04:14 Prepared job with id: load-tests-go-flink-batch-gbk-1-1108065410_02718407-71d9-4b7f-9eb5-19f0fc10ca7a and staging token: load-tests-go-flink-batch-gbk-1-1108065410_02718407-71d9-4b7f-9eb5-19f0fc10ca7a
2022/11/08 10:04:19 Staged binary artifact with token: 
2022/11/08 10:04:20 Submitted job: load0tests0go0flink0batch0gbk0101108065410-root-1108100419-a72350f5_e570676f-acf3-4505-a9ce-9e3aa61ade99
2022/11/08 10:04:20 Job state: STOPPED
2022/11/08 10:04:20 Job state: STARTING
2022/11/08 10:04:20 Job state: RUNNING
2022/11/08 10:05:29  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/08 10:05:29  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
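
The MismatchedInputException above is standard Jackson behaviour when FAIL_ON_NULL_FOR_PRIMITIVES is enabled and a null arrives for a primitive field such as JobDetailsInfo's maxParallelism. Below is a minimal, self-contained Jackson sketch of that behaviour, for illustration only; the Details class is hypothetical and not Flink or Beam code.

    import com.fasterxml.jackson.databind.DeserializationFeature;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class NullPrimitiveDemo {
        // Hypothetical stand-in for the "maxParallelism" field of JobDetailsInfo.
        public static class Details {
            public long maxParallelism;
        }

        public static void main(String[] args) throws Exception {
            String json = "{\"maxParallelism\": null}";

            // With FAIL_ON_NULL_FOR_PRIMITIVES enabled (as in the failure above),
            // a null value for a primitive long raises MismatchedInputException.
            ObjectMapper strict = new ObjectMapper()
                    .enable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);
            try {
                strict.readValue(json, Details.class);
            } catch (Exception e) {
                System.out.println("strict: " + e.getMessage());
            }

            // With the feature disabled (Jackson's default), null maps to 0.
            ObjectMapper lenient = new ObjectMapper()
                    .disable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);
            Details d = lenient.readValue(json, Details.class);
            System.out.println("lenient: maxParallelism = " + d.maxParallelism);
        }
    }

Disabling the feature, as the message suggests, maps the null to the primitive default (0) instead of failing.
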
2022/11/08 10:05:29 Job state: FAILED
2022/11/08 10:05:29 Failed to execute job: job load0tests0go0flink0batch0gbk0101108065410-root-1108100419-a72350f5_e570676f-acf3-4505-a9ce-9e3aa61ade99 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101108065410-root-1108100419-a72350f5_e570676f-acf3-4505-a9ce-9e3aa61ade99 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f0de8, 0xc000122000}, {0x163d7b1?, 0x22511c0?}, {0xc000643e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 1m 59s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/fh6llbwwgyt24

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Jenkins build is back to normal : beam_LoadTests_Go_GBK_Flink_Batch #731

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/731/display/redirect?page=changes>


---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #730

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/730/display/redirect?page=changes>

Changes:

[bulat.safiullin] [Website] update copy-to-clipboard.js #24372

[noreply] Bump google.golang.org/grpc from 1.50.1 to 1.51.0 in /sdks (#24281)

[noreply] [Playground] use JAVA SDK 2.43.0 in Examples CI (#24429)

[noreply] Update authors.yml (#24433)

[noreply] Bump cloud.google.com/go/spanner from 1.36.0 to 1.40.0 in /sdks (#24423)

[noreply] Add Large Language Model RunInference Example (#24350)

[noreply] [Playground] [Backend] minor fixes for error msgs (#23999)

[noreply] pg_24284_now_closing_parenthesis on cancel button is visible (#24327)

[noreply] [Github Actions] - Cut Release Branch Workflow (#24020)

[noreply] Add six to build-requirements.txt (#24434)

[noreply] Add Pytorch RunInference GPU benchmark (#24347)

[noreply] Fix multiple mutations affecting the same entity in Datastore write

[noreply] Fix BlobstorageIO.checksum Attribute Error (#24442)

[noreply] Bump github.com/tetratelabs/wazero in /sdks (#24453)

[noreply] [BEAM-12164]  Support querying against Postgres for the SpannerIO change


------------------------------------------
[...truncated 31.22 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/12/01 10:05:32 Using specified **** binary: 'linux_amd64/group_by_key'
2022/12/01 10:05:33 Prepared job with id: load-tests-go-flink-batch-gbk-1-1201065426_81f0ca06-6a3d-4532-a16e-decdd33550d0 and staging token: load-tests-go-flink-batch-gbk-1-1201065426_81f0ca06-6a3d-4532-a16e-decdd33550d0
2022/12/01 10:05:43 Staged binary artifact with token: 
2022/12/01 10:05:45 Submitted job: load0tests0go0flink0batch0gbk0101201065426-root-1201100543-9608d127_4c35cef2-160d-4804-90b3-fb3839be5b16
2022/12/01 10:05:45 Job state: STOPPED
2022/12/01 10:05:45 Job state: STARTING
2022/12/01 10:05:45 Job state: RUNNING
2022/12/01 11:00:25  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: c38ccbc6547474c26607040fca9ccd02)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669888954790_0001_01_000004(beam-loadtests-go-gbk-flink-batch-730-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/12/01 11:00:25  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669888954790_0001_01_000004(beam-loadtests-go-gbk-flink-batch-730-w-3.c.apache-beam-testing.internal:8026) timed out.
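
The TimeoutException above comes from Flink's TaskManager heartbeat mechanism: when a TaskManager misses heartbeats for longer than the heartbeat.timeout interval, the JobMaster disconnects it and the job fails as shown. The snippet below is a minimal, hypothetical sketch of how that option is expressed through Flink's configuration API; it is for illustration only and is not part of this job's setup.

    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.configuration.HeartbeatManagerOptions;

    public class HeartbeatTimeoutSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // heartbeat.timeout (milliseconds) controls how long the JobMaster
            // waits before declaring a TaskManager lost; raised to 120 s here
            // purely as an illustration of the typed option.
            conf.set(HeartbeatManagerOptions.HEARTBEAT_TIMEOUT, 120_000L);
            System.out.println(conf.get(HeartbeatManagerOptions.HEARTBEAT_TIMEOUT));
        }
    }
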
2022/12/01 11:00:26 Job state: FAILED
2022/12/01 11:00:26 Failed to execute job: job load0tests0go0flink0batch0gbk0101201065426-root-1201100543-9608d127_4c35cef2-160d-4804-90b3-fb3839be5b16 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101201065426-root-1201100543-9608d127_4c35cef2-160d-4804-90b3-fb3839be5b16 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x184b6a8, 0xc000128000}, {0x169a74d?, 0x22f9928?}, {0xc0005c7e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 55m 20s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/jmy22nyrhtb22

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #729

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/729/display/redirect?page=changes>

Changes:

[Kenneth Knowles] Fix dependencies of archetype tasks

[Kenneth Knowles] Upgrade checker framework to 3.13.0

[Kenneth Knowles] Upgrade checker framework to 3.14.0

[Kenneth Knowles] Upgrade checker framework to 3.15.0

[Kenneth Knowles] Inline :sdks:java:core:buildDependents so we can incrementally split

[Moritz Mack] [Spark Dataset runner] Fix support for Java 11 (closes #24392)

[Moritz Mack] fix spotless

[noreply] Fix SparkReceiverIOIT test (#24375)

[noreply] Bump cloud.google.com/go/bigquery from 1.42.0 to 1.43.0 in /sdks

[noreply] Bump github.com/aws/aws-sdk-go-v2/feature/s3/manager in /sdks (#24348)

[noreply] pg_23079 remove replacing tabs at playground (#24285)

[noreply] [#24339] Make Slices use iterable coder instead of custom coder.

[noreply] Add custom inference fns to CHANGES.md (#24412)

[noreply] Better warning and Exception message in CalciteUtil (#24414)

[noreply] List breaking change #24339 in Changes.md (#24420)

[noreply] Allow composite output types in sql.Transform. (#24421)

[noreply] Add map_windows support to Go SDK (#24307)

[noreply] Deleted initialNumReaders parameter. (#24355)


------------------------------------------
[...truncated 31.17 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/30 10:05:51 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/30 10:05:51 Prepared job with id: load-tests-go-flink-batch-gbk-1-1130065420_e1305672-1fc2-48b0-9c3a-15afdeab7cbf and staging token: load-tests-go-flink-batch-gbk-1-1130065420_e1305672-1fc2-48b0-9c3a-15afdeab7cbf
2022/11/30 10:05:58 Staged binary artifact with token: 
2022/11/30 10:05:59 Submitted job: load0tests0go0flink0batch0gbk0101130065420-root-1130100558-2ac2047c_4a6b7b3d-288f-458d-ad80-032169119ebe
2022/11/30 10:05:59 Job state: STOPPED
2022/11/30 10:05:59 Job state: STARTING
2022/11/30 10:05:59 Job state: RUNNING
2022/11/30 10:58:22  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 5fc266c3fed9d5a56182fa7dbafa7117)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669802564428_0002_01_000004(beam-loadtests-go-gbk-flink-batch-729-w-4.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/30 10:58:22  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669802564428_0002_01_000004(beam-loadtests-go-gbk-flink-batch-729-w-4.c.apache-beam-testing.internal:8026) timed out.
2022/11/30 10:58:22 Job state: FAILED
2022/11/30 10:58:22 Failed to execute job: job load0tests0go0flink0batch0gbk0101130065420-root-1130100558-2ac2047c_4a6b7b3d-288f-458d-ad80-032169119ebe failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101130065420-root-1130100558-2ac2047c_4a6b7b3d-288f-458d-ad80-032169119ebe failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x18499e8, 0xc000120000}, {0x1698bf8?, 0x22f68a8?}, {0xc000023e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 53m 3s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/rhmma33jbclnk

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #728

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/728/display/redirect?page=changes>

Changes:

[Andrew Pilloud] Handle CompleteWorkStatus shutdown signal

[Kenneth Knowles] Upgrade checkerframework gradle plugin to 0.6.19

[Kenneth Knowles] Check for null in BeamFnDataGrpcMultiplexer

[Kenneth Knowles] Upgrade checkerframework to 3.12.0

[Andrew Pilloud] Simplify sdks/java/harness build

[Andrew Pilloud] Move configuration changes before shadowJar

[noreply] [Tour Of Beam] persistence_key for Pg::SaveSnippet (#24287)

[noreply] Get postcommits green and unsickbay (#24342)

[noreply] Fix workflow cron syntax (#24376)

[noreply] concurrency (#24332)

[Andrew Pilloud] Exclude :sdks:java:core from harness jar

[Andrew Pilloud] Enable shadowJar validation for sdks/java/harness

[Andrew Pilloud] Add missing portability runner dependencies

[noreply] Revert "Force discarding mode in with_fanout without rewindowing."

[Andrew Pilloud] Exclude jamm from harness jar

[Andrew Pilloud] Enforce GCP BOM on sdks/java/harness

[noreply] Bump pillow from 9.2.0 to 9.3.0 in

[noreply] Update precombine benchmark to better represent varied workloads (#24343)

[noreply] Merge pull request #24320: update bom to the latest one

[noreply] Merge pull request #24147: First step in adding schema update to Storage


------------------------------------------
[...truncated 89.95 KB...]
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/29 11:13:39 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/29 11:13:40 Prepared job with id: load-tests-go-flink-batch-gbk-6-1129065410_c8b0201f-35e5-4cf3-8578-17f525bfae27 and staging token: load-tests-go-flink-batch-gbk-6-1129065410_c8b0201f-35e5-4cf3-8578-17f525bfae27
2022/11/29 11:13:42 Staged binary artifact with token: 
2022/11/29 11:13:42 Submitted job: load0tests0go0flink0batch0gbk0601129065410-root-1129111342-549a420d_9eb2da08-7cd4-4334-8f87-ae2e3dda1c76
2022/11/29 11:13:42 Job state: STOPPED
2022/11/29 11:13:42 Job state: STARTING
2022/11/29 11:13:42 Job state: RUNNING
2022/11/29 11:14:02  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 2df16be7dcc714c13fcb68691fa57608)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.SchedulerNG.updateTaskExecutionState(SchedulerNG.java:78)
	at org.apache.flink.runtime.jobmaster.JobMaster.updateTaskExecutionState(JobMaster.java:443)
	at jdk.internal.reflect.GeneratedMethodAccessor22.invoke(Unknown Source)
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:566)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRpcInvocation$1(AkkaRpcActor.java:304)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:83)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcInvocation(AkkaRpcActor.java:302)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:217)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.lang.Exception: The data preparation for task 'GroupReduce (GroupReduce at CoGBK)' , caused an error: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error obtaining the sorted input: Thread 'SortMerger Reading Thread' terminated due to an exception: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
	at org.apache.flink.runtime.operators.BatchTask.run(BatchTask.java:487)
	at org.apache.flink.runtime.operators.BatchTask.invoke(BatchTask.java:357)
	at org.apache.flink.runtime.taskmanager.Task.runWithSystemExitMonitoring(Task.java:948)
	at org.apache.flink.runtime.taskmanager.Task.restoreAndInvoke(Task.java:927)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:741)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:563)
	at java.lang.Thread.run(Thread.java:829)
Caused by: org.apache.flink.util.WrappingRuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error obtaining the sorted input: Thread 'SortMerger Reading Thread' terminated due to an exception: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
	at org.apache.flink.runtime.operators.sort.ExternalSorter.getIterator(ExternalSorter.java:262)
	at org.apache.flink.runtime.operators.BatchTask.getInput(BatchTask.java:1222)
	at org.apache.flink.runtime.operators.GroupReduceDriver.prepare(GroupReduceDriver.java:105)
	at org.apache.flink.runtime.operators.BatchTask.run(BatchTask.java:479)
	... 6 more
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error obtaining the sorted input: Thread 'SortMerger Reading Thread' terminated due to an exception: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:395)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1999)
	at org.apache.flink.runtime.operators.sort.ExternalSorter.getIterator(ExternalSorter.java:259)
	... 9 more
Caused by: java.lang.RuntimeException: Error obtaining the sorted input: Thread 'SortMerger Reading Thread' terminated due to an exception: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
	at org.apache.flink.runtime.operators.sort.ExternalSorter.lambda$getIterator$1(ExternalSorter.java:256)
	at java.util.concurrent.CompletableFuture.uniExceptionally(CompletableFuture.java:986)
	at java.util.concurrent.CompletableFuture$UniExceptionally.tryFire(CompletableFuture.java:970)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506)
	at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2088)
	at org.apache.flink.runtime.operators.sort.ExternalSorterBuilder.lambda$doBuild$1(ExternalSorterBuilder.java:397)
	at org.apache.flink.runtime.operators.sort.ThreadBase.internalHandleException(ThreadBase.java:121)
	at org.apache.flink.runtime.operators.sort.ThreadBase.run(ThreadBase.java:75)
Caused by: java.io.IOException: Thread 'SortMerger Reading Thread' terminated due to an exception: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
	at org.apache.flink.runtime.operators.sort.ThreadBase.run(ThreadBase.java:80)
Caused by: org.apache.flink.runtime.io.network.netty.exception.RemoteTransportException: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
	at org.apache.flink.runtime.io.network.netty.CreditBasedPartitionRequestClientHandler.channelInactive(CreditBasedPartitionRequestClientHandler.java:127)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:262)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:248)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.fireChannelInactive(AbstractChannelHandlerContext.java:241)
	at org.apache.flink.shaded.netty4.io.netty.channel.ChannelInboundHandlerAdapter.channelInactive(ChannelInboundHandlerAdapter.java:81)
	at org.apache.flink.runtime.io.network.netty.NettyMessageClientDecoderDelegate.channelInactive(NettyMessageClientDecoderDelegate.java:94)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:262)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:248)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.fireChannelInactive(AbstractChannelHandlerContext.java:241)
	at org.apache.flink.shaded.netty4.io.netty.channel.DefaultChannelPipeline$HeadContext.channelInactive(DefaultChannelPipeline.java:1405)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:262)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:248)
	at org.apache.flink.shaded.netty4.io.netty.channel.DefaultChannelPipeline.fireChannelInactive(DefaultChannelPipeline.java:901)
	at org.apache.flink.shaded.netty4.io.netty.channel.AbstractChannel$AbstractUnsafe$8.run(AbstractChannel.java:831)
	at org.apache.flink.shaded.netty4.io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164)
	at org.apache.flink.shaded.netty4.io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:469)
	at org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384)
	at org.apache.flink.shaded.netty4.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:986)
	at org.apache.flink.shaded.netty4.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	at java.lang.Thread.run(Thread.java:829)
2022/11/29 11:14:02  (): org.apache.flink.runtime.io.network.netty.exception.RemoteTransportException: Connection unexpectedly closed by remote task manager 'beam-loadtests-go-gbk-flink-batch-728-w-0.c.apache-beam-testing.internal/10.128.0.244:41517'. This might indicate that the remote task manager was lost.
2022/11/29 11:14:02 Job state: FAILED
2022/11/29 11:14:02 Failed to execute job: job load0tests0go0flink0batch0gbk0601129065410-root-1129111342-549a420d_9eb2da08-7cd4-4334-8f87-ae2e3dda1c76 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0601129065410-root-1129111342-549a420d_9eb2da08-7cd4-4334-8f87-ae2e3dda1c76 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000128000}, {0x1697797?, 0x22f38a8?}, {0xc00029de70?, 0x0?, 0xc8?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 31s
12 actionable tasks: 2 executed, 10 up-to-date

Publishing build scan...
https://gradle.com/s/mujcpgqjqmtso

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #727

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/727/display/redirect>

Changes:


------------------------------------------
[...truncated 31.15 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/28 10:05:37 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/28 10:05:37 Prepared job with id: load-tests-go-flink-batch-gbk-1-1128065407_314d5a06-1dd3-439c-98ac-6ac3ab5c23eb and staging token: load-tests-go-flink-batch-gbk-1-1128065407_314d5a06-1dd3-439c-98ac-6ac3ab5c23eb
2022/11/28 10:05:46 Staged binary artifact with token: 
2022/11/28 10:05:49 Submitted job: load0tests0go0flink0batch0gbk0101128065407-root-1128100547-af3e65b2_9e3b8668-f884-476e-8767-04800bc6ba7e
2022/11/28 10:05:49 Job state: STOPPED
2022/11/28 10:05:49 Job state: STARTING
2022/11/28 10:05:49 Job state: RUNNING
2022/11/28 10:58:58  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: a1a90344059edc8b11b0c8e1fb521c28)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669629769912_0001_01_000004(beam-loadtests-go-gbk-flink-batch-727-w-2.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/28 10:58:58  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669629769912_0001_01_000004(beam-loadtests-go-gbk-flink-batch-727-w-2.c.apache-beam-testing.internal:8026) timed out.
2022/11/28 10:58:58 Job state: FAILED
2022/11/28 10:58:58 Failed to execute job: job load0tests0go0flink0batch0gbk0101128065407-root-1128100547-af3e65b2_9e3b8668-f884-476e-8767-04800bc6ba7e failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101128065407-root-1128100547-af3e65b2_9e3b8668-f884-476e-8767-04800bc6ba7e failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000120000}, {0x1697797?, 0x22f38a8?}, {0xc000509e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 53m 41s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/6l2367jaqk3yg

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #726

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/726/display/redirect>

Changes:


------------------------------------------
[...truncated 31.20 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/27 10:05:39 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/27 10:05:40 Prepared job with id: load-tests-go-flink-batch-gbk-1-1127065414_fb17d781-48af-4ac9-9963-71d0f076fe07 and staging token: load-tests-go-flink-batch-gbk-1-1127065414_fb17d781-48af-4ac9-9963-71d0f076fe07
2022/11/27 10:05:46 Staged binary artifact with token: 
2022/11/27 10:05:47 Submitted job: load0tests0go0flink0batch0gbk0101127065414-root-1127100546-3be41483_29510c75-be5f-4e4b-a6a1-121b5bd0c3f1
2022/11/27 10:05:47 Job state: STOPPED
2022/11/27 10:05:47 Job state: STARTING
2022/11/27 10:05:47 Job state: RUNNING
2022/11/27 11:01:08  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 6d0ff1673a9af8c9fa4b94a56d108cf5)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669543379383_0001_01_000002(beam-loadtests-go-gbk-flink-batch-726-w-2.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/27 11:01:08  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669543379383_0001_01_000002(beam-loadtests-go-gbk-flink-batch-726-w-2.c.apache-beam-testing.internal:8026) timed out.
2022/11/27 11:01:08 Job state: FAILED
2022/11/27 11:01:08 Failed to execute job: job load0tests0go0flink0batch0gbk0101127065414-root-1127100546-3be41483_29510c75-be5f-4e4b-a6a1-121b5bd0c3f1 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101127065414-root-1127100546-3be41483_29510c75-be5f-4e4b-a6a1-121b5bd0c3f1 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc00004e0c0}, {0x1697797?, 0x22f38a8?}, {0xc0006efe70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 55m 36s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/nfig3zj3sbyuk

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #725

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/725/display/redirect?page=changes>

Changes:

[noreply] Update java-multi-language-pipelines.md (#24345)


------------------------------------------
[...truncated 31.10 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/26 10:05:23 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/26 10:05:24 Prepared job with id: load-tests-go-flink-batch-gbk-1-1126065410_351e2007-f557-4daf-b078-cf86c9114044 and staging token: load-tests-go-flink-batch-gbk-1-1126065410_351e2007-f557-4daf-b078-cf86c9114044
2022/11/26 10:05:33 Staged binary artifact with token: 
2022/11/26 10:05:36 Submitted job: load0tests0go0flink0batch0gbk0101126065410-root-1126100533-585f26ae_dc69a8d3-8143-4086-95f9-5287502b2806
2022/11/26 10:05:36 Job state: STOPPED
2022/11/26 10:05:36 Job state: STARTING
2022/11/26 10:05:36 Job state: RUNNING
2022/11/26 11:03:59  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 011c1532170813d1f857d58d6428a5d2)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669456952431_0001_01_000003(beam-loadtests-go-gbk-flink-batch-725-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/26 11:03:59  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669456952431_0001_01_000003(beam-loadtests-go-gbk-flink-batch-725-w-3.c.apache-beam-testing.internal:8026) timed out.
2022/11/26 11:03:59 Job state: FAILED
2022/11/26 11:03:59 Failed to execute job: job load0tests0go0flink0batch0gbk0101126065410-root-1126100533-585f26ae_dc69a8d3-8143-4086-95f9-5287502b2806 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101126065410-root-1126100533-585f26ae_dc69a8d3-8143-4086-95f9-5287502b2806 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc00004e0c0}, {0x1697797?, 0x22f38a8?}, {0xc00036fe70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 58m 57s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/uerqoi3awbu3i

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #724

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/724/display/redirect>

Changes:


------------------------------------------
[...truncated 31.28 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/25 10:06:07 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/25 10:06:07 Prepared job with id: load-tests-go-flink-batch-gbk-1-1125065415_82d8da95-c1aa-4dfb-8a6e-77ef7665f03e and staging token: load-tests-go-flink-batch-gbk-1-1125065415_82d8da95-c1aa-4dfb-8a6e-77ef7665f03e
2022/11/25 10:06:14 Staged binary artifact with token: 
2022/11/25 10:06:15 Submitted job: load0tests0go0flink0batch0gbk0101125065415-root-1125100614-c2204e39_2fdd5ff5-83a0-4a5e-86ca-08c14b044219
2022/11/25 10:06:15 Job state: STOPPED
2022/11/25 10:06:15 Job state: STARTING
2022/11/25 10:06:15 Job state: RUNNING
2022/11/25 11:00:00  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 5780e879c4c328fab36cb437a922941b)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669370579231_0001_01_000003(beam-loadtests-go-gbk-flink-batch-724-w-0.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/25 11:00:00  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669370579231_0001_01_000003(beam-loadtests-go-gbk-flink-batch-724-w-0.c.apache-beam-testing.internal:8026) timed out.
2022/11/25 11:00:00 Job state: FAILED
2022/11/25 11:00:00 Failed to execute job: job load0tests0go0flink0batch0gbk0101125065415-root-1125100614-c2204e39_2fdd5ff5-83a0-4a5e-86ca-08c14b044219 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101125065415-root-1125100614-c2204e39_2fdd5ff5-83a0-4a5e-86ca-08c14b044219 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000128000}, {0x1697797?, 0x22f38a8?}, {0xc000323e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54m 9s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/dgpj3nsxpzmxq

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #723

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/723/display/redirect?page=changes>

Changes:

[bulat.safiullin] [Website] update table text content overflow #23460

[Moritz Mack] [Spark dataset runner] Fix translation to run in the evaluation thread

[Moritz Mack] [Metrics] Add 'performance tests' tag to JMH dashboard (related to

[noreply] Bump github.com/aws/aws-sdk-go-v2/credentials in /sdks (#24318)

[noreply] Update apache beam installation in notebook (#24336)

[noreply] Adds GCP core dependency to the test expansion service (#24308)

[noreply] Update dataflow containers to coincide with objsize 0.6.1 update

[noreply] Add test configurations for deterministic outputs on Dataflow (#24325)

[noreply] Updates ExpansionService to support dynamically discovering and

[noreply] Enable streaming runner v2 tests that were forgotten to be enabled.

[noreply] A schema transform implementation for SpannerIO.Write (#24278)


------------------------------------------
[...truncated 31.28 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/24 10:05:50 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/24 10:05:50 Prepared job with id: load-tests-go-flink-batch-gbk-1-1124065416_ce791895-d877-483c-b03b-5cc8392bf603 and staging token: load-tests-go-flink-batch-gbk-1-1124065416_ce791895-d877-483c-b03b-5cc8392bf603
2022/11/24 10:05:57 Staged binary artifact with token: 
2022/11/24 10:05:58 Submitted job: load0tests0go0flink0batch0gbk0101124065416-root-1124100557-10de5aff_cb50797b-7c16-4ad2-a49f-ec115576d31d
2022/11/24 10:05:59 Job state: STOPPED
2022/11/24 10:05:59 Job state: STARTING
2022/11/24 10:05:59 Job state: RUNNING
2022/11/24 11:04:22  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 70ea866d07cfb697effc7d732e717565)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669284164442_0001_01_000002(beam-loadtests-go-gbk-flink-batch-723-w-2.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/24 11:04:22  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669284164442_0001_01_000002(beam-loadtests-go-gbk-flink-batch-723-w-2.c.apache-beam-testing.internal:8026) timed out.
2022/11/24 11:04:22 Job state: FAILED
2022/11/24 11:04:22 Failed to execute job: job load0tests0go0flink0batch0gbk0101124065416-root-1124100557-10de5aff_cb50797b-7c16-4ad2-a49f-ec115576d31d failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101124065416-root-1124100557-10de5aff_cb50797b-7c16-4ad2-a49f-ec115576d31d failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc00004e0c0}, {0x1697797?, 0x22f38a8?}, {0xc00030be70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 58m 53s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/47ncysz6juct2

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #722

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/722/display/redirect?page=changes>

Changes:

[bulat.safiullin] [Website] change share-your-story link, add text to ADD_CASE_STUDY.md,

[Robert Bradshaw] Add a portable runner that renders pipelines as a dot graph.

[Robert Bradshaw] Add basic tests for render runner.

[Robert Bradshaw] Add the ability to pass a pipeline proto directly.

[Robert Bradshaw] lint

[mr.malarg] pg_23865 fix selected example at list

[leha] Expand all categories that contain a selected example (#23865)

[Valentyn Tymofieiev] Fix typo.

[Valentyn Tymofieiev] Serve the graph when output file is not specified.

[Valentyn Tymofieiev] Serve the graph when output file is not specified.

[Valentyn Tymofieiev] Fix parsing of standalone protos.

[Valentyn Tymofieiev] Support reading from GCS.

[Valentyn Tymofieiev] Add text logging.

[Valentyn Tymofieiev] fix typo.

[Valentyn Tymofieiev] Some lint and yapf.

[Robert Bradshaw] Fix dot detection logic.

[Robert Bradshaw] fix error detected by lint

[Robert Bradshaw] Make gcs an optional dependency.

[Robert Bradshaw] return rather than sys.exit

[kn1kn1] Fix mvn command to refer the GCP_REGION variable

[Robert Bradshaw] lint

[noreply] Bump github.com/aws/aws-sdk-go-v2/feature/s3/manager in /sdks (#24280)

[noreply] Copy editing the machine learning pages (#24301)

[noreply] TensorRT Custom Inference Function Implementation (#24039)

[noreply] Teach Azure Filesystem to authenticate using DefaultAzureCredential in

[noreply] Apply suggestions from code review

[noreply] Add retry to test connections (#23757)

[Robert Bradshaw] More cleanup, mypy.

[noreply] fixed typo

[noreply] [#24266] Update release candidate script to use -PisRelease (#24269)

[noreply] Golang SpannerIO Implementation (#23285)

[noreply] Add rootCaCertificate option to SplunkIO (#24229)

[noreply] [Playground] Remove example bucket (#24198)

[noreply] Extract Go and Python Beam symbols for Playground (#23378)

[noreply] Dask runner tests action (#24324)

[Robert Bradshaw] lint


------------------------------------------
[...truncated 31.25 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/23 10:05:43 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/23 10:05:43 Prepared job with id: load-tests-go-flink-batch-gbk-1-1123065428_a6cae6de-ed88-4837-abc0-80299e15f1c5 and staging token: load-tests-go-flink-batch-gbk-1-1123065428_a6cae6de-ed88-4837-abc0-80299e15f1c5
2022/11/23 10:05:50 Staged binary artifact with token: 
2022/11/23 10:05:51 Submitted job: load0tests0go0flink0batch0gbk0101123065428-root-1123100550-f17f4681_b66d635f-0331-4cf2-918b-b55c90778457
2022/11/23 10:05:51 Job state: STOPPED
2022/11/23 10:05:51 Job state: STARTING
2022/11/23 10:05:51 Job state: RUNNING
2022/11/23 10:59:22  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: d46fbe3d3c7e678ce78293d951d2d7b7)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669197762273_0002_01_000002(beam-loadtests-go-gbk-flink-batch-722-w-4.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/23 10:59:22  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669197762273_0002_01_000002(beam-loadtests-go-gbk-flink-batch-722-w-4.c.apache-beam-testing.internal:8026) timed out.
2022/11/23 10:59:23 Job state: FAILED
2022/11/23 10:59:23 Failed to execute job: job load0tests0go0flink0batch0gbk0101123065428-root-1123100550-f17f4681_b66d635f-0331-4cf2-918b-b55c90778457 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101123065428-root-1123100550-f17f4681_b66d635f-0331-4cf2-918b-b55c90778457 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000120000}, {0x1697797?, 0x22f38a8?}, {0xc000565e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54m
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/2ubaztsbrbi2c

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #721

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/721/display/redirect?page=changes>

Changes:

[Robert Bradshaw] Work around coders bug on Dataflow.

[Robert Bradshaw] Add a test runner for running multiple pipelines in parallel.

[Robert Bradshaw] Run tests as single Dataflow pipeline.

[Robert Bradshaw] Github hook for dataflow pipelines.

[Robert Bradshaw] Guard dataflow run against GCP credentials.

[Robert Bradshaw] Guard running of precommit against having variables set.

[bulat.safiullin] [Website] add lazy loading attr to images #24250

[noreply] [Playground] Use current Go SDK by default (#24256)

[noreply] Fix dashboard links

[noreply] Bump github.com/aws/aws-sdk-go-v2/config from 1.18.1 to 1.18.2 in /sdks

[noreply] Add warning about google-cloud-platform-core dependency change in #24235

[noreply] Add GetSize implementation for DetectNewPartitions SDF (#23997)

[noreply] Add ZstdCoder to wrap coders with Zstandard compression (#24093)

[noreply] [#24261] Update to objsize 0.6.1 (#24262)

[noreply] Create template for failing tests. (#21728)

[noreply] Add record_metrics argument to utils.BatchElements (#23701)

[noreply] Performance test parameters followup fix (#24291)


------------------------------------------
[...truncated 31.20 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/22 13:41:45 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/22 13:41:46 Prepared job with id: load-tests-go-flink-batch-gbk-1-1122050224_689aa069-c26f-417b-9f7d-f93163509554 and staging token: load-tests-go-flink-batch-gbk-1-1122050224_689aa069-c26f-417b-9f7d-f93163509554
2022/11/22 13:41:53 Staged binary artifact with token: 
2022/11/22 13:41:55 Submitted job: load0tests0go0flink0batch0gbk0101122050224-root-1122134153-7ae9f899_60a972d4-5610-4aee-ace3-3890cd8b23f1
2022/11/22 13:41:55 Job state: STOPPED
2022/11/22 13:41:55 Job state: STARTING
2022/11/22 13:41:55 Job state: RUNNING
2022/11/22 14:34:54  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 104a763bcddab4bf6e49faf3a00fa210)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669124330912_0001_01_000002(beam-loadtests-go-gbk-flink-batch-721-w-2.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/22 14:34:54  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669124330912_0001_01_000002(beam-loadtests-go-gbk-flink-batch-721-w-2.c.apache-beam-testing.internal:8026) timed out.
2022/11/22 14:34:54 Job state: FAILED
2022/11/22 14:34:54 Failed to execute job: job load0tests0go0flink0batch0gbk0101122050224-root-1122134153-7ae9f899_60a972d4-5610-4aee-ace3-3890cd8b23f1 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101122050224-root-1122134153-7ae9f899_60a972d4-5610-4aee-ace3-3890cd8b23f1 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000120000}, {0x1697797?, 0x22f38a8?}, {0xc00029be70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb
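Note: the panic above is the standard failure path of a Beam Go pipeline binary. The following is a minimal sketch of that pattern, assuming the usual beamx entry point; it is not the actual group_by_key.go source. beamx.Run returns a non-nil error once the runner reports the FAILED terminal state, and log.Fatalf logs the message and then panics, which is what produces the goroutine trace shown here.

    package main

    import (
        "context"
        "flag"

        "github.com/apache/beam/sdks/v2/go/pkg/beam"
        "github.com/apache/beam/sdks/v2/go/pkg/beam/log"
        "github.com/apache/beam/sdks/v2/go/pkg/beam/x/beamx"
    )

    func main() {
        flag.Parse()
        beam.Init()
        ctx := context.Background()

        p := beam.NewPipeline()
        // The load test builds its GroupByKey pipeline on p.Root() here.

        // A FAILED terminal job state comes back as a non-nil error;
        // log.Fatalf then panics, yielding the trace above.
        if err := beamx.Run(ctx, p); err != nil {
            log.Fatalf(ctx, "Failed to execute job: %v", err)
        }
    }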

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 53m 30s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/cqb7xha5r3go6

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #720

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/720/display/redirect>

Changes:


------------------------------------------
[...truncated 31.27 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/21 10:05:50 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/21 10:05:51 Prepared job with id: load-tests-go-flink-batch-gbk-1-1121065418_b5d654f0-a0d7-4f88-b84f-adcc4d9a6999 and staging token: load-tests-go-flink-batch-gbk-1-1121065418_b5d654f0-a0d7-4f88-b84f-adcc4d9a6999
2022/11/21 10:05:57 Staged binary artifact with token: 
2022/11/21 10:05:59 Submitted job: load0tests0go0flink0batch0gbk0101121065418-root-1121100557-3160df23_dcbecb2e-ac2f-418f-8074-7cdcc3de96b7
2022/11/21 10:05:59 Job state: STOPPED
2022/11/21 10:05:59 Job state: STARTING
2022/11/21 10:05:59 Job state: RUNNING
2022/11/21 11:00:28  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 4f43a49042997dcd169288b12776f5fa)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669024970692_0001_01_000003(beam-loadtests-go-gbk-flink-batch-720-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/21 11:00:28  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1669024970692_0001_01_000003(beam-loadtests-go-gbk-flink-batch-720-w-3.c.apache-beam-testing.internal:8026) timed out.
2022/11/21 11:00:29 Job state: FAILED
2022/11/21 11:00:29 Failed to execute job: job load0tests0go0flink0batch0gbk0101121065418-root-1121100557-3160df23_dcbecb2e-ac2f-418f-8074-7cdcc3de96b7 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101121065418-root-1121100557-3160df23_dcbecb2e-ac2f-418f-8074-7cdcc3de96b7 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000120000}, {0x1697797?, 0x22f38a8?}, {0xc0003ade70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54m 56s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/vzwqmd7lr2ijy

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #719

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/719/display/redirect?page=changes>

Changes:

[samuelw] Fix OrderedListState for Dataflow Streaming pipelines on SE.


------------------------------------------
[...truncated 31.28 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/20 10:05:41 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/20 10:05:42 Prepared job with id: load-tests-go-flink-batch-gbk-1-1120065415_b99de2b6-d069-4ebb-8007-131ef6b5b0f4 and staging token: load-tests-go-flink-batch-gbk-1-1120065415_b99de2b6-d069-4ebb-8007-131ef6b5b0f4
2022/11/20 10:05:50 Staged binary artifact with token: 
2022/11/20 10:05:52 Submitted job: load0tests0go0flink0batch0gbk0101120065415-root-1120100551-9bd95f0f_f9773a90-5a34-4b3c-88c1-18c5d4c3538e
2022/11/20 10:05:52 Job state: STOPPED
2022/11/20 10:05:52 Job state: STARTING
2022/11/20 10:05:52 Job state: RUNNING
2022/11/20 11:04:47  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 94136ac4aa73ce87e3b4828eb09c3f92)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668938566552_0001_01_000005(beam-loadtests-go-gbk-flink-batch-719-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/20 11:04:47  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668938566552_0001_01_000005(beam-loadtests-go-gbk-flink-batch-719-w-3.c.apache-beam-testing.internal:8026) timed out.
2022/11/20 11:04:47 Job state: FAILED
2022/11/20 11:04:48 Failed to execute job: job load0tests0go0flink0batch0gbk0101120065415-root-1120100551-9bd95f0f_f9773a90-5a34-4b3c-88c1-18c5d4c3538e failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101120065415-root-1120100551-9bd95f0f_f9773a90-5a34-4b3c-88c1-18c5d4c3538e failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000120000}, {0x1697797?, 0x22f38a8?}, {0xc00002de70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 59m 27s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/mwzgp43glrk6y

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #718

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/718/display/redirect?page=changes>

Changes:

[Kenneth Knowles] Remove overly broad CanIgnoreReturnValue

[Kenneth Knowles] Add @RunWith annotation to pubsublite test

[noreply] Move dashboard links to dedicated section

[noreply] [Spark dataset runner] Cache datasets if used multiple times  (#24009)

[noreply] Remove section from troubleshooting about fixed dictionary issue

[noreply] Fix flink XVR tests (#24228)

[noreply] Adding the list of example notebooks to the ML readme file. (#24255)

[noreply] Updating timezone for Beam 2.43.0 release (#24258)

[Kenneth Knowles] Only skip checkerframework if explicitly requested

[noreply] Issue#21430 Updated dataframe io to avoid pruning

[noreply] SingleStoreIO (#23535)

[chamikaramj] Update Java Multi-lang quickstart after the Beam 2.43.0 release


------------------------------------------
[...truncated 31.26 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/19 10:05:48 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/19 10:05:49 Prepared job with id: load-tests-go-flink-batch-gbk-1-1119065427_2bc9cc0a-e0e5-41ba-9815-eaf817024fa0 and staging token: load-tests-go-flink-batch-gbk-1-1119065427_2bc9cc0a-e0e5-41ba-9815-eaf817024fa0
2022/11/19 10:05:55 Staged binary artifact with token: 
2022/11/19 10:05:57 Submitted job: load0tests0go0flink0batch0gbk0101119065427-root-1119100555-bc3434e9_5d970462-34b6-478f-b175-07337428361c
2022/11/19 10:05:57 Job state: STOPPED
2022/11/19 10:05:57 Job state: STARTING
2022/11/19 10:05:57 Job state: RUNNING
2022/11/19 10:59:57  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 266aa5982a137586a1a44fd296f32322)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668852172786_0001_01_000004(beam-loadtests-go-gbk-flink-batch-718-w-1.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/19 10:59:57  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668852172786_0001_01_000004(beam-loadtests-go-gbk-flink-batch-718-w-1.c.apache-beam-testing.internal:8026) timed out.
2022/11/19 10:59:58 Job state: FAILED
2022/11/19 10:59:58 Failed to execute job: job load0tests0go0flink0batch0gbk0101119065427-root-1119100555-bc3434e9_5d970462-34b6-478f-b175-07337428361c failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101119065427-root-1119100555-bc3434e9_5d970462-34b6-478f-b175-07337428361c failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc00004e0d0}, {0x1697797?, 0x22f38a8?}, {0xc000647e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54m 36s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/u6fq3lqqslawu

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #717

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/717/display/redirect?page=changes>

Changes:

[Moritz Mack] [Metrics] Add new performance dashboard for Java JMH benchmarks (closes

[Kenneth Knowles] Fix checkArgument format string in ByteKeyRange

[Yi Hu] Unify test parameters for certain IOs based on test row and grafana

[noreply] Bump github.com/aws/aws-sdk-go-v2/config from 1.18.0 to 1.18.1 in /sdks

[noreply] Add enableGzipHttpCompression option to SplunkIO (#24197)

[noreply] [Playground] Examples CI restore (#24155)

[noreply] Bump github.com/aws/aws-sdk-go-v2/service/s3 in /sdks (#24220)

[noreply] Issue#24161 Updated docstring for Clusters class

[Kenneth Knowles] Fix checkArgument format string in BigQueryQueryHelper

[Kenneth Knowles] Fix checkArgument calls in BQ dynamic destinations

[noreply] Force discarding mode in with_fanout without rewindowing. (#23828)

[noreply] Removed trailing whitespaces.

[noreply] Clarify that SDF authors need to make the restriction sizing method

[noreply] Remove google-cloud-platform-core dependency from harness (#24235)

[noreply] Document our benchmarks (#24216)

[noreply] Website updates for Beam 2.43.0 release (#24044)

[Kenneth Knowles] Add @RunWith annotation to BQ test class

[chamikaramj] Fix release date

[chamikaramj] Few more fixes to the Website


------------------------------------------
[...truncated 31.13 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/18 10:05:23 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/18 10:05:23 Prepared job with id: load-tests-go-flink-batch-gbk-1-1118065429_5ec37746-ba6d-4666-9e62-32df0eb9623a and staging token: load-tests-go-flink-batch-gbk-1-1118065429_5ec37746-ba6d-4666-9e62-32df0eb9623a
2022/11/18 10:05:33 Staged binary artifact with token: 
2022/11/18 10:05:36 Submitted job: load0tests0go0flink0batch0gbk0101118065429-root-1118100533-6febb3_658d5f47-78f2-4698-9a00-4afefe29d7e6
2022/11/18 10:05:36 Job state: STOPPED
2022/11/18 10:05:36 Job state: STARTING
2022/11/18 10:05:36 Job state: RUNNING
2022/11/18 10:59:44  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: c1bdbfa9b9174095dc0b560affe02d04)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668765746244_0001_01_000003(beam-loadtests-go-gbk-flink-batch-717-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/18 10:59:44  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668765746244_0001_01_000003(beam-loadtests-go-gbk-flink-batch-717-w-3.c.apache-beam-testing.internal:8026) timed out.
2022/11/18 10:59:44 Job state: FAILED
2022/11/18 10:59:44 Failed to execute job: job load0tests0go0flink0batch0gbk0101118065429-root-1118100533-6febb3_658d5f47-78f2-4698-9a00-4afefe29d7e6 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101118065429-root-1118100533-6febb3_658d5f47-78f2-4698-9a00-4afefe29d7e6 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc00004e0d0}, {0x1697797?, 0x22f38a8?}, {0xc000317e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb
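
The goroutine trace above is the Go SDK side of the failure: once the job reaches the FAILED state, the run call returns an error and log.Fatalf (log.go:153 in the trace) logs it and then panics with the same text, which is why both the "Failed to execute job: ..." line and the matching "panic: ..." line appear. A minimal sketch of that launch-and-fail wiring, assuming the common beamx.Run entry point rather than the exact code in group_by_key.go:

package main

import (
	"context"

	"github.com/apache/beam/sdks/v2/go/pkg/beam"
	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
	"github.com/apache/beam/sdks/v2/go/pkg/beam/x/beamx"
)

func main() {
	beam.Init()
	p, s := beam.NewPipelineWithRoot()
	beam.Impulse(s) // stand-in for the GBK load-test transforms

	// beamx.Run submits the pipeline to the configured runner (the portable
	// Flink runner in these tests) and returns an error once the job fails.
	if err := beamx.Run(context.Background(), p); err != nil {
		// log.Fatalf logs "Failed to execute job: ..." and then panics with
		// the same message, producing the pair of lines seen above.
		log.Fatalf(context.Background(), "Failed to execute job: %v", err)
	}
}

So the panic is only the messenger; the actionable root cause is the Flink-side error quoted earlier, here a TaskManager heartbeat timeout.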

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54m 49s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/vfvqcjtqv3y7a

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #716

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/716/display/redirect?page=changes>

Changes:

[egalpin] Uses _all to follow alias/datastreams when estimating index size

[chamikaramj] Add a reference to Java RunInference example

[Jozef Vilcek] Re-use serializable pipeline options when already available (#24192)

[lakshmanansathya] refs: issue-24196, fix broken hyperlink

[noreply] Fix Python PostCommit Example CustomPTransformIT on portable (#24159)

[noreply] revert upgrade to go 1.19 for action unit tests (#24189)

[noreply] Use only ValueProviders in SpannerConfig (#24156)

[noreply] [Tour of Beam] [Frontend] Content tree URLs (#23776)

[noreply] Python TextIO Performance Test (#23951)

[Chamikara Madhusanka Jayalath] Temporary update Python RC validation job

[egalpin] Adds test for following aliases when estimating index size

[Chamikara Madhusanka Jayalath] updates

[Chamikara Madhusanka Jayalath] updates

[noreply] Fix PythonLint (#24219)

[noreply] Bump loader-utils from 1.4.1 to 1.4.2 in


------------------------------------------
[...truncated 31.43 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
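
A gloss on the pipeline proto above: coder c1 (a kv of two bytes coders) is the synthetic KV<[]byte, []byte> input to the GroupByKey, and c4 (kv of bytes and iterable-of-bytes) is its grouped output. A minimal Go sketch that yields the same coder shapes, using a hypothetical stand-in generator (genFn, buildGBK, and N are illustrative names) instead of the row-configured synthetic source:

package gbkshape // hypothetical package name for this sketch

import (
	"github.com/apache/beam/sdks/v2/go/pkg/beam"
)

// genFn is a hypothetical stand-in for the synthetic source whose options
// (num_records, key_size, value_size, num_hot_keys, ...) are carried in the
// beam:coder:row:v1 payload above; it simply emits KV<[]byte, []byte> pairs,
// the shape encoded by coder c1. DoFn registration via the register package
// is omitted for brevity; the reflective fallback suffices for a sketch.
type genFn struct{ N int }

func (f *genFn) ProcessElement(_ []byte, emit func(k, v []byte)) {
	for i := 0; i < f.N; i++ {
		emit([]byte{byte(i % 10)}, []byte{byte(i)})
	}
}

// buildGBK wires the generator into a GroupByKey. The grouped output is
// KV<[]byte, iterable<[]byte>>, which is coder c4 (c0 plus c3) in the dump.
func buildGBK(s beam.Scope) beam.PCollection {
	kvs := beam.ParDo(s, &genFn{N: 1000}, beam.Impulse(s))
	return beam.GroupByKey(s, kvs)
}

A pipeline built this way would be attached to the Scope and launched with the beamx.Run / log.Fatalf pattern sketched after the first stack trace above.
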
2022/11/17 10:06:47 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/17 10:06:48 Prepared job with id: load-tests-go-flink-batch-gbk-1-1117065423_9a8d6917-6ff0-4754-b704-87cee23529a1 and staging token: load-tests-go-flink-batch-gbk-1-1117065423_9a8d6917-6ff0-4754-b704-87cee23529a1
2022/11/17 10:06:53 Staged binary artifact with token: 
2022/11/17 10:06:55 Submitted job: load0tests0go0flink0batch0gbk0101117065423-root-1117100654-6a6c946_9ae3352e-458c-4bfc-8411-2d76a2110949
2022/11/17 10:06:55 Job state: STOPPED
2022/11/17 10:06:55 Job state: STARTING
2022/11/17 10:06:55 Job state: RUNNING
2022/11/17 11:00:46  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 1fc6aefb740e4f93f217c9b05b6dde2d)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668679390344_0001_01_000002(beam-loadtests-go-gbk-flink-batch-716-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/17 11:00:46  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668679390344_0001_01_000002(beam-loadtests-go-gbk-flink-batch-716-w-3.c.apache-beam-testing.internal:8026) timed out.
2022/11/17 11:00:46 Job state: FAILED
2022/11/17 11:00:46 Failed to execute job: job load0tests0go0flink0batch0gbk0101117065423-root-1117100654-6a6c946_9ae3352e-458c-4bfc-8411-2d76a2110949 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101117065423-root-1117100654-6a6c946_9ae3352e-458c-4bfc-8411-2d76a2110949 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc00004e0d0}, {0x1697797?, 0x22f38a8?}, {0xc00056de70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54m 37s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/oxcwwqmqjinfw

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #715

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/715/display/redirect?page=changes>

Changes:

[Kenneth Knowles] Fix arguments to checkState in BatchViewOverrides

[bulat.safiullin] [Website] update pre tag copy link styles #23064

[noreply] Bump github.com/aws/aws-sdk-go-v2/feature/s3/manager in /sdks (#24131)

[noreply] Editorial review of the ML notebooks. (#24125)

[noreply] Configure flutter_code_editor options with Hugo shortcode (#23926)

[noreply] Eliminate CalciteUtil.CharType logical type (#24013)

[noreply] [Playground] Move Playground in GKE and Infrastructure change (#23928)

[noreply] Fix broken notebook (#24179)

[noreply] Add error reporting for BatchConverter match failure (#24022)

[noreply] Update automation to use Go 1.19 (#24175)

[noreply] Fix broken json for notebook (#24183)

[noreply] Using Teardown context instead of deprecated finalize (#24180)

[noreply] [Python]Support pipe operator as Union (PEP -604) (#24106)

[noreply] Updated README of Interactive Beam

[noreply] Minor update

[noreply] Add custom inference function support to the PyTorch model handler

[noreply] Strip FGAC database role from changestreams metadata requests (#24177)


------------------------------------------
[...truncated 31.31 KB...]
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/16 10:06:05 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/16 10:06:05 Prepared job with id: load-tests-go-flink-batch-gbk-1-1116065405_9d7610a1-9b1d-49a1-8c2a-d29d732ee556 and staging token: load-tests-go-flink-batch-gbk-1-1116065405_9d7610a1-9b1d-49a1-8c2a-d29d732ee556
2022/11/16 10:06:12 Staged binary artifact with token: 
2022/11/16 10:06:14 Submitted job: load0tests0go0flink0batch0gbk0101116065405-root-1116100612-4462e0e_f97f3f7e-e806-475d-85d7-f21c31cc4400
2022/11/16 10:06:14 Job state: STOPPED
2022/11/16 10:06:14 Job state: STARTING
2022/11/16 10:06:14 Job state: RUNNING
2022/11/16 10:58:54  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 39a5bc30fd5955953d0d6de2eb542c08)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.UpdateSchedulerNgOnInternalFailuresListener.notifyTaskFailure(UpdateSchedulerNgOnInternalFailuresListener.java:51)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraph.notifySchedulerNgAboutInternalTaskFailure(DefaultExecutionGraph.java:1536)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1118)
	at org.apache.flink.runtime.executiongraph.Execution.processFail(Execution.java:1058)
	at org.apache.flink.runtime.executiongraph.Execution.fail(Execution.java:759)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.signalPayloadRelease(SingleLogicalSlot.java:195)
	at org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot.release(SingleLogicalSlot.java:182)
	at org.apache.flink.runtime.scheduler.SharedSlot.lambda$release$4(SharedSlot.java:271)
	at java.util.concurrent.CompletableFuture.uniAcceptNow(CompletableFuture.java:753)
	at java.util.concurrent.CompletableFuture.uniAcceptStage(CompletableFuture.java:731)
	at java.util.concurrent.CompletableFuture.thenAccept(CompletableFuture.java:2108)
	at org.apache.flink.runtime.scheduler.SharedSlot.release(SharedSlot.java:271)
	at org.apache.flink.runtime.jobmaster.slotpool.AllocatedSlot.releasePayload(AllocatedSlot.java:152)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releasePayload(DefaultDeclarativeSlotPool.java:482)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.freeAndReleaseSlots(DefaultDeclarativeSlotPool.java:474)
	at org.apache.flink.runtime.jobmaster.slotpool.DefaultDeclarativeSlotPool.releaseSlots(DefaultDeclarativeSlotPool.java:445)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.internalReleaseTaskManager(DeclarativeSlotPoolService.java:249)
	at org.apache.flink.runtime.jobmaster.slotpool.DeclarativeSlotPoolService.releaseTaskManager(DeclarativeSlotPoolService.java:230)
	at org.apache.flink.runtime.jobmaster.JobMaster.disconnectTaskManager(JobMaster.java:505)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.handleTaskManagerConnectionLoss(JobMaster.java:1376)
	at org.apache.flink.runtime.jobmaster.JobMaster$TaskManagerHeartbeatListener.notifyHeartbeatTimeout(JobMaster.java:1371)
	at org.apache.flink.runtime.heartbeat.HeartbeatMonitorImpl.run(HeartbeatMonitorImpl.java:155)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRunAsync$4(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRunAsync(AkkaRpcActor.java:443)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:213)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668592968333_0001_01_000004(beam-loadtests-go-gbk-flink-batch-715-w-3.c.apache-beam-testing.internal:8026) timed out.
	... 31 more
2022/11/16 10:58:54  (): java.util.concurrent.TimeoutException: Heartbeat of TaskManager with id container_1668592968333_0001_01_000004(beam-loadtests-go-gbk-flink-batch-715-w-3.c.apache-beam-testing.internal:8026) timed out.
2022/11/16 10:58:54 Job state: FAILED
2022/11/16 10:58:54 Failed to execute job: job load0tests0go0flink0batch0gbk0101116065405-root-1116100612-4462e0e_f97f3f7e-e806-475d-85d7-f21c31cc4400 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101116065405-root-1116100612-4462e0e_f97f3f7e-e806-475d-85d7-f21c31cc4400 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x1848328, 0xc000120000}, {0x1697797?, 0x22f38a8?}, {0xc000291e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3eb

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 53m 23s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/buyo4rzgrn5iu

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #714

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/714/display/redirect?page=changes>

Changes:

[noreply] Bump loader-utils

[chamikaramj] Updates Multi-lang Java quickstart

[Kenneth Knowles] Fix checkArgument format in GcsPath

[noreply] [Tour Of Beam] verify that unit exists when saving progress (#24118)

[noreply] Cleanup stale BQ datasets (#24158)

[noreply] Support SqlTypes Date and Timestamp (MicrosInstant) in AvroUtils

[noreply] Add more tests for S3 filesystem (#24138)

[noreply] Merge pull request #23333: Track time on Cloud Dataflow streaming data

[Robert Bradshaw] Rename the test_splits flag to direct_test_splits.

[noreply] Adding a quickstart to README for the TS SDK (#23509)

[noreply] More dataset templates to clean up (#24162)

[noreply] Implement embedded WebAssembly example (#24081)

[noreply] [Dockerized Jenkins] Update README how to use local repo (#24055)

[noreply] [Dockerized Jenkins] Fix build of dockerized jenkins (fixes #24053)


------------------------------------------
[...truncated 70.54 KB...]
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/15 11:07:58 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/15 11:07:58 Prepared job with id: load-tests-go-flink-batch-gbk-3-1115065427_ec5552c2-a36e-4d7b-a964-74d0039a740a and staging token: load-tests-go-flink-batch-gbk-3-1115065427_ec5552c2-a36e-4d7b-a964-74d0039a740a
2022/11/15 11:07:59 Staged binary artifact with token: 
2022/11/15 11:08:00 Submitted job: load0tests0go0flink0batch0gbk0301115065427-root-1115110759-80e2839_4e7661f5-2ccc-4288-abe7-93168c0079d9
2022/11/15 11:08:00 Job state: STOPPED
2022/11/15 11:08:00 Job state: STARTING
2022/11/15 11:08:00 Job state: RUNNING
2022/11/15 11:08:48  (): org.apache.flink.client.program.ProgramInvocationException: Job failed (JobID: 0787b8b9c59e9f4e1bc235c62e1771dc)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:130)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.client.program.rest.RestClusterClient.lambda$pollResourceAsync$26(RestClusterClient.java:708)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
	at org.apache.flink.util.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:403)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
	at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
	at org.apache.flink.client.deployment.ClusterClientJobClientAdapter.lambda$null$6(ClusterClientJobClientAdapter.java:128)
	... 24 more
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
	at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:301)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:291)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:282)
	at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:739)
	at org.apache.flink.runtime.scheduler.SchedulerNG.updateTaskExecutionState(SchedulerNG.java:78)
	at org.apache.flink.runtime.jobmaster.JobMaster.updateTaskExecutionState(JobMaster.java:443)
	at jdk.internal.reflect.GeneratedMethodAccessor22.invoke(Unknown Source)
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:566)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRpcInvocation$1(AkkaRpcActor.java:304)
	at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:83)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcInvocation(AkkaRpcActor.java:302)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:217)
	at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
	at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
	at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
	at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
	at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
	at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
	at akka.actor.Actor.aroundReceive(Actor.scala:537)
	at akka.actor.Actor.aroundReceive$(Actor.scala:535)
	at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580)
	at akka.actor.ActorCell.invoke(ActorCell.scala:548)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
	at akka.dispatch.Mailbox.run(Mailbox.scala:231)
	at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
Caused by: java.lang.OutOfMemoryError: Direct buffer memory. The direct out-of-memory error has occurred. This can mean two things: either job(s) require(s) a larger size of JVM direct memory or there is a direct memory leak. The direct memory can be allocated by user code or some of its dependencies. In this case 'taskmanager.memory.task.off-heap.size' configuration option should be increased. Flink framework and its dependencies also consume the direct memory, mostly for network communication. The most of network memory is managed by Flink and should not result in out-of-memory error. In certain special cases, in particular for jobs with high parallelism, the framework may require more direct memory which is not managed by Flink. In this case 'taskmanager.memory.framework.off-heap.size' configuration option should be increased. If the error persists then there is probably a direct memory leak in user code or some of its dependencies which has to be investigated and fixed. The task executor has to be shutdown...
	at java.nio.Bits.reserveMemory(Bits.java:175)
	at java.nio.DirectByteBuffer.<init>(DirectByteBuffer.java:118)
	at java.nio.ByteBuffer.allocateDirect(ByteBuffer.java:317)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PoolArena$DirectArena.allocateDirect(PoolArena.java:649)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PoolArena$DirectArena.newChunk(PoolArena.java:624)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PoolArena.allocateNormal(PoolArena.java:203)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PoolArena.tcacheAllocateNormal(PoolArena.java:187)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PoolArena.allocate(PoolArena.java:136)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PoolArena.allocate(PoolArena.java:126)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.PooledByteBufAllocator.newDirectBuffer(PooledByteBufAllocator.java:396)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:188)
	at org.apache.beam.vendor.grpc.v1p48p1.io.netty.buffer.AbstractByteBufAllocator.buffer(AbstractByteBufAllocator.java:124)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.netty.NettyWritableBufferAllocator.allocate(NettyWritableBufferAllocator.java:51)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.MessageFramer.writeKnownLengthUncompressed(MessageFramer.java:226)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.MessageFramer.writeUncompressed(MessageFramer.java:168)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.MessageFramer.writePayload(MessageFramer.java:141)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.AbstractStream.writeMessage(AbstractStream.java:65)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.ServerCallImpl.sendMessageInternal(ServerCallImpl.java:171)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.ServerCallImpl.sendMessage(ServerCallImpl.java:153)
	at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.stub.ServerCalls$ServerCallStreamObserverImpl.onNext(ServerCalls.java:380)
	at org.apache.beam.sdk.fn.stream.DirectStreamObserver.onNext(DirectStreamObserver.java:108)
	at org.apache.beam.sdk.fn.data.BeamFnDataOutboundAggregator.flushInternal(BeamFnDataOutboundAggregator.java:169)
	at org.apache.beam.sdk.fn.data.BeamFnDataOutboundAggregator.access$400(BeamFnDataOutboundAggregator.java:65)
	at org.apache.beam.sdk.fn.data.BeamFnDataOutboundAggregator$Receiver.accept(BeamFnDataOutboundAggregator.java:349)
	at org.apache.beam.sdk.fn.data.BeamFnDataOutboundObserver.accept(BeamFnDataOutboundObserver.java:85)
	at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$CountingFnDataReceiver.accept(SdkHarnessClient.java:716)
	at org.apache.beam.runners.flink.translation.functions.FlinkExecutableStageFunction.processElements(FlinkExecutableStageFunction.java:363)
	at org.apache.beam.runners.flink.translation.functions.FlinkExecutableStageFunction.mapPartition(FlinkExecutableStageFunction.java:268)
	at org.apache.flink.runtime.operators.MapPartitionDriver.run(MapPartitionDriver.java:113)
	at org.apache.flink.runtime.operators.BatchTask.run(BatchTask.java:514)
	at org.apache.flink.runtime.operators.BatchTask.invoke(BatchTask.java:357)
	at org.apache.flink.runtime.taskmanager.Task.runWithSystemExitMonitoring(Task.java:948)
	at org.apache.flink.runtime.taskmanager.Task.restoreAndInvoke(Task.java:927)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:741)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:563)
	at java.lang.Thread.run(Thread.java:829)
	Suppressed: java.lang.IllegalStateException: call is closed
		at org.apache.beam.vendor.grpc.v1p48p1.com.google.common.base.Preconditions.checkState(Preconditions.java:502)
		at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.ServerCallImpl.sendMessageInternal(ServerCallImpl.java:161)
		at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.internal.ServerCallImpl.sendMessage(ServerCallImpl.java:153)
		at org.apache.beam.vendor.grpc.v1p48p1.io.grpc.stub.ServerCalls$ServerCallStreamObserverImpl.onNext(ServerCalls.java:380)
		at org.apache.beam.sdk.fn.stream.DirectStreamObserver.onNext(DirectStreamObserver.java:108)
		at org.apache.beam.sdk.fn.data.BeamFnDataOutboundAggregator.sendOrCollectBufferedDataAndFinishOutboundStreams(BeamFnDataOutboundAggregator.java:219)
		at org.apache.beam.sdk.fn.data.BeamFnDataOutboundObserver.close(BeamFnDataOutboundObserver.java:71)
		at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$CountingFnDataReceiver.close(SdkHarnessClient.java:727)
		at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor$ActiveBundle.close(SdkHarnessClient.java:492)
		at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$SimpleStageBundleFactory$1.close(DefaultJobBundleFactory.java:555)
		at org.apache.beam.runners.flink.translation.functions.FlinkExecutableStageFunction.$closeResource(FlinkExecutableStageFunction.java:269)
		at org.apache.beam.runners.flink.translation.functions.FlinkExecutableStageFunction.mapPartition(FlinkExecutableStageFunction.java:269)
		... 8 more
		Suppressed: java.lang.IllegalStateException: Processing bundle failed, TODO: [https://github.com/apache/beam/issues/18756] abort bundle.
			at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor$ActiveBundle.close(SdkHarnessClient.java:517)
			... 11 more
2022/11/15 11:08:48  (): java.lang.OutOfMemoryError: Direct buffer memory. The direct out-of-memory error has occurred. This can mean two things: either job(s) require(s) a larger size of JVM direct memory or there is a direct memory leak. The direct memory can be allocated by user code or some of its dependencies. In this case 'taskmanager.memory.task.off-heap.size' configuration option should be increased. Flink framework and its dependencies also consume the direct memory, mostly for network communication. The most of network memory is managed by Flink and should not result in out-of-memory error. In certain special cases, in particular for jobs with high parallelism, the framework may require more direct memory which is not managed by Flink. In this case 'taskmanager.memory.framework.off-heap.size' configuration option should be increased. If the error persists then there is probably a direct memory leak in user code or some of its dependencies which has to be investigated and fixed. The task executor has to be shutdown...
2022/11/15 11:08:48 Job state: FAILED
2022/11/15 11:08:48 Failed to execute job: job load0tests0go0flink0batch0gbk0301115065427-root-1115110759-80e2839_4e7661f5-2ccc-4288-abe7-93168c0079d9 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0301115065427-root-1115110759-80e2839_4e7661f5-2ccc-4288-abe7-93168c0079d9 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17fa048, 0xc00004a0d0}, {0x16458f1?, 0x2260620?}, {0xc00015fe70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle>' line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 53s
12 actionable tasks: 2 executed, 10 up-to-date

Publishing build scan...
https://gradle.com/s/4t3aig66ib7wo

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #713

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/713/display/redirect?page=changes>

Changes:

[bulat.safiullin] [Website] change headers size from h4,h3 to h2 #24082

[Kenneth Knowles] Fix checkArgument format string in AvroIO

[Kenneth Knowles] Remove extraneous jetbrains annotation

[noreply] Bump golang.org/x/net from 0.1.0 to 0.2.0 in /sdks (#24153)

[noreply] Make MonotonicWatermarkEstimator work like its Java SDK equivalent

[noreply] Test Dataproc 2.1 with Flink load tests (#24129)

[noreply] Change DataflowBatchWorkerHarness doWork error level to INFO (#24135)

[noreply] Bump github.com/aws/aws-sdk-go-v2/config from 1.17.10 to 1.18.0 in /sdks


------------------------------------------
[...truncated 30.94 KB...]
    >
  >
  windowing_strategies: <
    key: "w0"
    value: <
      window_fn: <
        urn: "beam:window_fn:global_windows:v1"
      >
      merge_status: NON_MERGING
      window_coder_id: "c2"
      trigger: <
        default: <
        >
      >
      accumulation_mode: DISCARDING
      output_time: END_OF_WINDOW
      closing_behavior: EMIT_IF_NONEMPTY
      on_time_behavior: FIRE_IF_NONEMPTY
      environment_id: "go"
    >
  >
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/14 17:53:08 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/14 17:53:09 Prepared job with id: load-tests-go-flink-batch-gbk-1-1114150224_20737fea-e063-4070-a4a2-f2c54777bba8 and staging token: load-tests-go-flink-batch-gbk-1-1114150224_20737fea-e063-4070-a4a2-f2c54777bba8
2022/11/14 17:53:15 Staged binary artifact with token: 
2022/11/14 17:53:17 Submitted job: load0tests0go0flink0batch0gbk0101114150224-root-1114175315-fd52aad2_eb4453b5-fb9d-4c34-b1af-cbbb90a15684
2022/11/14 17:53:17 Job state: STOPPED
2022/11/14 17:53:17 Job state: STARTING
2022/11/14 17:53:17 Job state: RUNNING
2022/11/14 17:53:31  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.flink.runtime.client.JobInitializationException: Could not start the JobMaster.
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.flink.runtime.client.JobInitializationException: Could not start the JobMaster.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: org.apache.flink.runtime.client.JobInitializationException: Could not start the JobMaster.
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:75)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	... 1 more
Caused by: org.apache.flink.runtime.client.JobInitializationException: Could not start the JobMaster.
	at org.apache.flink.runtime.jobmaster.DefaultJobMasterServiceProcess.lambda$new$0(DefaultJobMasterServiceProcess.java:97)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506)
	at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1705)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.lang.Thread.run(Thread.java:829)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.api.common.InvalidProgramException: The job graph is cyclic.
	at java.util.concurrent.CompletableFuture.encodeThrowable(CompletableFuture.java:314)
	at java.util.concurrent.CompletableFuture.completeThrowable(CompletableFuture.java:319)
	at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1702)
	... 3 more
Caused by: org.apache.flink.api.common.InvalidProgramException: The job graph is cyclic.
	at org.apache.flink.runtime.jobgraph.JobGraph.getVerticesSortedTopologicallyFromSources(JobGraph.java:442)
	at org.apache.flink.runtime.executiongraph.DefaultExecutionGraphBuilder.buildGraph(DefaultExecutionGraphBuilder.java:186)
	at org.apache.flink.runtime.scheduler.DefaultExecutionGraphFactory.createAndRestoreExecutionGraph(DefaultExecutionGraphFactory.java:149)
	at org.apache.flink.runtime.scheduler.SchedulerBase.createAndRestoreExecutionGraph(SchedulerBase.java:363)
	at org.apache.flink.runtime.scheduler.SchedulerBase.<init>(SchedulerBase.java:208)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.<init>(DefaultScheduler.java:191)
	at org.apache.flink.runtime.scheduler.DefaultScheduler.<init>(DefaultScheduler.java:139)
	at org.apache.flink.runtime.scheduler.DefaultSchedulerFactory.createInstance(DefaultSchedulerFactory.java:135)
	at org.apache.flink.runtime.jobmaster.DefaultSlotPoolServiceSchedulerFactory.createScheduler(DefaultSlotPoolServiceSchedulerFactory.java:115)
	at org.apache.flink.runtime.jobmaster.JobMaster.createScheduler(JobMaster.java:345)
	at org.apache.flink.runtime.jobmaster.JobMaster.<init>(JobMaster.java:322)
	at org.apache.flink.runtime.jobmaster.factories.DefaultJobMasterServiceFactory.internalCreateJobMasterService(DefaultJobMasterServiceFactory.java:106)
	at org.apache.flink.runtime.jobmaster.factories.DefaultJobMasterServiceFactory.lambda$createJobMasterService$0(DefaultJobMasterServiceFactory.java:94)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedSupplier$4(FunctionUtils.java:112)
	at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1700)
	... 3 more
2022/11/14 17:53:31  (): org.apache.flink.api.common.InvalidProgramException: The job graph is cyclic.
2022/11/14 17:53:32 Job state: FAILED
2022/11/14 17:53:32 Failed to execute job: job load0tests0go0flink0batch0gbk0101114150224-root-1114175315-fd52aad2_eb4453b5-fb9d-4c34-b1af-cbbb90a15684 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101114150224-root-1114175315-fd52aad2_eb4453b5-fb9d-4c34-b1af-cbbb90a15684 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17fa048, 0xc00004a0d0}, {0x16458f1?, 0x2260620?}, {0xc000457e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle>' line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 54s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/y32wuklef62wk

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #712

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/712/display/redirect?page=changes>

Changes:

[bulat.safiullin] [Website] update go-dependencies.md java-dependencies.md

[Kenneth Knowles] Fix checkArgument format string in ExecutionStateTracker


------------------------------------------
[...truncated 31.18 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/14 10:04:11 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/14 10:04:12 Prepared job with id: load-tests-go-flink-batch-gbk-1-1114065411_1744e7ad-48f8-413c-9dc2-fe8d3a761f5d and staging token: load-tests-go-flink-batch-gbk-1-1114065411_1744e7ad-48f8-413c-9dc2-fe8d3a761f5d
2022/11/14 10:04:16 Staged binary artifact with token: 
2022/11/14 10:04:17 Submitted job: load0tests0go0flink0batch0gbk0101114065411-root-1114100416-3398d7cf_6e876df3-30c3-4b8c-a8cd-e076568a6ca0
2022/11/14 10:04:17 Job state: STOPPED
2022/11/14 10:04:17 Job state: STARTING
2022/11/14 10:04:17 Job state: RUNNING
2022/11/14 10:05:26  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/14 10:05:26  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
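The exception above already names the Jackson switch involved, DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES. The sketch below is a hedged, self-contained reproduction of that behaviour with a plain ObjectMapper; the Details class and the JSON literal are invented for the example, and toggling the flag is not presented as the fix for this job (the null 'maxParallelism' could just as well point at a mismatch between the submitting client and the Flink cluster's REST payload, which the log does not confirm either way).

    import com.fasterxml.jackson.databind.DeserializationFeature;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class NullIntoPrimitiveExample {
        // Made-up target type with a primitive long field, standing in for JobDetailsInfo.maxParallelism.
        public static class Details {
            public long maxParallelism;
        }

        public static void main(String[] args) throws Exception {
            String json = "{\"maxParallelism\": null}";

            // With FAIL_ON_NULL_FOR_PRIMITIVES enabled, null cannot be mapped into 'long',
            // which is the MismatchedInputException reported in the log above.
            ObjectMapper strict = new ObjectMapper()
                    .enable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);
            try {
                strict.readValue(json, Details.class);
            } catch (Exception e) {
                System.out.println("strict: " + e.getClass().getSimpleName());
            }

            // With the feature disabled, null falls back to the primitive default (0).
            ObjectMapper lenient = new ObjectMapper()
                    .disable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);
            Details d = lenient.readValue(json, Details.class);
            System.out.println("lenient: maxParallelism=" + d.maxParallelism);
        }
    }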
2022/11/14 10:05:26 Job state: FAILED
2022/11/14 10:05:26 Failed to execute job: job load0tests0go0flink0batch0gbk0101114065411-root-1114100416-3398d7cf_6e876df3-30c3-4b8c-a8cd-e076568a6ca0 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101114065411-root-1114100416-3398d7cf_6e876df3-30c3-4b8c-a8cd-e076568a6ca0 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f9e48, 0xc000122000}, {0x16458b5?, 0x225e5e0?}, {0xc0003dde70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle>' line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 1m 42s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/4cebfkgme4hb2

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #711

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/711/display/redirect>

Changes:


------------------------------------------
[...truncated 31.10 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/13 10:03:48 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/13 10:03:48 Prepared job with id: load-tests-go-flink-batch-gbk-1-1113065406_a933693b-bed7-4c4c-abca-289bf3efb527 and staging token: load-tests-go-flink-batch-gbk-1-1113065406_a933693b-bed7-4c4c-abca-289bf3efb527
2022/11/13 10:03:53 Staged binary artifact with token: 
2022/11/13 10:03:54 Submitted job: load0tests0go0flink0batch0gbk0101113065406-root-1113100353-df28721a_fef0bbe3-64f4-443b-8765-d17e1e084a55
2022/11/13 10:03:54 Job state: STOPPED
2022/11/13 10:03:54 Job state: STARTING
2022/11/13 10:03:54 Job state: RUNNING
2022/11/13 10:05:03  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/13 10:05:03  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
2022/11/13 10:05:03 Job state: FAILED
2022/11/13 10:05:03 Failed to execute job: job load0tests0go0flink0batch0gbk0101113065406-root-1113100353-df28721a_fef0bbe3-64f4-443b-8765-d17e1e084a55 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101113065406-root-1113100353-df28721a_fef0bbe3-64f4-443b-8765-d17e1e084a55 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f9e48, 0xc00004a0d0}, {0x16458b5?, 0x225e5e0?}, {0xc00031de70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle>' line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 1m 34s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/yuidep5jsuizg

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure



Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #710

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/710/display/redirect?page=changes>

Changes:

[noreply] Add TFX support in pydoc (#23960)

[noreply] Bump cloud.google.com/go/bigtable from 1.17.0 to 1.18.0 in /sdks

[noreply] disable (#24121)

[noreply] Implement PubsubRowToMessage transform (#23897)

[noreply] upgrade testcontainer dependency (#24123)

[noreply] More cleanup containers (#24105)

[noreply] Bump github.com/aws/aws-sdk-go-v2/service/s3 in /sdks (#24112)

[noreply] Bump google.golang.org/api from 0.102.0 to 0.103.0 in /sdks (#24049)

[noreply] Update staging of Python wheels (#24114)

[noreply] Add a ValidatesContainer integration test for use_sibling_sdk_workers

[noreply] Fix checkArgument format string in TestStream (#24134)


------------------------------------------
[...truncated 31.07 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/12 10:04:23 Using specified worker binary: 'linux_amd64/group_by_key'
2022/11/12 10:04:23 Prepared job with id: load-tests-go-flink-batch-gbk-1-1112065409_5df7e907-8591-49c8-9296-797a68d8af0d and staging token: load-tests-go-flink-batch-gbk-1-1112065409_5df7e907-8591-49c8-9296-797a68d8af0d
2022/11/12 10:04:28 Staged binary artifact with token: 
2022/11/12 10:04:29 Submitted job: load0tests0go0flink0batch0gbk0101112065409-root-1112100428-ebf13cb4_591c783d-3c44-49f7-9a65-ba53349f44fb
2022/11/12 10:04:29 Job state: STOPPED
2022/11/12 10:04:29 Job state: STARTING
2022/11/12 10:04:29 Job state: RUNNING
2022/11/12 10:05:38  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/12 10:05:38  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
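The root cause repeated throughout these runs is a Jackson deserialization failure: the JobDetailsInfo response carries no usable value for the primitive field maxParallelism (often a sign that the submitting client and the Flink cluster disagree on the JobDetailsInfo schema, e.g. a version mismatch), and the REST client's mapper rejects null for primitives. Below is a minimal, hypothetical sketch of the DeserializationFeature named in that message, using a plain Jackson ObjectMapper rather than Flink's shaded copy; the bean and JSON are illustrative only.

import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public class NullIntoPrimitiveDemo {
    // Stand-in for a response bean with a primitive field, analogous to
    // JobDetailsInfo.maxParallelism.
    public static class Details {
        public long maxParallelism;
    }

    public static void main(String[] args) throws Exception {
        String json = "{\"maxParallelism\": null}";

        // Strict mapper: rejecting null for primitives reproduces the
        // MismatchedInputException seen in the trace above.
        ObjectMapper strict = new ObjectMapper()
                .enable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);
        try {
            strict.readValue(json, Details.class);
        } catch (Exception e) {
            System.out.println("strict: " + e.getMessage());
        }

        // Lenient mapper: the null quietly becomes 0 instead of failing.
        ObjectMapper lenient = new ObjectMapper()
                .disable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);
        Details d = lenient.readValue(json, Details.class);
        System.out.println("lenient: maxParallelism=" + d.maxParallelism);
    }
}

In these runs the strict behavior is clearly in effect, so every poll of the job-details endpoint fails the same way until the retry budget is exhausted (see the RetryException further up the chain).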
2022/11/12 10:05:39 Job state: FAILED
2022/11/12 10:05:39 Failed to execute job: job load0tests0go0flink0batch0gbk0101112065409-root-1112100428-ebf13cb4_591c783d-3c44-49f7-9a65-ba53349f44fb failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101112065409-root-1112100428-ebf13cb4_591c783d-3c44-49f7-9a65-ba53349f44fb failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f9e48, 0xc00018a000}, {0x16458b5?, 0x225e5e0?}, {0xc0005b1e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 2m 1s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/tsmjsfm7vtxk4

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #709

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/709/display/redirect?page=changes>

Changes:

[Robert Bradshaw] Compute element counts for all PCollections.

[Robert Bradshaw] Add the ability to schedule splits on the ULR via a pipeline option.

[Robert Bradshaw] Add a Reshuffle operation and use it in Create.

[Robert Bradshaw] Add dynamic splitting support to the worker.

[noreply] Update style

[Robert Bradshaw] Clarifying comments.

[Robert Bradshaw] Make mypy happy.

[Robert Bradshaw] Reduce flakiness of time-based split manager test.

[noreply] Fix FhirIO javadoc format broken (#24072)

[noreply] Bump github.com/aws/aws-sdk-go-v2/service/s3 in /sdks (#24077)

[noreply] [BEAM-12792] Install pipeline dependencies to temporary venv (#16658)

[noreply] [Python]Set pickle library at the Pipeline creation stage (#24069)

[noreply] Improving stale container cleanup script (#24040)

[noreply] Add random string at the end of BigQuery query job name to make it

[noreply] [Playground] update snippet by persistence_key (#24056)

[noreply] [Tour Of Beam] handle CORS pre-flight requests (#24083)

[noreply] Num failed inferences (#23830)

[noreply] Bump github.com/aws/aws-sdk-go-v2/config from 1.5.0 to 1.17.10 in /sdks

[noreply] Add blog post on new ML resources (#24071)

[noreply] fixing linter error (#24104)

[noreply] Support using BigQueryIO Storage Read API with SchemaTransforms (#23827)

[noreply] Wire SamzaPipelineOptions to Exception listener interface (#24109)

[noreply] Remove TheNeuralBit from the pool of Python reviewers (#24108)


------------------------------------------
[...truncated 31.08 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/11 10:05:34 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/11 10:05:34 Prepared job with id: load-tests-go-flink-batch-gbk-1-1111065424_73df68ee-cd16-4058-a1e7-5dcf1b0efa8d and staging token: load-tests-go-flink-batch-gbk-1-1111065424_73df68ee-cd16-4058-a1e7-5dcf1b0efa8d
2022/11/11 10:05:40 Staged binary artifact with token: 
2022/11/11 10:05:41 Submitted job: load0tests0go0flink0batch0gbk0101111065424-root-1111100540-34310efe_d11d07df-110d-43f9-9117-4c5165c76853
2022/11/11 10:05:41 Job state: STOPPED
2022/11/11 10:05:41 Job state: STARTING
2022/11/11 10:05:41 Job state: RUNNING
2022/11/11 10:06:50  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/11 10:06:50  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
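The RetryException in this chain is only the wrapper: each poll of the job-details endpoint fails with the Jackson error below it, and once the retry budget is used up the client surfaces the last failure as "Number of retries has been exhausted." A hedged sketch of that retry-until-exhausted pattern over CompletableFuture follows; it illustrates the general idea only and is not Flink's actual FutureUtils implementation (names and the failing operation are made up).

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

public final class RetryUntilExhausted {

    // Retries an async operation up to maxRetries additional times,
    // completing exceptionally with the last error once exhausted.
    static <T> CompletableFuture<T> retry(Supplier<CompletableFuture<T>> op, int maxRetries) {
        CompletableFuture<T> result = new CompletableFuture<>();
        attempt(op, maxRetries, result);
        return result;
    }

    private static <T> void attempt(Supplier<CompletableFuture<T>> op, int retriesLeft,
                                    CompletableFuture<T> result) {
        op.get().whenComplete((value, error) -> {
            if (error == null) {
                result.complete(value);
            } else if (retriesLeft > 0) {
                attempt(op, retriesLeft - 1, result); // try the operation again
            } else {
                // Retry budget exhausted: surface the last failure, analogous to
                // "Could not complete the operation. Number of retries has been exhausted."
                result.completeExceptionally(error);
            }
        });
    }

    public static void main(String[] args) {
        retry(() -> CompletableFuture.failedFuture(new RuntimeException("poll failed")), 3)
                .whenComplete((v, e) -> System.out.println("final outcome: " + e));
    }
}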
2022/11/11 10:06:50 Job state: FAILED
2022/11/11 10:06:50 Failed to execute job: job load0tests0go0flink0batch0gbk0101111065424-root-1111100540-34310efe_d11d07df-110d-43f9-9117-4c5165c76853 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101111065424-root-1111100540-34310efe_d11d07df-110d-43f9-9117-4c5165c76853 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f8aa8, 0xc00019a000}, {0x1644755?, 0x225c520?}, {0xc000397e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 2m 21s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/pxpuuw7pdtysm

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #708

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/708/display/redirect?page=changes>

Changes:

[Moritz Mack] [Spark Dataset runner] Enable projection pushdown for Spark dataset

[noreply] Fix dependency mismatch in Playground Java runner  (#24059)

[noreply] added comments for tensorflow notebook (#23726)

[noreply] Convert initialisms to all caps (#24061)

[noreply] skip output coder field in exp request (#24066)

[noreply] test: add more tests to throughput estimator (#23915)

[noreply] Remove a duplicate label (#24043)

[noreply] Update datastore_wordcount.py (#23724)


------------------------------------------
[...truncated 31.14 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/10 10:04:16 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/10 10:04:16 Prepared job with id: load-tests-go-flink-batch-gbk-1-1110065417_9970c1f4-92c2-471f-9d9f-dccd34a18a32 and staging token: load-tests-go-flink-batch-gbk-1-1110065417_9970c1f4-92c2-471f-9d9f-dccd34a18a32
2022/11/10 10:04:21 Staged binary artifact with token: 
2022/11/10 10:04:22 Submitted job: load0tests0go0flink0batch0gbk0101110065417-root-1110100421-7fd16bbe_453ec552-2251-4a1f-8662-6dafc22b922f
2022/11/10 10:04:22 Job state: STOPPED
2022/11/10 10:04:22 Job state: STARTING
2022/11/10 10:04:22 Job state: RUNNING
2022/11/10 10:05:31  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/10 10:05:31  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
2022/11/10 10:05:32 Job state: FAILED
2022/11/10 10:05:32 Failed to execute job: job load0tests0go0flink0batch0gbk0101110065417-root-1110100421-7fd16bbe_453ec552-2251-4a1f-8662-6dafc22b922f failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101110065417-root-1110100421-7fd16bbe_453ec552-2251-4a1f-8662-6dafc22b922f failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f8aa8, 0xc000122000}, {0x1644755?, 0x225c520?}, {0xc0000fbe70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org
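
The exit value reported above lines up with the Go panic earlier in the log: a Go program that panics terminates with status 2, and the Gradle task simply relays that status as a build failure. A minimal, hypothetical sketch of that relay in plain Java (the command line is a placeholder, not the task's real invocation):

import java.io.IOException;

public class RunLoadTest {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Shell out the way the Gradle task does, streaming the child's
        // output into this process's log.
        Process p = new ProcessBuilder("sh", "-c", "./linux_amd64/group_by_key")
                .inheritIO()
                .start();
        int exit = p.waitFor();
        if (exit != 0) {
            // A panicking Go binary exits with status 2, which surfaces as
            // "finished with non-zero exit value 2".
            throw new RuntimeException("command 'sh' finished with non-zero exit value " + exit);
        }
    }
}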

BUILD FAILED in 1m 47s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/2xkgnlbw7cry4

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org


Build failed in Jenkins: beam_LoadTests_Go_GBK_Flink_Batch #707

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/707/display/redirect?page=changes>

Changes:

[noreply] Update release notes. (#23986)

[noreply] [Go] Pipeline Resource Hints (#23990)

[noreply] [#21250] Trivial removal of loop over something that always has one

[noreply] Bump cloud.google.com/go/bigtable from 1.16.0 to 1.17.0 in /sdks

[noreply] Editorial review of the ML base API descriptions (#24026)

[noreply] Update my Twitter handle (#23653)

[noreply] Retroactively announce Batched DoFn support in 2.42.0 Blog (#24011)

[noreply] Bump cloud.google.com/go/storage from 1.27.0 to 1.28.0 in /sdks (#24028)

[noreply] [Go] Add pipeline resource hints to CHANGES.md (#24036)

[noreply] Handle Avro schema generation for logical data types in

[noreply] [Go SDK] S3 implementation of the Beam filesystem (#23992)


------------------------------------------
[...truncated 31.12 KB...]
  coders: <
    key: "c0"
    value: <
      spec: <
        urn: "beam:coder:bytes:v1"
      >
    >
  >
  coders: <
    key: "c1"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c2"
    value: <
      spec: <
        urn: "beam:coder:global_window:v1"
      >
    >
  >
  coders: <
    key: "c3"
    value: <
      spec: <
        urn: "beam:coder:iterable:v1"
      >
      component_coder_ids: "c0"
    >
  >
  coders: <
    key: "c4"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c0"
      component_coder_ids: "c3"
    >
  >
  coders: <
    key: "c5"
    value: <
      spec: <
        urn: "beam:coder:row:v1"
        payload: "\n\021\n\013num_records\032\002\020\004\n\024\n\016initial_splits\032\002\020\004\n\016\n\010key_size\032\002\020\004\n\020\n\nvalue_size\032\002\020\004\n\022\n\014num_hot_keys\032\002\020\004\n\026\n\020hot_key_fraction\032\002\020\006\022$f691cccd-3963-4ed9-9f25-d9fdfd07b30d"
      >
    >
  >
  coders: <
    key: "c6"
    value: <
      spec: <
        urn: "beam:go:coder:custom:v1"
        payload: "ChdvZmZzZXRyYW5nZS5SZXN0cmljdGlvbhJTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24atAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdEVuYxJlCBYiUwgaSk9naXRodWIuY29tL2FwYWNoZS9iZWFtL3Nka3MvdjIvZ28vcGtnL2JlYW0vaW8vcnRyYWNrZXJzL29mZnNldHJhbmdlLlJlc3RyaWN0aW9uKgYIFBICCAgqBAgZQAEitAEKS2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UucmVzdERlYxJlCBYiBggUEgIICCpTCBpKT2dpdGh1Yi5jb20vYXBhY2hlL2JlYW0vc2Rrcy92Mi9nby9wa2cvYmVhbS9pby9ydHJhY2tlcnMvb2Zmc2V0cmFuZ2UuUmVzdHJpY3Rpb24qBAgZQAE="
      >
    >
  >
  coders: <
    key: "c7"
    value: <
      spec: <
        urn: "beam:coder:length_prefix:v1"
      >
      component_coder_ids: "c6"
    >
  >
  coders: <
    key: "c8"
    value: <
      spec: <
        urn: "beam:coder:bool:v1"
      >
    >
  >
  coders: <
    key: "c9"
    value: <
      spec: <
        urn: "beam:coder:kv:v1"
      >
      component_coder_ids: "c7"
      component_coder_ids: "c8"
    >
  >
  environments: <
    key: "go"
    value: <
      urn: "beam:env:docker:v1"
      payload: "\n6gcr.io/apache-beam-testing/beam-sdk/beam_go_sdk:latest"
      capabilities: "beam:protocol:progress_reporting:v1"
      capabilities: "beam:protocol:multi_core_bundle_processing:v1"
      capabilities: "beam:transform:sdf_truncate_sized_restrictions:v1"
      capabilities: "beam:protocol:****_status:v1"
      capabilities: "beam:protocol:monitoring_info_short_ids:v1"
      capabilities: "beam:version:sdk_base:go"
      capabilities: "beam:coder:bytes:v1"
      capabilities: "beam:coder:bool:v1"
      capabilities: "beam:coder:varint:v1"
      capabilities: "beam:coder:double:v1"
      capabilities: "beam:coder:string_utf8:v1"
      capabilities: "beam:coder:length_prefix:v1"
      capabilities: "beam:coder:kv:v1"
      capabilities: "beam:coder:iterable:v1"
      capabilities: "beam:coder:state_backed_iterable:v1"
      capabilities: "beam:coder:windowed_value:v1"
      capabilities: "beam:coder:global_window:v1"
      capabilities: "beam:coder:interval_window:v1"
      capabilities: "beam:coder:row:v1"
      capabilities: "beam:coder:nullable:v1"
      dependencies: <
        type_urn: "beam:artifact:type:file:v1"
        role_urn: "beam:artifact:role:go_****_binary:v1"
      >
    >
  >
>
root_transform_ids: "s1"
root_transform_ids: "e4"
root_transform_ids: "e5"
root_transform_ids: "e6"
root_transform_ids: "e7"
requirements: "beam:requirement:pardo:splittable_dofn:v1"
2022/11/09 10:04:26 Using specified **** binary: 'linux_amd64/group_by_key'
2022/11/09 10:04:26 Prepared job with id: load-tests-go-flink-batch-gbk-1-1109065502_c6630df6-ad67-4c22-87fe-c5f00539add8 and staging token: load-tests-go-flink-batch-gbk-1-1109065502_c6630df6-ad67-4c22-87fe-c5f00539add8
2022/11/09 10:04:31 Staged binary artifact with token: 
2022/11/09 10:04:33 Submitted job: load0tests0go0flink0batch0gbk0101109065502-root-1109100432-1b775295_bacf5168-d7d3-46aa-9fbc-4c52a2713e65
2022/11/09 10:04:33 Job state: STOPPED
2022/11/09 10:04:33 Job state: STARTING
2022/11/09 10:04:33 Job state: RUNNING
2022/11/09 10:05:42  (): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.util.ExceptionUtils.rethrow(ExceptionUtils.java:316)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1061)
	at org.apache.flink.api.java.ExecutionEnvironment.execute(ExecutionEnvironment.java:958)
	at org.apache.beam.runners.flink.FlinkBatchPortablePipelineTranslator$BatchTranslationContext.execute(FlinkBatchPortablePipelineTranslator.java:195)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.runPipelineWithTranslator(FlinkPipelineRunner.java:132)
	at org.apache.beam.runners.flink.FlinkPipelineRunner.run(FlinkPipelineRunner.java:99)
	at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
	at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: Error while waiting for job to be initialized
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.api.java.ExecutionEnvironment.executeAsync(ExecutionEnvironment.java:1056)
	... 11 more
Caused by: java.lang.RuntimeException: Error while waiting for job to be initialized
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:160)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$execute$2(AbstractSessionClusterExecutor.java:82)
	at org.apache.flink.util.function.FunctionUtils.lambda$uncheckedFunction$2(FunctionUtils.java:73)
	at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
	at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
	at java.util.concurrent.CompletableFuture$Completion.exec(CompletableFuture.java:457)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.util.concurrent.ExecutionException: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
	at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
	at org.apache.flink.client.deployment.executors.AbstractSessionClusterExecutor.lambda$null$0(AbstractSessionClusterExecutor.java:83)
	at org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(ClientUtils.java:140)
	... 9 more
Caused by: org.apache.flink.runtime.concurrent.FutureUtils$RetryException: Could not complete the operation. Number of retries has been exhausted.
	at org.apache.flink.runtime.concurrent.FutureUtils.lambda$retryOperationWithDelay$9(FutureUtils.java:386)
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
	at java.util.concurrent.CompletableFuture.postFire(CompletableFuture.java:575)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:943)
	at java.util.concurrent.CompletableFuture$Completion.run(CompletableFuture.java:456)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:750)
Caused by: java.util.concurrent.CompletionException: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at java.util.concurrent.CompletableFuture.encodeRelay(CompletableFuture.java:326)
	at java.util.concurrent.CompletableFuture.completeRelay(CompletableFuture.java:338)
	at java.util.concurrent.CompletableFuture.uniRelay(CompletableFuture.java:925)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:967)
	at java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:940)
	... 4 more
Caused by: org.apache.flink.runtime.rest.util.RestClientException: Response was neither of the expected type([simple type, class org.apache.flink.runtime.rest.messages.job.JobDetailsInfo]) nor an error.
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:502)
	at org.apache.flink.runtime.rest.RestClient.lambda$submitRequest$3(RestClient.java:466)
	at java.util.concurrent.CompletableFuture.uniCompose(CompletableFuture.java:966)
	... 5 more
Caused by: org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException.from(MismatchedInputException.java:63)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.DeserializationContext.reportInputMismatch(DeserializationContext.java:1575)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.std.NumberDeserializers$PrimitiveOrWrapperDeserializer.getNullValue(NumberDeserializers.java:176)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer._findMissing(PropertyValueBuffer.java:204)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer.getParameters(PropertyValueBuffer.java:160)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.ValueInstantiator.createFromObjectWith(ValueInstantiator.java:288)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.impl.PropertyBasedCreator.build(PropertyBasedCreator.java:202)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer._deserializeUsingPropertyBased(BeanDeserializer.java:520)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1390)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:362)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:195)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:322)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper._readValue(ObjectMapper.java:4569)
	at org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2867)
	at org.apache.flink.runtime.rest.RestClient.parseResponse(RestClient.java:475)
	... 7 more
2022/11/09 10:05:42  (): org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot map `null` into type `long` (set DeserializationConfig.DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES to 'false' to allow)
 at [Source: UNKNOWN; line: -1, column: -1] (through reference chain: org.apache.flink.runtime.rest.messages.job.JobDetailsInfo["maxParallelism"])
2022/11/09 10:05:42 Job state: FAILED
2022/11/09 10:05:42 Failed to execute job: job load0tests0go0flink0batch0gbk0101109065502-root-1109100432-1b775295_bacf5168-d7d3-46aa-9fbc-4c52a2713e65 failed
panic: Failed to execute job: job load0tests0go0flink0batch0gbk0101109065502-root-1109100432-1b775295_bacf5168-d7d3-46aa-9fbc-4c52a2713e65 failed

goroutine 1 [running]:
github.com/apache/beam/sdks/v2/go/pkg/beam/log.Fatalf({0x17f8ca8, 0xc00004a0d0}, {0x16448f5?, 0x225d520?}, {0xc000715e70?, 0x0?, 0x0?})
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/pkg/beam/log/log.go>:153 +0xa5
main.main()
	<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/group_by_key/group_by_key.go>:98 +0x3ec

> Task :sdks:go:test:load:run FAILED

FAILURE: Build failed with an exception.

* Where:
Build file '<https://ci-beam.apache.org/job/beam_LoadTests_Go_GBK_Flink_Batch/ws/src/sdks/go/test/load/build.gradle'> line: 31

* What went wrong:
Execution failed for task ':sdks:go:test:load:run'.
> Process 'command 'sh'' finished with non-zero exit value 2

* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.

* Get more help at https://help.gradle.org

BUILD FAILED in 1m 58s
12 actionable tasks: 6 executed, 4 from cache, 2 up-to-date

Publishing build scan...
https://gradle.com/s/visd27srfbgcc

Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure

---------------------------------------------------------------------
To unsubscribe, e-mail: builds-unsubscribe@beam.apache.org
For additional commands, e-mail: builds-help@beam.apache.org